Beispiel #1
0
def weekly_update(ComicName,IssueNumber,CStatus,CID,futurepull=None):
    logger.fdebug('weekly_update of table : ' + str(ComicName) + ' #:' + str(IssueNumber))
    logger.fdebug('weekly_update of table : ' + str(CStatus))
    # here we update status of weekly table...
    # added Issue to stop false hits on series' that have multiple releases in a week
    # added CStatus to update status flags on Pullist screen
    myDB = db.DBConnection()
    if futurepull is None:
        issuecheck = myDB.action("SELECT * FROM weekly WHERE COMIC=? AND ISSUE=?", [ComicName,IssueNumber]).fetchone()
    else:
        issuecheck = myDB.action("SELECT * FROM future WHERE COMIC=? AND ISSUE=?", [ComicName,IssueNumber]).fetchone()
    if issuecheck is not None:
        controlValue = { "COMIC":         str(ComicName),
                         "ISSUE":         str(IssueNumber)}
        if CStatus:
            newValue = {"STATUS":             CStatus,
                        "ComicID":            CID}
        else:
            if mylar.AUTOWANT_UPCOMING:
                newValue = {"STATUS":             "Wanted"}
            else:
                newValue = {"STATUS":             "Skipped"}

        if futurepull is None:
            myDB.upsert("weekly", newValue, controlValue)
        else:
            if issuecheck['ComicID'] is not None:
                newValue = {"STATUS":       "Wanted",
                            "ComicID":      issuecheck['ComicID']}

            myDB.upsert("future", newValue, controlValue)
Beispiel #2
0
def storyarcinfo(xmlid):

    comicLibrary = listLibrary()

    arcinfo = {}

    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    logger.fdebug('arcpull_url:' + str(ARCPULL_URL))
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        chkit = cvapi_check()
        if chkit == False:
            return 'apireached'
    try:
        file = urllib2.urlopen(ARCPULL_URL)
    except urllib2.HTTPError, err:
        logger.error('err : ' + str(err))
        logger.error('There was a major problem retrieving data from ComicVine - on their end.')
        return
Beispiel #3
0
def rssdbupdate(feeddata, i, type):
    rsschktime = 15
    myDB = db.DBConnection()

    # let's add the entries into the db so as to save on searches
    # also to build up the ID's ;)

    for dataval in feeddata:

        if type == "torrent":
            # we just store the torrent ID's now.
            if dataval["site"] == "32P":
                newlink = dataval["link"]
            else:
                # store the hash/id from KAT
                newlink = os.path.basename(re.sub(".torrent", "", dataval["link"][: dataval["link"].find("?title")]))

            newVal = {"Link": newlink, "Pubdate": dataval["pubdate"], "Site": dataval["site"], "Size": dataval["size"]}
            ctrlVal = {"Title": dataval["title"]}

        else:
            newlink = dataval["Link"]
            newVal = {"Link": newlink, "Pubdate": dataval["Pubdate"], "Site": dataval["Site"], "Size": dataval["Size"]}
            ctrlVal = {"Title": dataval["Title"]}

        myDB.upsert("rssdb", newVal, ctrlVal)

    logger.fdebug("Completed adding new data to RSS DB. Next add in " + str(mylar.RSS_CHECKINTERVAL) + " minutes")
    return
Beispiel #4
0
    def writeconfig(self, values=None):
        logger.fdebug("Writing configuration to file")
        self.provider_sequence()
        config.set('Newznab', 'extra_newznabs', ', '.join(self.write_extras(self.EXTRA_NEWZNABS)))
        config.set('Torznab', 'extra_torznabs', ', '.join(self.write_extras(self.EXTRA_TORZNABS)))

        ###this should be moved elsewhere...
        if type(self.BLACKLISTED_PUBLISHERS) != list:
            if self.BLACKLISTED_PUBLISHERS is None:
                bp = 'None'
            else:
                bp = ', '.join(self.write_extras(self.BLACKLISTED_PUBLISHERS))
            config.set('CV', 'blacklisted_publishers', bp)
        else:
            config.set('CV', 'blacklisted_publishers', ', '.join(self.BLACKLISTED_PUBLISHERS))
        ###
        config.set('General', 'dynamic_update', str(self.DYNAMIC_UPDATE))

        if values is not None:
            self.process_kwargs(values)

        try:
            with codecs.open(self._config_file, encoding='utf8', mode='w+') as configfile:
                config.write(configfile)
            logger.fdebug('Configuration written to disk.')
        except IOError as e:
            logger.warn("Error writing configuration file: %s", e)
Beispiel #5
0
    def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None):
        myDB = db.DBConnection()
#--- this is for multipe search terms............
#--- works, just need to redo search.py to accomodate multiple search terms
#        ffs_alt = []
#        if '+' in alt_search:
            #find first +
#            ffs = alt_search.find('+')
#            ffs_alt.append(alt_search[:ffs])
#            ffs_alt_st = str(ffs_alt[0])
#            print("ffs_alt: " + str(ffs_alt[0]))

            # split the entire string by the delimter + 
#            ffs_test = alt_search.split('+')
#            if len(ffs_test) > 0:
#                print("ffs_test names: " + str(len(ffs_test)))
#                ffs_count = len(ffs_test)
#                n=1
#                while (n < ffs_count):
#                    ffs_alt.append(ffs_test[n])
#                    print("adding : " + str(ffs_test[n]))
                    #print("ffs_alt : " + str(ffs_alt))
#                    ffs_alt_st = str(ffs_alt_st) + "..." + str(ffs_test[n])
#                    n+=1
#            asearch = ffs_alt
#        else:
#            asearch = alt_search
        asearch = str(alt_search)

        controlValueDict = {'ComicID': ComicID}
        newValues = {"ComicLocation":        com_location }
                     #"QUALalt_vers":         qual_altvers,
                     #"QUALScanner":          qual_scanner,
                     #"QUALtype":             qual_type,
                     #"QUALquality":          qual_quality
                     #}
        if asearch is not None:
            if asearch == '':
                newValues['AlternateSearch'] = "None"
            else:
                newValues['AlternateSearch'] = str(asearch)

        if fuzzy_year is None:
            newValues['UseFuzzy'] = "0"
        else:
            newValues['UseFuzzy'] = str(fuzzy_year)

        #force the check/creation of directory com_location here
        if os.path.isdir(str(com_location)):
            logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
        else:
            logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
            try:
                os.makedirs(str(com_location))
                logger.info(u"Directory successfully created at: " + str(com_location))
            except OSError:
                logger.error(u"Could not create comicdir : " + str(com_location))

        myDB.upsert("comics", newValues, controlValueDict)
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
Beispiel #6
0
    def notify(self, prline=None, prline2=None, sent_to=None, snatched_nzb=None, force=False, module=None, snline=None):
        """
        Sends a boxcar notification based on the provided info or SB config

        title: The title of the notification to send
        message: The message string to send
        force: If True then the notification will be sent even if Boxcar is disabled in the config
        """
        if module is None:
            module = ''
        module += '[NOTIFIER]'

        if not mylar.BOXCAR_ENABLED and not force:
            logger.fdebug(module + ' Notification for Boxcar not enabled, skipping this notification.')
            return False

        # if no username was given then use the one from the config
        if snatched_nzb:
            title = snline
            message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
        else:
            title = prline
            message = prline2

        logger.info(module + ' Sending notification to Boxcar2')

        self._sendBoxcar(message, title, module)
        return True
Beispiel #7
0
        def __init__(self, un, pw, session_path=None):
            '''
                Params:
                    un: account username (required)
                    pw: account password (required)
                    session_path: the path to the actual file you want to persist your cookies in
                                If blank, saves to $HOME/.32p_cookies.dat

            '''
            self.module = '[32P-AUTHENTICATION]'
            try:
                self.ses = cfscrape.create_scraper()
            except Exception as e:
                logger.error(self.module + " Can't create session with cfscrape")

            self.session_path = session_path if session_path is not None else os.path.join(mylar.CACHE_DIR, ".32p_cookies.dat")
            self.ses.cookies = LWPCookieJar(self.session_path)
            if not os.path.exists(self.session_path):
                logger.fdebug(self.module + ' Session cookie does not exist. Signing in and Creating.')
                self.ses.cookies.save()
            else:
                logger.fdebug(self.module + ' Session cookie found. Attempting to load...')
                self.ses.cookies.load(ignore_discard=True)
            self.un = un
            self.pw = pw
            self.authkey = None
            self.passkey = None
            self.uid = None
            self.inkdrops = None
Beispiel #8
0
def torsend2client(seriesname, linkit, site):
    logger.info('matched on ' + str(seriesname))
    filename = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
    if site == 'ComicBT':
        logger.info(linkit)
        linkit = str(linkit) + '&passkey=' + str(mylar.CBT_PASSKEY)

    if linkit[-7:] != "torrent":
        filename += ".torrent"

    request = urllib2.Request(linkit)
    request.add_header('User-Agent', str(mylar.USER_AGENT))
    if mylar.TORRENT_LOCAL and mylar.LOCAL_WATCHDIR is not None:
        filepath = os.path.join(mylar.LOCAL_WATCHDIR, filename)
        logger.fdebug('filename for torrent set to : ' + filepath)
    elif mylar.TORRENT_SEEDBOX and mylar.SEEDBOX_WATCHDIR is not None:
        filepath = os.path.join(mylar.CACHE_DIR, filename)
        logger.fdebug('filename for torrent set to : ' + filepath)
    else:
        logger.error('No Local Watch Directory or Seedbox Watch Directory specified. Set it and try again.')
        return "fail"

    try:
        opener = helpers.urlretrieve(urllib2.urlopen(request), filepath)
    except Exception, e:
        logger.warn('Error fetching data from %s: %s' % (site, e))
        return "fail"
Beispiel #9
0
def storyarcinfo(xmlid):

    comicLibrary = listLibrary()

    arcinfo = {}

    if mylar.COMICVINE_API == 'None' or mylar.COMICVINE_API is None or mylar.COMICVINE_API == mylar.DEFAULT_CVAPI:
        logger.warn('You have not specified your own ComicVine API key - alot of things will be limited. Get your own @ http://api.comicvine.com.')
        comicapi = mylar.DEFAULT_CVAPI
    else:
        comicapi = mylar.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    logger.fdebug('arcpull_url:' + str(ARCPULL_URL))

    #new CV API restriction - one api request / second.
    if mylar.CVAPI_RATE is None or mylar.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CVAPI_RATE)

    #download the file:
    payload = None
    verify = False

    try:
        r = requests.get(ARCPULL_URL, params=payload, verify=verify, headers=mylar.CV_HEADERS)
    except Exception, e:
        logger.warn('Error fetching data from ComicVine: %s' % (e))
        return
Beispiel #10
0
def rssdbupdate(feeddata, i, type):
    rsschktime = 15
    myDB = db.DBConnection()

    #let's add the entries into the db so as to save on searches
    #also to build up the ID's ;)

    for dataval in feeddata:

        if type == 'torrent':
            #we just store the torrent ID's now.

            newVal = {"Link":      dataval['link'],
                      "Pubdate":   dataval['pubdate'],
                      "Site":      dataval['site'],
                      "Size":      dataval['size']}
            ctrlVal = {"Title":    dataval['title']}

        else:
            newlink = dataval['Link']
            newVal = {"Link":      newlink,
                      "Pubdate":   dataval['Pubdate'],
                      "Site":      dataval['Site'],
                      "Size":      dataval['Size']}
            ctrlVal = {"Title":    dataval['Title']}

        myDB.upsert("rssdb", newVal, ctrlVal)

    logger.fdebug('Completed adding new data to RSS DB. Next add in ' + str(mylar.RSS_CHECKINTERVAL) + ' minutes')
    return
Beispiel #11
0
    def notify(self, ComicName=None, Year=None, Issue=None, sent_to=None, snatched_nzb=None, username=None, force=False):
        """
        Sends a boxcar notification based on the provided info or SB config

        title: The title of the notification to send
        message: The message string to send
        username: The username to send the notification to (optional, defaults to the username in the config)
        force: If True then the notification will be sent even if Boxcar is disabled in the config
        """

        if not mylar.BOXCAR_ENABLED and not force:
            logger.fdebug("Notification for Boxcar not enabled, skipping this notification")
            return False

        # if no username was given then use the one from the config
        if not username:
            username = mylar.BOXCAR_USERNAME


        if snatched_nzb:
            title = "Mylar. Sucessfully Snatched!"
            message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
        else:
            title = "Mylar. Successfully Downloaded & Post-Processed!"
            message = "Mylar has downloaded and postprocessed: " + ComicName + ' (' + Year + ') #' + Issue


        logger.info("Sending notification to Boxcar")

        self._sendBoxcar(message, title, username)
        return True
Beispiel #12
0
    def notify(self, ComicName=None, Year=None, Issue=None, sent_to=None, snatched_nzb=None, force=False, module=None):
        """
        Sends a boxcar notification based on the provided info or SB config

        title: The title of the notification to send
        message: The message string to send
        force: If True then the notification will be sent even if Boxcar is disabled in the config
        """
        if module is None:
            module = ''
        module += '[NOTIFIER]'

        if not mylar.BOXCAR_ENABLED and not force:
            logger.fdebug(module + ' Notification for Boxcar not enabled, skipping this notification.')
            return False

        # if no username was given then use the one from the config
        if snatched_nzb:
            title = "Mylar. Sucessfully Snatched!"
            message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
        else:
            title = "Mylar. Successfully Downloaded & Post-Processed!"
            message = "Mylar has downloaded and postprocessed: " + ComicName + ' (' + Year + ') #' + Issue


        logger.info(module + ' Sending notification to Boxcar2')

        self._sendBoxcar(message, title, module)
        return True
Beispiel #13
0
        def login(self):
            '''
                This is generally the only method you'll want to call, as it handles testing test_skey_valid() before
                trying test_login().

                Returns: True (success) / False (failure)
                Side effects: Methods called will handle saving the cookies to disk, and setting
                              self.authkey, self.passkey, and self.uid
            '''
            if (self.test_skey_valid()):
                logger.fdebug('%s Session key-based login was good.' % self.module)
                self.method = 'Session Cookie retrieved OK.'
                return {'ses': self.ses,
                        'status': True}

            if (self.test_login()):
                logger.fdebug('%s Credential-based login was good.' % self.module)
                self.method = 'Credential-based login OK.'
                return {'ses': self.ses,
                        'status': True}

            logger.warn('%s Both session key and credential-based logins failed.' % self.module)
            self.method = 'Both session key & credential login failed.'
            return {'ses': self.ses,
                    'status': False}
Beispiel #14
0
def movefiles(comicid, comlocation, imported):
    #comlocation is destination
    #comicid is used for rename
    files_moved = []
    try:
        imported = ast.literal_eval(imported)
    except ValueError:
        pass

    myDB = db.DBConnection()

    logger.fdebug('comlocation is : ' + comlocation)
    logger.fdebug('original comicname is : ' + imported['ComicName'])

    impres = imported['filelisting']

    if impres is not None:
        for impr in impres:
            srcimp = impr['comiclocation']
            orig_filename = impr['comicfilename']
            #before moving check to see if Rename to Mylar structure is enabled.
            if mylar.IMP_RENAME and mylar.FILE_FORMAT != '':
                logger.fdebug("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
                renameit = helpers.rename_param(comicid, imported['ComicName'], impr['issuenumber'], orig_filename)
                nfilename = renameit['nfilename']
                dstimp = os.path.join(comlocation, nfilename)
            else:
                logger.fdebug("Renaming files not enabled, keeping original filename(s)")
                dstimp = os.path.join(comlocation, orig_filename)

            logger.info("moving " + srcimp + " ... to " + dstimp)
            try:
                shutil.move(srcimp, dstimp)
                files_moved.append({'srid':     imported['srid'],
                                    'filename': impr['comicfilename']})
            except (OSError, IOError):
                logger.error("Failed to move files - check directories and manually re-run.")

        logger.fdebug("all files moved.")
        #now that it's moved / renamed ... we remove it from importResults or mark as completed.

    if len(files_moved) > 0:
        logger.info('files_moved: ' + str(files_moved))
        for result in files_moved:
            try:
                res = result['import_id']
            except:
                #if it's an 'older' import that wasn't imported, just make it a basic match so things can move and update properly.
                controlValue = {"ComicFilename": result['filename'],
                                "SRID":          result['srid']}
                newValue = {"Status":            "Imported",
                            "ComicID":           comicid}
            else:                 
                controlValue = {"impID":         result['import_id'],
                                "ComicFilename": result['filename']}
                newValue = {"Status":            "Imported",
                            "SRID":              result['srid'],
                            "ComicID":           comicid}
            myDB.upsert("importresults", newValue, controlValue)
    return
Beispiel #15
0
    def downloadfile(self, payload, filepath):
        url = 'https://32pag.es/torrents.php'
        try:
            r = self.session.get(url, params=payload, verify=True, stream=True, allow_redirects=True)
        except Exception as e:
            logger.error('%s [%s] Could not POST URL %s' % ('[32P-DOWNLOADER]', e, url))
            return False

        if str(r.status_code) != '200':
            logger.warn('Unable to download torrent from 32P [Status Code returned: %s]' % r.status_code)
            if str(r.status_code) == '404':
                logger.warn('[32P-CACHED_ENTRY] Entry found in 32P cache - incorrect. Torrent has probably been merged into a pack, or another series id. Removing from cache.')
                self.delete_cache_entry(payload['id'])
            else:
                logger.fdebug('content: %s' % r.content)
            return False


        with open(filepath, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()

        return True
Beispiel #16
0
def sendtohome(sftp, remotepath, filelist, transport):
    fhost = mylar.CONFIG.TAB_HOST.find(':')
    host = mylar.CONFIG.TAB_HOST[:fhost]
    port = int(mylar.CONFIG.TAB_HOST[fhost +1:])

    successlist = []
    filestotal = len(filelist)

    for files in filelist:
        tempfile = files['filename']
        issid = files['issueid']
        logger.fdebug('Checking filename for problematic characters: ' + tempfile)
        #we need to make the required directory(ies)/subdirectories before the get will work.
        if u'\xb4' in files['filename']:
            # right quotation
            logger.fdebug('detected abnormal character in filename')
            filename = tempfile.replace('0xb4', '\'')
        if u'\xbd' in files['filename']:
            # 1/2 character
            filename = tempfile.replace('0xbd', 'half')
        if u'\uff1a' in files['filename']:
            #some unknown character
            filename = tempfile.replace('\0ff1a', '-')

        #now we encode the structure to ascii so we can write directories/filenames without error.
        filename = tempfile.encode('ascii', 'ignore')

        remdir = remotepath

        if mylar.CONFIG.MAINTAINSERIESFOLDER == 1:
            # Get folder path of issue
            comicdir = os.path.split(files['filepath'])[0]
            # Isolate comic folder name
            comicdir = os.path.split(comicdir)[1]
            logger.info('Checking for Comic Folder: ' + comicdir)
            chkdir = os.path.join(remdir, comicdir)
            try:
                sftp.stat(chkdir)
            except IOError, e:
                logger.info('Comic Folder does not Exist, creating ' + chkdir )
                try:
                    sftp.mkdir(chkdir)
                except :
                    # Fallback to default behavior
                    logger.info('Could not create Comic Folder, adding to device root')
                else :
                    remdir = chkdir
            else :
                remdir = chkdir

        localsend = files['filepath']
        logger.info('Sending : ' + localsend)
        remotesend = os.path.join(remdir, filename)
        logger.info('To : ' + remotesend)

        try:
            sftp.stat(remotesend)
        except IOError, e:
            if e[0] == 2:
                filechk = False
Beispiel #17
0
def rssdbupdate(feeddata,i,type):
    rsschktime = 15
    myDB = db.DBConnection()

    #let's add the entries into the db so as to save on searches
    #also to build up the ID's ;)
    x = 1
    while x <= i:
        try:
            dataval = feeddata[x]
        except IndexError:
            logger.fdebug('reached the end of populating. Exiting the process.')
            break
        #print "populating : " + str(dataval)
        #remove passkey so it doesn't end up in db
        if type == 'torrent':
            newlink = dataval['Link'][:(dataval['Link'].find('&passkey'))]
            newVal = {"Link":      newlink,
                      "Pubdate":   dataval['Pubdate'],
                      "Site":      dataval['Site']}
        else:
            newlink = dataval['Link']
            newVal = {"Link":      newlink,
                      "Pubdate":   dataval['Pubdate'],
                      "Site":      dataval['Site'],
                      "Size":      dataval['Size']}

        ctrlVal = {"Title":    dataval['Title']}

        myDB.upsert("rssdb", newVal,ctrlVal)

        x+=1

    logger.fdebug('Completed adding new data to RSS DB. Next add in ' + str(mylar.RSS_CHECKINTERVAL) + ' minutes')
    return
Beispiel #18
0
 def get_the_hash(self, filepath):
     # Open torrent file
     torrent_file = open(filepath, "rb")
     metainfo = bencode.decode(torrent_file.read())
     info = metainfo['info']
     thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
     logger.fdebug('Hash: ' + thehash)
     return thehash
Beispiel #19
0
        def valid_skey_attempt(self, skey):
            '''
                Not generally the proper method to call - call test_key_valid()
                instead - which calls this method.

                Attempts to fetch data via an ajax method that will fail if not
                authorized.  The parameter skey should be set to the string
                value of the cookie named session.

                Returns: True on success, False on failure.  Side Effects: Sets
                self.uid, self,authkey and self.passkey
            '''

            u = '''https://32pag.es/ajax.php'''
            params = {'action': 'index'}
            testcookie = dict(session=skey)

            try:
                r = self.ses.get(u, params=params, timeout=60, allow_redirects=False, cookies=testcookie)
            except Exception as e:
                logger.error('Got an exception [%s] trying to GET to: %s' % (e,u))
                self.error = {'status':'error', 'message':'exception trying to retrieve site'}
                return False

            if r.status_code != 200:
                if r.status_code == 302:
                    newloc = r.headers.get('Location', '')
                    logger.warn('Got redirect from the POST-ajax action=login GET: %s' % newloc)
                    self.error = {'status':'redirect-error', 'message':'got redirect from POST-ajax login action : ' + newloc}
                else:
                    logger.error('Got bad status code in the POST-ajax action=login GET: %s' % r.status_code)
                    self.error = {'status':'bad status code', 'message':'bad status code received in the POST-ajax login action :' + str(r.status_code)}
                return False

            try:
                j = r.json()
            except:
                logger.warn('Error - response from session-based skey check was not JSON: %s' % r.text)
                return False

            self.uid = j['response']['id']
            self.authkey = j['response']['authkey']
            self.passkey = pk = j['response']['passkey']

            try:
                d = self.ses.get('https://32pag.es/ajax.php', params={'action': 'user_inkdrops'}, verify=True, allow_redirects=True)
            except Exception as e:
                logger.error('Unable to retreive Inkdrop total : %s' % e)
            else:
                try:
                    self.inkdrops = d.json()
                except:
                    logger.error('Inkdrop result did not return valid JSON, unable to verify response')
                else:
                    logger.fdebug('inkdrops: %s' % self.inkdrops)

            return True
Beispiel #20
0
    def _parse_feed(site, url, verify):
        logger.fdebug('[RSS] Fetching items from ' + site)
        payload = None
        headers = {'User-Agent':      str(mylar.USER_AGENT)}

        try:
            r = requests.get(url, params=payload, verify=verify, headers=headers)
        except Exception, e:
            logger.warn('Error fetching RSS Feed Data from %s: %s' % (site, e))
            return
Beispiel #21
0
    def importIT(self):
        #set startup...
        if len(self.comiclist) > 0:
            self.sql_attach()
            query = "DELETE FROM maintenance"
            self.db.execute(query)
            query = "INSERT INTO maintenance (id, mode, total, status) VALUES (%s,'%s',%s,'%s')" % ('1', self.mode, len(self.comiclist), "running")
            self.db.execute(query)
            self.sql_close()
            logger.info('[MAINTENANCE-MODE][%s] Found %s series in previous db. Preparing to migrate into existing db.' % (self.mode.upper(), len(self.comiclist)))
            count = 1
            for x in self.comiclist:
                logger.info('[MAINTENANCE-MODE][%s] [%s/%s] now attempting to add %s to watchlist...' % (self.mode.upper(), count, len(self.comiclist), x['ComicID']))
                try:
                    self.sql_attach()
                    self.db.execute("UPDATE maintenance SET progress=?, total=?, current=? WHERE id='1'", (count, len(self.comiclist), re.sub('4050-', '', x['ComicID'].strip())))
                    self.sql_close()
                except Exception as e:
                    logger.warn('[ERROR] %s' % e)
                maintenance_info = importer.addComictoDB(re.sub('4050-', '', x['ComicID']).strip(), calledfrom='maintenance')
                try:
                    logger.info('MAINTENANCE: %s' % maintenance_info)
                    if maintenance_info['status'] == 'complete':
                        logger.fdebug('[MAINTENANCE-MODE][%s] Successfully added %s [%s] to watchlist.' % (self.mode.upper(), maintenance_info['comicname'], maintenance_info['year']))
                    else:
                        logger.fdebug('[MAINTENANCE-MODE][%s] Unable to add %s [%s] to watchlist.' % (self.mode.upper(), maintenance_info['comicname'], maintenance_info['year']))
                        raise IOError
                    self.maintenance_success.append(x)

                    try:
                        self.sql_attach()
                        self.db.execute("UPDATE maintenance SET progress=?, last_comicid=?, last_series=?, last_seriesyear=? WHERE id='1'", (count, re.sub('4050-', '', x['ComicID'].strip()), maintenance_info['comicname'], maintenance_info['year']))
                        self.sql_close()
                    except Exception as e:
                        logger.warn('[ERROR] %s' % e)


                except IOError as e:
                    logger.warn('[MAINTENANCE-MODE][%s] Unable to add series to watchlist: %s' % (self.mode.upper(), e))
                    self.maintenance_fail.append(x)

                count+=1
        else:
            logger.warn('[MAINTENANCE-MODE][%s] Unable to locate any series in db. This is probably a FATAL error and an unrecoverable db.' % self.mode.upper())
            return

        logger.info('[MAINTENANCE-MODE][%s] Successfully imported %s series into existing db.' % (self.mode.upper(), len(self.maintenance_success)))
        if len(self.maintenance_fail) > 0:
            logger.info('[MAINTENANCE-MODE][%s] Failed to import %s series into existing db: %s' % (self.mode.upper(), len(self.maintenance_success), self.maintenance_fail))
        try:
            self.sql_attach()
            self.db.execute("UPDATE maintenance SET status=? WHERE id='1'", ["completed"])
            self.sql_close()
        except Exception as e:
            logger.warn('[ERROR] %s' % e)
Beispiel #22
0
 def _deliverFile(self, **kwargs):
     logger.fdebug("_deliverFile: kwargs: %s" % kwargs)
     if 'file' not in kwargs:
         self.data = self._error_with_message('No file provided')
     elif 'filename' not in kwargs:
         self.data = self._error_with_message('No filename provided')
     else:
         #logger.fdebug("file name: %s" % str(kwargs['file'])
         self.filename = os.path.split(str(kwargs['file']))[1]
         self.file = str(kwargs['file'])
     return 
Beispiel #23
0
    def addtoreadlist(self):
        annualize = False
        myDB = db.DBConnection()
        readlist = myDB.selectone("SELECT * from issues where IssueID=?", [self.IssueID]).fetchone()
        if readlist is None:
            logger.fdebug(self.module + ' Checking against annuals..')
            readlist = myDB.selectone("SELECT * from annuals where IssueID=?", [self.IssueID]).fetchone()
            if readlist is None:
                logger.error(self.module + ' Cannot locate IssueID - aborting..')
                return
            else:
                annualize = True
        comicinfo = myDB.selectone("SELECT * from comics where ComicID=?", [readlist['ComicID']]).fetchone()
        logger.info(self.module + ' Attempting to add issueid ' + readlist['IssueID'])
        if comicinfo is None:
            logger.info(self.module + ' Issue not located on your current watchlist. I should probably check story-arcs but I do not have that capability just yet.')
        else:
            locpath = None
            if mylar.MULTIPLE_DEST_DIRS is not None and mylar.MULTIPLE_DEST_DIRS != 'None' and os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(comicinfo['ComicLocation'])) != comicinfo['ComicLocation']:
                pathdir = os.path.join(mylar.MULTIPLE_DEST_DIRS, os.path.basename(comicinfo['ComicLocation']))
                if os.path.exists(os.path.join(pathdir, readlist['Location'])):
                    locpath = os.path.join(pathdir, readlist['Location'])
                else:
                    if os.path.exists(os.path.join(comicinfo['ComicLocation'], readlist['Location'])):
                        locpath = os.path.join(comicinfo['ComicLocation'], readlist['Location'])
            else:
                if os.path.exists(os.path.join(comicinfo['ComicLocation'], readlist['Location'])):
                    locpath = os.path.join(comicinfo['ComicLocation'], readlist['Location'])

            if not locpath is None:
                comicissue = readlist['Issue_Number']
                comicname = comicinfo['ComicName']
                dspinfo = comicname + ' #' + comicissue
                if annualize:
                    if mylar.ANNUALS_ON:
                        comicissue = 'Annual ' + readlist['Issue_Number']
                        dspinfo = comicname + ' Annual #' + readlist['Issue_Number']
                    else:
                        comicname = comicinfo['ComicName'] + ' Annual'
                        dspinfo = comicname + ' #' + comicissue
                ctrlval = {"IssueID":       self.IssueID}
                newval = {"DateAdded":      helpers.today(),
                          "Status":         "Added",
                          "ComicID":        readlist['ComicID'],
                          "Issue_Number":   comicissue,
                          "IssueDate":      readlist['IssueDate'],
                          "SeriesYear":     comicinfo['ComicYear'],
                          "ComicName":      comicname,
                          "Location":       locpath}

                myDB.upsert("readlist", newval, ctrlval)
                logger.info(self.module + ' Added ' + dspinfo + ' to the Reading list.')
        return
Beispiel #24
0
    def notify(self, event, message=None, snatched_nzb=None, prov=None, sent_to=None, module=None):

        if module is None:
            module = ''
        module += '[NOTIFIER]'

        if snatched_nzb:
            if snatched_nzb[-1] == '\.': 
                snatched_nzb = snatched_nzb[:-1]
            message = "Mylar has snatched: " + snatched_nzb + " from " + prov + " and " + sent_to

        data = {'token': mylar.CONFIG.PUSHOVER_APIKEY,
                'user': mylar.CONFIG.PUSHOVER_USERKEY,
                'message': message.encode("utf-8"),
                'title': event,
                'priority': mylar.CONFIG.PUSHOVER_PRIORITY}

        if all([self.device is not None, self.device != 'None']):
            data.update({'device': self.device})

        r = self._session.post(self.PUSHOVER_URL, data=data, verify=True)

        if r.status_code == 200:
            try:
                response = r.json()
                if 'devices' in response and self.test is True:
                    logger.fdebug('%s Available devices: %s' % (module, response))
                    if any([self.device is None, self.device == 'None']):
                        self.device = 'all available devices'

                    r = self._session.post('https://api.pushover.net/1/messages.json', data=data, verify=True)
                    if r.status_code == 200:
                        logger.info('%s PushOver notifications sent to %s.' % (module, self.device))
                    elif r.status_code >=400 and r.status_code < 500:
                        logger.error('%s PushOver request failed to %s: %s' % (module, self.device, r.content))
                        return False
                    else:
                        logger.error('%s PushOver notification failed serverside.' % module)
                        return False
                else:
                    logger.info('%s PushOver notifications sent.' % module)
            except Exception as e:
                logger.warn('%s[ERROR] - %s' % (module, e))
                return False
            else:
                return True
        elif r.status_code >= 400 and r.status_code < 500:
            logger.error('%s PushOver request failed: %s' % (module, r.content))
            return False
        else:
            logger.error('%s PushOver notification failed serverside.' % module)
            return False
Beispiel #25
0
def dbUpdate():

    myDB = db.DBConnection()

    activecomics = myDB.select('SELECT ComicID, ComicName from comics WHERE Status="Active" or Status="Loading" order by LastUpdated ASC')

    logger.info('Starting update for %i active comics' % len(activecomics))
    
    for comic in activecomics:
    
        comicid = comic[0]
        mismatch = "no"
        if not mylar.CV_ONLY or comicid[:1] == "G":
            CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
            if CV_EXcomicid is None: pass
            else:
                if CV_EXcomicid['variloop'] == '99':
                    mismatch = "yes"
            if comicid[:1] == "G":
                mylar.importer.GCDimport(comicid)
            else: 
                mylar.importer.addComictoDB(comicid,mismatch)
        else:
            if mylar.CV_ONETIMER == 1:
                logger.fdebug("CV_OneTimer option enabled...")

                #in order to update to JUST CV_ONLY, we need to delete the issues for a given series so it's a clean refresh.
                logger.fdebug("Gathering the status of all issues for the series.")
                issues = myDB.select('SELECT * FROM issues WHERE ComicID=?', [comicid])
                #store the issues' status for a given comicid, after deleting and readding, flip the status back to what it is currently.                
                logger.fdebug("Deleting all issue data.")
                myDB.select('DELETE FROM issues WHERE ComicID=?', [comicid])            
                logger.fdebug("Refreshing the series and pulling in new data using only CV.")
                mylar.importer.addComictoDB(comicid,mismatch)
                issues_new = myDB.select('SELECT * FROM issues WHERE ComicID=?', [comicid])
                icount = 0
                logger.fdebug("Attempting to put the Status' back how they were.")
                for issue in issues:
                    for issuenew in issues_new:
                        if issuenew['IssueID'] == issue['IssueID'] and issuenew['Status'] != issue['Status']:
                            #change the status to the previous status
                            ctrlVAL = {'IssueID':  issue['IssueID']}
                            newVAL = {'Status':  issue['Status']}
                            myDB.upsert("Issues", newVAL, ctrlVAL)
                            icount+=1
                            break
                logger.info("In converting data to CV only, I changed the status of " + str(icount) + " issues.")
                mylar.CV_ONETIMER = 0   
            else:
                mylar.importer.addComictoDB(comicid,mismatch)
        time.sleep(5) #pause for 5 secs so dont hammer CV and get 500 error
    logger.info('Update complete')
Beispiel #26
0
    def GCDaddComic(self, comicid, comicname=None, comicyear=None, comicissues=None, comiccover=None, comicpublisher=None):
        #since we already know most of the info, let's add it to the db so we can reference it later.
        myDB = db.DBConnection()
        gcomicid = "G" + str(comicid)
        comicyear_len = comicyear.find(' ', 2)
        comyear = comicyear[comicyear_len+1:comicyear_len+5]
        if comyear.isdigit():
            logger.fdebug("Series year set to : " + str(comyear))
        else:
            logger.fdebug("Invalid Series year detected - trying to adjust from " + str(comyear))
            #comicyear_len above will trap wrong year if it's 10 October 2010 - etc ( 2000 AD)...
            find_comicyear = comicyear.split()
            for i in find_comicyear:
                if len(i) == 4:
                    logger.fdebug("Series year detected as : " + str(i))
                    comyear = str(i)
                    continue

            logger.fdebug("Series year set to: " + str(comyear))
            
        controlValueDict = { 'ComicID': gcomicid }
        newValueDict = {'ComicName': comicname,
                        'ComicYear': comyear,
                        'ComicPublished': comicyear,
                        'ComicPublisher': comicpublisher,
                        'ComicImage': comiccover,
                        'Total' : comicissues }
        myDB.upsert("comics", newValueDict, controlValueDict)
        threading.Thread(target=importer.GCDimport, args=[gcomicid]).start()
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % gcomicid)
Beispiel #27
0
def checkthis(datecheck,datestatus,usedate):

    logger.fdebug('Now checking date comparison using an issue store date of ' + str(datecheck))
    logger.fdebug('Using a compare date (usedate) of ' + str(usedate))
    logger.fdebug('Status of ' + str(datestatus))

    if int(datecheck) >= int(usedate):
        logger.fdebug('Store Date falls within acceptable range - series MATCH')
        valid_check = True
    elif int(datecheck) < int(usedate):
        logger.fdebug('The issue date of issue was on ' + str(datecheck) + ' which is prior to ' + str(usedate))
        valid_check = False

    return valid_check
Beispiel #28
0
def weekly_singlecopy(comicid, issuenum, file, path):
    myDB = db.DBConnection()
    try:
        pull_date = myDB.selectone("SELECT SHIPDATE from weekly").fetchone()
        if (pull_date is None):
            pulldate = '00000000'
        else:
            pulldate = pull_date['SHIPDATE']

        logger.fdebug(u"Weekly pull list detected as : " + str(pulldate))

    except (sqlite3.OperationalError, TypeError),msg:
        logger.info(u"Error determining current weekly pull-list date - you should refresh the pull-list manually probably.")
        return
Beispiel #29
0
    def markFailed(self):
        #use this to forcibly mark a single issue as being Failed (ie. if a search result is sent to a client, but the result
        #ends up passing in a 404 or something that makes it so that the download can't be initiated).
        module = '[FAILED-DOWNLOAD]'

        myDB = db.DBConnection()

        logger.info(module + ' Marking as a Failed Download.')

        logger.fdebug(module + 'nzb_name: ' + self.nzb_name)
        logger.fdebug(module + 'issueid: ' + str(self.issueid))
        logger.fdebug(module + 'nzb_id: ' + str(self.id))
        logger.fdebug(module + 'prov: ' + self.prov)

        if 'annual' in self.nzb_name.lower():
            logger.info(module + ' Annual detected.')
            annchk = "yes"
            issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
        else:
            issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()


        ctrlVal = {"IssueID": self.issueid}
        Vals = {"Status":    'Failed'}
        myDB.upsert("issues", Vals, ctrlVal)

        ctrlVal = {"ID":       self.id,
                   "Provider": self.prov,
                   "NZBName":  self.nzb_name}
        Vals = {"Status":       'Failed',
                "ComicName":    issuenzb['ComicName'],
                "Issue_Number": issuenzb['Issue_Number']}
        myDB.upsert("failed", Vals, ctrlVal)

        logger.info(module + ' Successfully marked as Failed.')
Beispiel #30
0
    def _send(self, data, module):

        r = self._session.post(self.NMA_URL, data=data)

        logger.fdebug('[NMA] Status code returned: ' + str(r.status_code))
        if r.status_code == 200:
            logger.info(module + ' NotifyMyAndroid notifications sent.')
            return True
        elif r.status_code >= 400 and r.status_code < 500:
            logger.error(module + ' NotifyMyAndroid request failed: %s' % r.content)
            return False
        else:
            logger.error(module + ' NotifyMyAndroid  notification failed serverside.')
            return False
Beispiel #31
0
    csvfile = open(newfl, "rb")
    creader = csv.reader(csvfile, delimiter='\t')

    t = 1

    for row in creader:
        try:
            #print ("Row: %s" % row)
            cursor.execute("INSERT INTO future VALUES (?,?,?,?,?,?,?,null);",
                           row)
        except Exception, e:
            logger.fdebug("Error - invald arguments...-skipping")
            pass
        t += 1
    logger.fdebug('successfully added ' + str(t) +
                  ' issues to future upcoming table.')
    csvfile.close()
    connection.commit()
    connection.close()

    mylar.weeklypull.pullitcheck(futurepull="yes")
    #.end


def populate(link, publisher, shipdate):
    #this is the secondary url call to populate
    input = 'http://www.comicbookresources.com/' + link
    #print 'checking ' + str(input)
    response = urllib2.urlopen(input)
    soup = BeautifulSoup(response)
    abc = soup.findAll('p')
Beispiel #32
0
def run(dirName,
        nzbName=None,
        issueid=None,
        comversion=None,
        manual=None,
        filename=None,
        module=None,
        manualmeta=False):
    if module is None:
        module = ''
    module += '[META-TAGGER]'

    logger.fdebug(module + ' dirName:' + dirName)

    # 2015-11-23: Recent CV API changes restrict the rate-limit to 1 api request / second.
    # ComicTagger has to be included now with the install as a timer had to be added to allow for the 1/second rule.
    comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')
    logger.fdebug(
        'ComicTagger Path location for internal comictagger.py set to : ' +
        comictagger_cmd)

    # Force mylar to use cmtagger_path = mylar.PROG_DIR to force the use of the included lib.

    logger.fdebug(module + ' Filename is : ' + filename)

    filepath = filename
    og_filepath = filepath
    try:
        filename = os.path.split(filename)[1]  # just the filename itself
    except:
        logger.warn(
            'Unable to detect filename within directory - I am aborting the tagging. You best check things out.'
        )
        return "fail"

    #make use of temporary file location in order to post-process this to ensure that things don't get hammered when converting
    new_filepath = None
    new_folder = None
    try:
        import tempfile
        logger.fdebug('Filepath: %s' % filepath)
        logger.fdebug('Filename: %s' % filename)
        new_folder = tempfile.mkdtemp(
            prefix='mylar_', dir=mylar.CONFIG.CACHE_DIR)  #prefix, suffix, dir
        logger.fdebug('New_Folder: %s' % new_folder)
        new_filepath = os.path.join(new_folder, filename)
        logger.fdebug('New_Filepath: %s' % new_filepath)
        if mylar.CONFIG.FILE_OPTS == 'copy' and manualmeta == False:
            shutil.copy(filepath, new_filepath)
        else:
            shutil.copy(filepath, new_filepath)
        filepath = new_filepath
    except Exception as e:
        logger.warn('%s Unexpected Error: %s [%s]' %
                    (module, sys.exc_info()[0], e))
        logger.warn(
            module +
            ' Unable to create temporary directory to perform meta-tagging. Processing without metatagging.'
        )
        tidyup(og_filepath, new_filepath, new_folder, manualmeta)
        return "fail"

    ## Sets up other directories ##
    scriptname = os.path.basename(sys.argv[0])
    downloadpath = os.path.abspath(dirName)
    sabnzbdscriptpath = os.path.dirname(sys.argv[0])
    comicpath = new_folder

    logger.fdebug(module + ' Paths / Locations:')
    logger.fdebug(module + ' scriptname : ' + scriptname)
    logger.fdebug(module + ' downloadpath : ' + downloadpath)
    logger.fdebug(module + ' sabnzbdscriptpath : ' + sabnzbdscriptpath)
    logger.fdebug(module + ' comicpath : ' + comicpath)
    logger.fdebug(module + ' Running the ComicTagger Add-on for Mylar')

    ##set up default comictagger options here.
    #used for cbr - to - cbz conversion
    #depending on copy/move - eitehr we retain the rar or we don't.
    if mylar.CONFIG.FILE_OPTS == 'move':
        cbr2cbzoptions = ["-e", "--delete-rar"]
    else:
        cbr2cbzoptions = ["-e"]

    tagoptions = ["-s"]
    if mylar.CONFIG.CMTAG_VOLUME:
        if mylar.CONFIG.CMTAG_START_YEAR_AS_VOLUME:
            comversion = str(comversion)
        else:
            if any(
                [comversion is None, comversion == '', comversion == 'None']):
                comversion = '1'
            comversion = re.sub('[^0-9]', '', comversion).strip()
        cvers = 'volume=' + str(comversion)
    else:
        cvers = "volume="

    tagoptions.extend(["-m", cvers])

    try:
        #from comictaggerlib import ctversion
        ct_check = subprocess.check_output(
            [sys.executable, comictagger_cmd, "--version"],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        #logger.warn(module + "[WARNING] "command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
        logger.warn(
            module +
            '[WARNING] Make sure that you are using the comictagger included with Mylar.'
        )
        tidyup(filepath, new_filepath, new_folder, manualmeta)
        return "fail"

    logger.info('ct_check: %s' % ct_check)
    ctend = str(ct_check).find(':')
    ct_version = re.sub("[^0-9]", "", str(ct_check)[:ctend])
    from pkg_resources import parse_version
    if parse_version(ct_version) >= parse_version('1.3.1'):
        if any([
                mylar.CONFIG.COMICVINE_API == 'None',
                mylar.CONFIG.COMICVINE_API is None
        ]):
            logger.fdebug(
                '%s ComicTagger v.%s being used - no personal ComicVine API Key supplied. Take your chances.'
                % (module, ct_version))
            use_cvapi = "False"
        else:
            logger.fdebug(
                '%s ComicTagger v.%s being used - using personal ComicVine API key supplied via mylar.'
                % (module, ct_version))
            use_cvapi = "True"
            tagoptions.extend(["--cv-api-key", mylar.CONFIG.COMICVINE_API])
    else:
        logger.fdebug(
            '%s ComicTagger v.ct_version being used - personal ComicVine API key not supported in this version. Good luck.'
            % (module, ct_version))
        use_cvapi = "False"

    i = 1
    tagcnt = 0

    if mylar.CONFIG.CT_TAG_CR:
        tagcnt = 1
        logger.fdebug(module + ' CR Tagging enabled.')

    if mylar.CONFIG.CT_TAG_CBL:
        if not mylar.CONFIG.CT_TAG_CR:
            i = 2  #set the tag to start at cbl and end without doing another tagging.
        tagcnt = 2
        logger.fdebug(module + ' CBL Tagging enabled.')

    if tagcnt == 0:
        logger.warn(
            module +
            ' You have metatagging enabled, but you have not selected the type(s) of metadata to write. Please fix and re-run manually'
        )
        tidyup(filepath, new_filepath, new_folder, manualmeta)
        return "fail"

    #if it's a cbz file - check if no-overwrite existing tags is enabled / disabled in config.
    if filename.endswith('.cbz'):
        if mylar.CONFIG.CT_CBZ_OVERWRITE:
            logger.fdebug(
                module + ' Will modify existing tag blocks even if it exists.')
        else:
            logger.fdebug(
                module +
                ' Will NOT modify existing tag blocks even if they exist already.'
            )
            tagoptions.extend(["--nooverwrite"])

    if issueid is None:
        tagoptions.extend(["-f", "-o"])
    else:
        tagoptions.extend(["-o", "--id", issueid])

    original_tagoptions = tagoptions
    og_tagtype = None
    initial_ctrun = True
    error_remove = False

    while (i <= tagcnt):
        if initial_ctrun:
            f_tagoptions = cbr2cbzoptions
            f_tagoptions.extend([filepath])
        else:
            if i == 1:
                tagtype = 'cr'  # CR meta-tagging cycle.
                tagdisp = 'ComicRack tagging'
            elif i == 2:
                tagtype = 'cbl'  # Cbl meta-tagging cycle
                tagdisp = 'Comicbooklover tagging'

            f_tagoptions = original_tagoptions

            if og_tagtype is not None:
                for index, item in enumerate(f_tagoptions):
                    if item == og_tagtype:
                        f_tagoptions[index] = tagtype
            else:
                f_tagoptions.extend(["--type", tagtype, filepath])

            og_tagtype = tagtype

            logger.info(module + ' ' + tagdisp +
                        ' meta-tagging processing started.')

        currentScriptName = [sys.executable, comictagger_cmd]
        script_cmd = currentScriptName + f_tagoptions

        if initial_ctrun:
            logger.fdebug('%s Enabling ComicTagger script with options: %s' %
                          (module, f_tagoptions))
            script_cmdlog = script_cmd

        else:
            logger.fdebug('%s Enabling ComicTagger script with options: %s' %
                          (module,
                           re.sub(
                               f_tagoptions[f_tagoptions.index(
                                   mylar.CONFIG.COMICVINE_API)], 'REDACTED',
                               str(f_tagoptions))))
            # generate a safe command line string to execute the script and provide all the parameters
            script_cmdlog = re.sub(
                f_tagoptions[f_tagoptions.index(mylar.CONFIG.COMICVINE_API)],
                'REDACTED', str(script_cmd))

        logger.fdebug(module + ' Executing command: ' + str(script_cmdlog))
        logger.fdebug(module + ' Absolute path to script: ' + script_cmd[0])
        try:
            # use subprocess to run the command and capture output
            p = subprocess.Popen(script_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            out, err = p.communicate()
            logger.info(out)
            logger.info(err)
            if out is not None:
                out = out.decode('utf-8')
            if err is not None:
                err = err.decode('utf-8')
            if initial_ctrun and 'exported successfully' in out:
                logger.fdebug(module + '[COMIC-TAGGER] : ' + str(out))
                #Archive exported successfully to: X-Men v4 008 (2014) (Digital) (Nahga-Empire).cbz (Original deleted)
                if 'Error deleting' in filepath:
                    tf1 = out.find('exported successfully to: ')
                    tmpfilename = out[tf1 + len('exported successfully to: '
                                                ):].strip()
                    error_remove = True
                else:
                    tmpfilename = re.sub('Archive exported successfully to: ',
                                         '', out.rstrip())
                if mylar.CONFIG.FILE_OPTS == 'move':
                    tmpfilename = re.sub('\(Original deleted\)', '',
                                         tmpfilename).strip()
                tmpf = tmpfilename  #.decode('utf-8')
                filepath = os.path.join(comicpath, tmpf)
                if filename.lower() != tmpf.lower() and tmpf.endswith(
                        '(1).cbz'):
                    logger.fdebug(
                        'New filename [%s] is named incorrectly due to duplication during metatagging - Making sure it\'s named correctly [%s].'
                        % (tmpf, filename))
                    tmpfilename = filename
                    filepath_new = os.path.join(comicpath, tmpfilename)
                    try:
                        os.rename(filepath, filepath_new)
                        filepath = filepath_new
                    except:
                        logger.warn(
                            '%s unable to rename file to accomodate metatagging cbz to the same filename'
                            % module)
                if not os.path.isfile(filepath):
                    logger.fdebug(module + 'Trying utf-8 conversion.')
                    tmpf = tmpfilename.encode('utf-8')
                    filepath = os.path.join(comicpath, tmpf)
                    if not os.path.isfile(filepath):
                        logger.fdebug(module + 'Trying latin-1 conversion.')
                        tmpf = tmpfilename.encode('Latin-1')
                        filepath = os.path.join(comicpath, tmpf)

                logger.fdebug(module +
                              '[COMIC-TAGGER][CBR-TO-CBZ] New filename: ' +
                              filepath)
                initial_ctrun = False
            elif initial_ctrun and 'Archive is not a RAR' in out:
                logger.fdebug('%s Output: %s' % (module, out))
                logger.warn(module +
                            '[COMIC-TAGGER] file is not in a RAR format: ' +
                            filename)
                initial_ctrun = False
            elif initial_ctrun:
                initial_ctrun = False
                if 'file is not expected size' in out:
                    logger.fdebug('%s Output: %s' % (module, out))
                    tidyup(og_filepath, new_filepath, new_folder, manualmeta)
                    return 'corrupt'
                else:
                    logger.warn(
                        module +
                        '[COMIC-TAGGER][CBR-TO-CBZ] Failed to convert cbr to cbz - check permissions on folder : '
                        + mylar.CONFIG.CACHE_DIR +
                        ' and/or the location where Mylar is trying to tag the files from.'
                    )
                    tidyup(og_filepath, new_filepath, new_folder, manualmeta)
                    return 'fail'
            elif 'Cannot find' in out:
                logger.fdebug('%s Output: %s' % (module, out))
                logger.warn(module + '[COMIC-TAGGER] Unable to locate file: ' +
                            filename)
                file_error = 'file not found||' + filename
                return file_error
            elif 'not a comic archive!' in out:
                logger.fdebug('%s Output: %s' % (module, out))
                logger.warn(module + '[COMIC-TAGGER] Unable to locate file: ' +
                            filename)
                file_error = 'file not found||' + filename
                return file_error
            else:
                logger.info(module + '[COMIC-TAGGER] Successfully wrote ' +
                            tagdisp + ' [' + filepath + ']')
                i += 1
        except OSError as e:
            logger.warn(
                module +
                '[COMIC-TAGGER] Unable to run comictagger with the options provided: '
                + re.sub(
                    f_tagoptions[f_tagoptions.index(
                        mylar.CONFIG.COMICVINE_API)], 'REDACTED',
                    str(script_cmd)))
            tidyup(filepath, new_filepath, new_folder, manualmeta)
            return "fail"

        if mylar.CONFIG.CBR2CBZ_ONLY and initial_ctrun == False:
            break

    return filepath
Beispiel #33
0
def libraryScan(dir=None,
                append=False,
                ComicID=None,
                ComicName=None,
                cron=None):

    if cron and not mylar.LIBRARYSCAN:
        return

    if not dir:
        dir = mylar.COMIC_DIR

    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append:
        dir = dir.encode(mylar.SYS_ENCODING)

    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' %
                    dir.decode(mylar.SYS_ENCODING, 'replace'))
        return

    logger.info('Scanning comic directory: %s' %
                dir.decode(mylar.SYS_ENCODING, 'replace'))

    basedir = dir

    comic_list = []
    comiccnt = 0
    extensions = ('cbr', 'cbz')
    for r, d, f in os.walk(dir):
        #for directory in d[:]:
        #    if directory.startswith("."):
        #        d.remove(directory)
        for files in f:
            if any(files.lower().endswith('.' + x.lower())
                   for x in extensions):
                comic = files
                comicpath = os.path.join(r, files)
                comicsize = os.path.getsize(comicpath)
                print "Comic: " + comic
                print "Comic Path: " + comicpath
                print "Comic Size: " + str(comicsize)

                # We need the unicode path to use for logging, inserting into database
                unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING,
                                                      'replace')

                comiccnt += 1
                comic_dict = {
                    'ComicFilename': comic,
                    'ComicLocation': comicpath,
                    'ComicSize': comicsize,
                    'Unicode_ComicLocation': unicode_comic_path
                }
                comic_list.append(comic_dict)

        logger.info("I've found a total of " + str(comiccnt) +
                    " comics....analyzing now")
        logger.info("comiclist: " + str(comic_list))
    myDB = db.DBConnection()

    #let's load in the watchlist to see if we have any matches.
    logger.info(
        "loading in the watchlist to see if a series is being watched already..."
    )
    watchlist = myDB.select("SELECT * from comics")
    ComicName = []
    DisplayName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        #use the comicname_filesafe to start
        watchdisplaycomic = re.sub('[\_\#\,\/\:\;\!\$\%\&\+\'\?\@]', ' ',
                                   watch['ComicName']).encode('utf-8').strip()
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub(
            '[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ',
            watch['ComicName_Filesafe']).encode('utf-8').strip()
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if ' the ' in watchcomic.lower():
            #drop the 'the' from the watchcomic title for proper comparisons.
            watchcomic = watchcomic[-4:]

        alt_chk = "no"  # alt-checker flag (default to no)

        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch[
                'AlternateSearch'] is not 'None':
            altcomic = re.sub(
                '[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ',
                watch['AlternateSearch']).encode('utf-8').strip()
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        DisplayName.append(watchdisplaycomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt += 1

    logger.info("Successfully loaded " + str(watchcnt) +
                " series from your watchlist.")

    ripperlist = ['digital-', 'empire', 'dcp']

    watchfound = 0

    datelist = [
        'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
        'nov', 'dec'
    ]
    #    datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
    #    #search for number as text, and change to numeric
    #    for numbs in basnumbs:
    #        #print ("numbs:" + str(numbs))
    #        if numbs in ComicName.lower():
    #            numconv = basnumbs[numbs]
    #            #print ("numconv: " + str(numconv))

    for i in comic_list:
        print i['ComicFilename']

        #if mylar.IMP_METADATA:
        #logger.info('metatagging checking enabled.')
        #if read tags is enabled during import, check here.
        #if i['ComicLocation'].endswith('.cbz'):
        #    logger.info('Attempting to read tags present in filename: ' + str(i['ComicLocation']))
        #    issueinfo = helpers.IssueDetails(i['ComicLocation'])
        #    if issueinfo is None:
        #        pass
        #    else:
        #        logger.info('Successfully retrieved some tags. Lets see what I can figure out.')
        #        comicname = issueinfo[0]['series']
        #        logger.fdebug('Series Name: ' + comicname)
        #        issue_number = issueinfo[0]['issue_number']
        #        logger.fdebug('Issue Number: ' + str(issue_number))
        #        issuetitle = issueinfo[0]['title']
        #        logger.fdebug('Issue Title: ' + issuetitle)
        #        issueyear = issueinfo[0]['year']
        #        logger.fdebug('Issue Year: ' + str(issueyear))
        #        # if used by ComicTagger, Notes field will have the IssueID.
        #        issuenotes = issueinfo[0]['notes']
        #        logger.fdebug('Notes: ' + issuenotes)

        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        #let's clean up the filename for matching purposes

        cfilename = re.sub('[\_\#\,\/\:\;\-\!\$\%\&\+\'\?\@]', ' ',
                           comfilename)
        #cfilename = re.sub('\s', '_', str(cfilename))
        d_filename = re.sub('[\_\#\,\/\;\!\$\%\&\?\@]', ' ', comfilename)
        d_filename = re.sub('[\:\-\+\']', '#', d_filename)

        #strip extraspaces
        d_filename = re.sub('\s+', ' ', d_filename)
        cfilename = re.sub('\s+', ' ', cfilename)

        #versioning - remove it
        subsplit = cfilename.replace('_', ' ').split()
        volno = None
        volyr = None
        for subit in subsplit:
            if subit[0].lower() == 'v':
                vfull = 0
                if subit[1:].isdigit():
                    #if in format v1, v2009 etc...
                    if len(subit) > 3:
                        # if it's greater than 3 in length, then the format is Vyyyy
                        vfull = 1  # add on 1 character length to account for extra space
                    cfilename = re.sub(subit, '', cfilename)
                    d_filename = re.sub(subit, '', d_filename)
                    volno = re.sub("[^0-9]", " ", subit)
                elif subit.lower()[:3] == 'vol':
                    #if in format vol.2013 etc
                    #because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
                    logger.fdebug('volume indicator detected as version #:' +
                                  str(subit))
                    cfilename = re.sub(subit, '', cfilename)
                    cfilename = " ".join(cfilename.split())
                    d_filename = re.sub(subit, '', d_filename)
                    d_filename = " ".join(d_filename.split())
                    volyr = re.sub("[^0-9]", " ", subit).strip()
                    logger.fdebug('volume year set as : ' + str(volyr))
        cm_cn = 0

        #we need to track the counter to make sure we are comparing the right array parts
        #this takes care of the brackets :)
        m = re.findall('[^()]+', cfilename)
        lenm = len(m)
        logger.fdebug("there are " + str(lenm) + " words.")
        cnt = 0
        yearmatch = "false"
        foundonwatch = "False"
        issue = 999999

        while (cnt < lenm):
            if m[cnt] is None: break
            if m[cnt] == ' ':
                pass
            else:
                logger.fdebug(str(cnt) + ". Bracket Word: " + m[cnt])
                if cnt == 0:
                    comic_andiss = m[cnt]
                    logger.fdebug("Comic: " + comic_andiss)
                    # if it's not in the standard format this will bork.
                    # let's try to accomodate (somehow).
                    # first remove the extension (if any)
                    extensions = ('cbr', 'cbz')
                    if comic_andiss.lower().endswith(extensions):
                        comic_andiss = comic_andiss[:-4]
                        logger.fdebug("removed extension from filename.")
                    #now we have to break up the string regardless of formatting.
                    #let's force the spaces.
                    comic_andiss = re.sub('_', ' ', comic_andiss)
                    cs = comic_andiss.split()
                    cs_len = len(cs)
                    cn = ''
                    ydetected = 'no'
                    idetected = 'no'
                    decimaldetect = 'no'
                    for i in reversed(xrange(len(cs))):
                        #start at the end.
                        logger.fdebug("word: " + str(cs[i]))
                        #assume once we find issue - everything prior is the actual title
                        #idetected = no will ignore everything so it will assume all title
                        if cs[i][:-2] == '19' or cs[
                                i][:-2] == '20' and idetected == 'no':
                            logger.fdebug("year detected: " + str(cs[i]))
                            ydetected = 'yes'
                            result_comyear = cs[i]
                        elif cs[i].isdigit(
                        ) and idetected == 'no' or '.' in cs[i]:
                            if '.' in cs[i]:
                                #make sure it's a number on either side of decimal and assume decimal issue.
                                decst = cs[i].find('.')
                                dec_st = cs[i][:decst]
                                dec_en = cs[i][decst + 1:]
                                logger.fdebug("st: " + str(dec_st))
                                logger.fdebug("en: " + str(dec_en))
                                if dec_st.isdigit() and dec_en.isdigit():
                                    logger.fdebug(
                                        "decimal issue detected...adjusting.")
                                    issue = dec_st + "." + dec_en
                                    logger.fdebug("issue detected: " +
                                                  str(issue))
                                    idetected = 'yes'
                                else:
                                    logger.fdebug(
                                        "false decimal represent. Chunking to extra word."
                                    )
                                    cn = cn + cs[i] + " "
                                    break
                            issue = cs[i]
                            logger.fdebug("issue detected : " + str(issue))
                            idetected = 'yes'

                        elif '\#' in cs[i] or decimaldetect == 'yes':
                            logger.fdebug("issue detected: " + str(cs[i]))
                            idetected = 'yes'
                        else:
                            cn = cn + cs[i] + " "
                    if ydetected == 'no':
                        #assume no year given in filename...
                        result_comyear = "0000"
                    logger.fdebug("cm?: " + str(cn))
                    if issue is not '999999':
                        comiss = issue
                    else:
                        logger.ERROR(
                            "Invalid Issue number (none present) for " +
                            comfilename)
                        break
                    cnsplit = cn.split()
                    cname = ''
                    findcn = 0
                    while (findcn < len(cnsplit)):
                        cname = cname + cs[findcn] + " "
                        findcn += 1
                    cname = cname[:len(cname) - 1]  # drop the end space...
                    print("assuming name is : " + cname)
                    com_NAME = cname
                    print("com_NAME : " + com_NAME)
                    yearmatch = "True"
                else:
                    logger.fdebug('checking ' + m[cnt])
                    # we're assuming that the year is in brackets (and it should be damnit)
                    if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
                        print("year detected: " + str(m[cnt]))
                        ydetected = 'yes'
                        result_comyear = m[cnt]
                    elif m[cnt][:3].lower() in datelist:
                        logger.fdebug(
                            'possible issue date format given - verifying')
                        #if the date of the issue is given as (Jan 2010) or (January 2010) let's adjust.
                        #keeping in mind that ',' and '.' are already stripped from the string
                        if m[cnt][-4:].isdigit():
                            ydetected = 'yes'
                            result_comyear = m[cnt][-4:]
                            logger.fdebug('Valid Issue year of ' +
                                          str(result_comyear) +
                                          'detected in format of ' +
                                          str(m[cnt]))
            cnt += 1

        displength = len(cname)
        print 'd_filename is : ' + d_filename
        charcount = d_filename.count('#')
        print('charcount is : ' + str(charcount))
        if charcount > 0:
            print('entering loop')
            for i, m in enumerate(re.finditer('\#', d_filename)):
                if m.end() <= displength:
                    print comfilename[m.start():m.end()]
                    # find occurance in c_filename, then replace into d_filname so special characters are brought across
                    newchar = comfilename[m.start():m.end()]
                    print 'newchar:' + str(newchar)
                    d_filename = d_filename[:m.start()] + str(
                        newchar) + d_filename[m.end():]
                    print 'd_filename:' + str(d_filename)

        dispname = d_filename[:displength]
        print('dispname : ' + dispname)

        splitit = []
        watchcomic_split = []
        logger.fdebug("filename comic and issue: " + comic_andiss)

        #changed this from '' to ' '
        comic_iss_b4 = re.sub('[\-\:\,]', ' ', comic_andiss)
        comic_iss = comic_iss_b4.replace('.', ' ')
        comic_iss = re.sub('[\s+]', ' ', comic_iss).strip()
        logger.fdebug("adjusted comic and issue: " + str(comic_iss))
        #remove 'the' from here for proper comparisons.
        if ' the ' in comic_iss.lower():
            comic_iss = re.sub('\\bthe\\b', '', comic_iss).strip()
        splitit = comic_iss.split(None)
        logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " +
                      str(comic_iss))
        #here we cycle through the Watchlist looking for a match.
        while (cm_cn < watchcnt):
            #setup the watchlist
            comname = ComicName[cm_cn]
            comyear = ComicYear[cm_cn]
            compub = ComicPublisher[cm_cn]
            comtotal = ComicTotal[cm_cn]
            comicid = ComicID[cm_cn]
            watch_location = ComicLocation[cm_cn]

            # there shouldn't be an issue in the comic now, so let's just assume it's all gravy.
            splitst = len(splitit)
            watchcomic_split = helpers.cleanName(comname)
            watchcomic_split = re.sub('[\-\:\,\.]', ' ',
                                      watchcomic_split).split(None)

            logger.fdebug(
                str(splitit) + " file series word count: " + str(splitst))
            logger.fdebug(
                str(watchcomic_split) + " watchlist word count: " +
                str(len(watchcomic_split)))
            if (splitst) != len(watchcomic_split):
                logger.fdebug("incorrect comic lengths...not a match")


#                if str(splitit[0]).lower() == "the":
#                    logger.fdebug("THE word detected...attempting to adjust pattern matching")
#                    splitit[0] = splitit[4:]
            else:
                logger.fdebug("length match..proceeding")
                n = 0
                scount = 0
                logger.fdebug("search-length: " + str(splitst))
                logger.fdebug("Watchlist-length: " +
                              str(len(watchcomic_split)))
                while (n <= (splitst) - 1):
                    logger.fdebug("splitit: " + str(splitit[n]))
                    if n < (splitst) and n < len(watchcomic_split):
                        logger.fdebug(
                            str(n) + " Comparing: " +
                            str(watchcomic_split[n]) + " .to. " +
                            str(splitit[n]))
                        if '+' in watchcomic_split[n]:
                            watchcomic_split[n] = re.sub(
                                '+', '', str(watchcomic_split[n]))
                        if str(watchcomic_split[n].lower()) in str(
                                splitit[n].lower()) and len(
                                    watchcomic_split[n]) >= len(splitit[n]):
                            logger.fdebug("word matched on : " +
                                          str(splitit[n]))
                            scount += 1
                        #elif ':' in splitit[n] or '-' in splitit[n]:
                        #    splitrep = splitit[n].replace('-', '')
                        #    print ("non-character keyword...skipped on " + splitit[n])
                    elif str(splitit[n]).lower().startswith('v'):
                        logger.fdebug("possible versioning..checking")
                        #we hit a versioning # - account for it
                        if splitit[n][1:].isdigit():
                            comicversion = str(splitit[n])
                            logger.fdebug("version found: " +
                                          str(comicversion))
                    else:
                        logger.fdebug("Comic / Issue section")
                        if splitit[n].isdigit():
                            logger.fdebug("issue detected")
                        else:
                            logger.fdebug("non-match for: " + str(splitit[n]))
                            pass
                    n += 1
                #set the match threshold to 80% (for now)
                # if it's less than 80% consider it a non-match and discard.
                #splitit has to splitit-1 because last position is issue.
                wordcnt = int(scount)
                logger.fdebug("scount:" + str(wordcnt))
                totalcnt = int(splitst)
                logger.fdebug("splitit-len:" + str(totalcnt))
                spercent = (wordcnt / totalcnt) * 100
                logger.fdebug("we got " + str(spercent) + " percent.")
                if int(spercent) >= 80:
                    logger.fdebug("it's a go captain... - we matched " +
                                  str(spercent) + "%!")
                    logger.fdebug("this should be a match!")
                    logger.fdebug("issue we found for is : " + str(comiss))
                    #set the year to the series we just found ;)
                    result_comyear = comyear
                    #issue comparison now as well
                    logger.info(u"Found " + comname + " (" + str(comyear) +
                                ") issue: " + str(comiss))
                    watchmatch = str(comicid)
                    dispname = DisplayName[cm_cn]
                    foundonwatch = "True"
                    break
                elif int(spercent) < 80:
                    logger.fdebug("failure - we only got " + str(spercent) +
                                  "% right!")
            cm_cn += 1

        if foundonwatch == "False":
            watchmatch = None
        #---if it's not a match - send it to the importer.
        n = 0

        if volyr is None:
            if result_comyear is None:
                result_comyear = '0000'  #no year in filename basically.
        else:
            if result_comyear is None:
                result_comyear = volyr
        if volno is None:
            if volyr is None:
                vol_label = None
            else:
                vol_label = volyr
        else:
            vol_label = volno

        print("adding " + com_NAME + " to the import-queue!")
        impid = com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
        print("impid: " + str(impid))
        import_by_comicids.append({
            "impid":
            impid,
            "watchmatch":
            watchmatch,
            "displayname":
            dispname,
            "comicname":
            com_NAME,
            "comicyear":
            result_comyear,
            "volume":
            vol_label,
            "comfilename":
            comfilename,
            "comlocation":
            comlocation.decode(mylar.SYS_ENCODING)
        })

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        print("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " +
                    str(comiccnt) +
                    " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.IMP_MOVE:
                logger.info(
                    "You checked off Move Files...so that's what I'm going to do"
                )
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                print("Moving files into appropriate directory")
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    print("ComicLocation: " + str(watch_comlocation))
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename']
                    print("Orig. Location: " + str(orig_comlocation))
                    print("Orig. Filename: " + str(orig_filename))
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.IMP_RENAME:
                        print(
                            "Renaming files according to configuration details : "
                            + str(mylar.FILE_FORMAT))
                        renameit = helpers.rename_param(
                            watch_comicid, watch_comicname, watch_comicyear,
                            watch_comiciss)
                        nfilename = renameit['nfilename']

                        dst_path = os.path.join(watch_comlocation, nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        print(
                            "Renaming files not enabled, keeping original filename(s)"
                        )
                        dst_path = os.path.join(watch_comlocation,
                                                orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    print("I'm going to move " + str(orig_comlocation) +
                          " to .." + str(dst_path))
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info(
                            "Failed to move directory - check directories and manually re-run."
                        )
                    wat += 1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    print("ComicID: " + str(watch_comicid))
                    print("Issue#: " + str(watch_issue))
                    issuechk = myDB.selectone(
                        "SELECT * from issues where ComicID=? AND INT_IssueNumber=?",
                        [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        print("no matching issues for this comic#")
                    else:
                        print("...Existing status: " + str(issuechk['Status']))
                        control = {"IssueID": issuechk['IssueID']}
                        values = {"Status": "Archived"}
                        print("...changing status of " +
                              str(issuechk['Issue_Number']) + " to Archived ")
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat += 1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    print("Rescanning.. " + str(c))
                    updater.forceRescan(c)
        if not len(import_by_comicids):
            return "Completed"
    if len(import_by_comicids) > 0:
        import_comicids['comic_info'] = import_by_comicids
        print("import comicids: " + str(import_by_comicids))
        return import_comicids, len(import_by_comicids)
Beispiel #34
0
    def folder_create(self, booktype=None, update_loc=None):
        # dictionary needs to passed called comic with
        #  {'ComicPublisher', 'CorrectedType, 'Type', 'ComicYear', 'ComicName', 'ComicVersion'}
        # or pass in comicid value from __init__

        # setup default location here
        if update_loc is not None:
            comic_location = update_loc['temppath']
            enforce_format = update_loc['tempff']
            folder_format = update_loc['tempformat']
            comicid = update_loc['comicid']
        else:
            comic_location = mylar.CONFIG.DESTINATION_DIR
            enforce_format = False
            folder_format = mylar.CONFIG.FOLDER_FORMAT

        if folder_format is None:
            folder_format = '$Series ($Year)'

        if mylar.OS_DETECT == 'Windows':
            if '/' in folder_format:
                folder_format = re.sub('/', '\\', folder_format).strip()
        else:
            if '\\' in folder_format:
                folder_format = folder_format.replace('\\', '/').strip()

        u_comicnm = self.comic['ComicName']
        # let's remove the non-standard characters here that will break filenaming / searching.
        comicname_filesafe = helpers.filesafe(u_comicnm)
        comicdir = comicname_filesafe

        series = comicdir
        if series[-1:] == '.':
            series[:-1]

        publisher = re.sub('!', '',
                           self.comic['ComicPublisher'])  # thanks Boom!
        publisher = helpers.filesafe(publisher)

        if booktype is not None:
            if self.comic['Corrected_Type'] is not None:
                if self.comic['Corrected_Type'] != booktype:
                    booktype = booktype
                else:
                    booktype = self.comic['Corrected_Type']
            else:
                booktype = booktype
        else:
            booktype = self.comic['Type']

        if any([booktype is None, booktype == 'None', booktype == 'Print'
                ]) or all([
                    booktype != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False
                ]):
            chunk_fb = re.sub('\$Type', '', folder_format)
            chunk_b = re.compile(r'\s+')
            chunk_folder_format = chunk_b.sub(' ', chunk_fb)
            if booktype != 'Print':
                booktype = 'None'
        else:
            chunk_folder_format = folder_format

        if any([self.comic['ComicVersion'] is None, booktype != 'Print']):
            comicVol = 'None'
        else:
            comicVol = self.comic['ComicVersion']

        #if comversion is None, remove it so it doesn't populate with 'None'
        if comicVol == 'None':
            chunk_f_f = re.sub('\$VolumeN', '', chunk_folder_format)
            chunk_f = re.compile(r'\s+')
            chunk_folder_format = chunk_f.sub(' ', chunk_f_f)

        chunk_folder_format = re.sub("[()|[]]", '',
                                     chunk_folder_format).strip()
        ccf = chunk_folder_format.find('/ ')
        if ccf != -1:
            chunk_folder_format = chunk_folder_format[:ccf +
                                                      1] + chunk_folder_format[
                                                          ccf + 2:]
        ccf = chunk_folder_format.find('\ ')
        if ccf != -1:
            chunk_folder_format = chunk_folder_format[:ccf +
                                                      1] + chunk_folder_format[
                                                          ccf + 2:]

        #do work to generate folder path
        values = {
            '$Series': series,
            '$Publisher': publisher,
            '$Year': self.comic['ComicYear'],
            '$series': series.lower(),
            '$publisher': publisher.lower(),
            '$VolumeY': 'V' + self.comic['ComicYear'],
            '$VolumeN': comicVol.upper(),
            '$Annual': 'Annual',
            '$Type': booktype
        }

        if update_loc is not None:
            #set the paths here with the seperator removed allowing for cross-platform altering.
            ccdir = pathlib.PurePath(comic_location)
            ddir = pathlib.PurePath(mylar.CONFIG.DESTINATION_DIR)
            dlc = pathlib.PurePath(self.comic['ComicLocation'])
            path_convert = True
            i = 0
            bb = []
            while i < len(dlc.parts):
                try:
                    if dlc.parts[i] == ddir.parts[i]:
                        i += 1
                        continue
                    else:
                        bb.append(dlc.parts[i])
                        i += 1  #print('d.parts: %s' % ccdir.parts[i])
                except IndexError:
                    bb.append(dlc.parts[i])
                    i += 1
            bb_tuple = pathlib.PurePath(os.path.sep.join(bb))
            try:
                com_base = pathlib.PurePath(dlc).relative_to(ddir)
            except ValueError as e:
                #if the original path is not located in the same path as the ComicLocation (destination_dir).
                #this can happen when manually altered to a new path, or thru various changes to the ComicLocation path over time.
                #ie. ValueError: '/mnt/Comics/Death of Wolverine The Logan Legacy-(2014)' does not start with '/mnt/mediavg/Comics/Comics-2'
                dir_fix = []
                dir_parts = pathlib.PurePath(dlc).parts
                for dp in dir_parts:
                    try:
                        if self.comic['ComicYear'] is not None:
                            if self.comic['ComicYear'] in dp:
                                break
                        if self.comic['ComicName'] is not None:
                            if self.comic['ComicName'] in dp:
                                break
                        if self.comic['ComicPublisher'] is not None:
                            if self.comic['ComicPublisher'] in dp:
                                break
                        if self.comic['ComicVersion'] is not None:
                            if self.comic['ComicVersion'] in dp:
                                break
                        dir_fix.append(dp)
                    except:
                        pass

                if len(dir_fix) > 0:
                    spath = ''
                    t = 0
                    while (t < len(dir_parts)):
                        newpath = os.path.join(spath, dir_parts[t])
                        t += 1
                    com_base = newpath
                    #path_convert = False
            #print('com_base: %s' % com_base)
            #detect comiclocation path based on OS so that the path seperators are correct
            #have to figure out how to determine OS of original location...
            if mylar.OS_DETECT == 'Windows':
                p_path = pathlib.PureWindowsPath(ccdir)
            else:
                p_path = pathlib.PurePosixPath(ccdir)
            if enforce_format is True:
                first = helpers.replace_all(chunk_folder_format, values)
                if mylar.CONFIG.REPLACE_SPACES:
                    #mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    first = first.replace(' ', mylar.CONFIG.REPLACE_CHAR)
                comlocation = str(p_path.joinpath(first))
            else:
                comlocation = str(p_path.joinpath(com_base))

            return {
                'comlocation': comlocation,
                'path_convert': path_convert,
                'comicid': comicid
            }
        else:
            ddir = pathlib.PurePath(mylar.CONFIG.DESTINATION_DIR)
            i = 0
            bb = []
            while i < len(ddir.parts):
                try:
                    bb.append(ddir.parts[i])
                    i += 1
                except IndexError:
                    break

            bb2 = bb[0]
            bb.pop(0)
            bb_tuple = pathlib.PurePath(os.path.sep.join(bb))
            logger.fdebug('bb_tuple: %s' % bb_tuple)
            if mylar.OS_DETECT == 'Windows':
                p_path = pathlib.PureWindowsPath(
                    pathlib.PurePath(bb2).joinpath(bb_tuple))
            else:
                p_path = pathlib.PurePosixPath(
                    pathlib.PurePath(bb2).joinpath(bb_tuple))

            logger.fdebug('p_path: %s' % p_path)

            first = helpers.replace_all(chunk_folder_format, values)
            logger.fdebug('first-1: %s' % first)

            if mylar.CONFIG.REPLACE_SPACES:
                first = first.replace(' ', mylar.CONFIG.REPLACE_CHAR)
            logger.fdebug('first-2: %s' % first)
            comlocation = str(p_path.joinpath(first))
            logger.fdebug('comlocation: %s' % comlocation)

            #try:
            #    if folder_format == '':
            #        #comlocation = pathlib.PurePath(comiclocation).joinpath(comicdir, '(%s)') % comic['SeriesYear']
            #        comlocation = os.path.join(comic_location, comicdir, " (" + comic['SeriesYear'] + ")")
            #    else:
            #except TypeError as e:
            #    if comic_location is None:
            #        logger.error('[ERROR] %s' % e)
            #        logger.error('No Comic Location specified. This NEEDS to be set before anything can be added successfully.')
            #        return
            #    else:
            #        logger.error('[ERROR] %s' % e)
            #        return
            #except Exception as e:
            #    logger.error('[ERROR] %s' % e)
            #    logger.error('Cannot determine Comic Location path properly. Check your Comic Location and Folder Format for any errors.')
            #    return

            if comlocation == "":
                logger.error(
                    'There is no Comic Location Path specified - please specify one in Config/Web Interface.'
                )
                return

            return {'comlocation': comlocation, 'subpath': bb_tuple}
Beispiel #35
0
    def historycheck(self, nzbinfo):
        nzbid = nzbinfo['NZBID']
        history = self.server.history(True)
        found = False
        destdir = None
        double_pp = False
        hq = [hs for hs in history if hs['NZBID'] == nzbid
              ]  # and ('SUCCESS' in hs['Status'] or ('COPY' in hs['Status']))]
        if len(hq) > 0:
            logger.fdebug(
                'found matching completed item in history. Job has a status of %s'
                % hq[0]['Status'])
            if len(hq[0]['ScriptStatuses']) > 0:
                for x in hq[0]['ScriptStatuses']:
                    if 'comicrn' in x['Name'].lower():
                        double_pp = True
                        break

            if all([len(hq[0]['Parameters']) > 0, double_pp is False]):
                for x in hq[0]['Parameters']:
                    if all(
                        ['comicrn' in x['Name'].lower(), x['Value'] == 'yes']):
                        double_pp = True
                        break

            if double_pp is True:
                logger.warn(
                    'ComicRN has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.'
                )
                logger.warn(
                    'Either disable Completed Download Handling for NZBGet within Mylar, or remove ComicRN from your category script in NZBGet.'
                )
                return {'status': 'double-pp', 'failed': False}

            if all([
                    'SUCCESS' in hq[0]['Status'],
                (hq[0]['FileSizeMB'] * .95) <= hq[0]['DownloadedSizeMB'] <=
                (hq[0]['FileSizeMB'] * 1.05)
            ]):
                logger.fdebug('%s has final file size of %sMB' %
                              (hq[0]['Name'], hq[0]['DownloadedSizeMB']))
                if os.path.isdir(hq[0]['DestDir']):
                    destdir = hq[0]['DestDir']
                    logger.fdebug('location found @ %s' % destdir)
                elif mylar.CONFIG.NZBGET_DIRECTORY is None:
                    logger.fdebug(
                        'Unable to locate path (%s) on the machine that is running Mylar. If Mylar and nzbget are on separate machines, you need to set a directory location that is accessible to both'
                        % hq[0]['DestDir'])
                    return {'status': 'file not found', 'failed': False}

            elif all([
                    'COPY' in hq[0]['Status'],
                    int(hq[0]['FileSizeMB']) > 0,
                    hq[0]['DeleteStatus'] == 'COPY'
            ]):
                if hq[0]['Deleted'] is False:
                    config = self.server.config()
                    cDestDir = None
                    for x in config:
                        if x['Name'] == 'TempDir':
                            cTempDir = x['Value']
                        elif x['Name'] == 'DestDir':
                            cDestDir = x['Value']
                        if cDestDir is not None:
                            break

                    if cTempDir in hq[0]['DestDir']:
                        destdir2 = re.sub(cTempDir, cDestDir,
                                          hq[0]['DestDir']).strip()
                        if not destdir2.endswith(os.sep):
                            destdir2 = destdir2 + os.sep
                        destdir = os.path.join(destdir2, hq[0]['Name'])
                        logger.fdebug('NZBGET Destination dir set to: %s' %
                                      destdir)
                else:
                    history_del = self.server.editqueue(
                        'HistoryMarkBad', '', hq[0]['NZBID'])
                    if history_del is False:
                        logger.fdebug(
                            '[NZBGET] Unable to delete item (%s)from history so I can redownload a clean copy.'
                            % hq[0]['NZBName'])
                        return {'status': 'failure', 'failed': False}
                    else:
                        logger.fdebug(
                            '[NZBGET] Successfully removed prior item (%s) from history. Attempting to get another version.'
                            % hq[0]['NZBName'])
                        return {
                            'status':
                            True,
                            'name':
                            re.sub('.nzb', '', hq[0]['NZBName']).strip(),
                            'location':
                            os.path.abspath(
                                os.path.join(hq[0]['DestDir'], os.pardir)),
                            'failed':
                            True,
                            'issueid':
                            nzbinfo['issueid'],
                            'comicid':
                            nzbinfo['comicid'],
                            'apicall':
                            True,
                            'ddl':
                            False,
                            'download_info':
                            nzbinfo['download_info']
                        }
            elif 'FAILURE' in hq[0]['Status']:
                logger.warn(
                    'item is in a %s status. Considering this a FAILED attempted download for NZBID %s - %s'
                    % (hq[0]['Status'], hq[0]['NZBID'], hq[0]['NZBName']))
                return {
                    'status':
                    True,
                    'name':
                    re.sub('.nzb', '', hq[0]['NZBName']).strip(),
                    'location':
                    os.path.abspath(os.path.join(hq[0]['DestDir'], os.pardir)),
                    'failed':
                    True,
                    'issueid':
                    nzbinfo['issueid'],
                    'comicid':
                    nzbinfo['comicid'],
                    'apicall':
                    True,
                    'ddl':
                    False,
                    'download_info':
                    nzbinfo['download_info']
                }
            else:
                logger.warn(
                    'no file found where it should be @ %s - is there another script that moves things after completion ?'
                    % hq[0]['DestDir'])
                return {'status': 'file not found', 'failed': False}

            if mylar.CONFIG.NZBGET_DIRECTORY is not None:
                logger.fdebug('DestDir being passed to cdh conversion is: %s' %
                              hq[0]['DestDir'])
                cdh = cdh_mapping.CDH_MAP(hq[0]['DestDir'],
                                          nzbget=True,
                                          nzbget_server=self.server)
                destdir = cdh.the_sequence()
                if destdir is not None:
                    logger.fdebug(
                        'NZBGet Destination folder Successfully set via config to: %s'
                        % destdir)
                else:
                    logger.fdebug(
                        'Unable to locate path (%s) on the machine that is running Mylar. If Mylar and nzbget are on separate machines, you need to set a directory location that is accessible to both'
                        % hq[0]['DestDir'])
                    return {'status': 'file not found', 'failed': False}

            if destdir is not None:
                return {
                    'status': True,
                    'name': re.sub('.nzb', '', hq[0]['Name']).strip(),
                    'location': destdir,
                    'failed': False,
                    'issueid': nzbinfo['issueid'],
                    'comicid': nzbinfo['comicid'],
                    'apicall': True,
                    'ddl': False,
                    'download_info': nzbinfo['download_info']
                }
        else:
            logger.warn('Could not find completed NZBID %s in history' % nzbid)
            return {'status': False}
Beispiel #36
0
    def Process(self):
        module = '[FAILED-DOWNLOAD]'

        myDB = db.DBConnection()

        if self.nzb_name and self.nzb_folder:
            self._log('Failed download has been detected: ' + self.nzb_name + ' in ' + self.nzb_folder)

            #since this has already been passed through the search module, which holds the IssueID in the nzblog,
            #let's find the matching nzbname and pass it the IssueID in order to mark it as Failed and then return
            #to the search module and continue trucking along.

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext)
                nzbname = re.sub(str(ext), '', str(nzbname))

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:\?\'\(\)]', '', str(nzbname))
            nzbname = re.sub('[\&]', 'and', str(nzbname))
            nzbname = re.sub('_', '.', str(nzbname))

            logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname))
            self._log("nzbname: " + str(nzbname))

            nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.")
                logger.fdebug(module + ' Failure - could not locate nzbfile initially')
                # if failed on spaces, change it all to decimals and try again.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname))
                logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
                nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
                    self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
                    self.valreturn.append({"self.log": self.log,
                                           "mode": 'stop'})

                    return self.queue.put(self.valreturn)
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug(module + ' Auto-corrected and found the nzb as : ' + str(nzbname))
                    issueid = nzbiss['IssueID']
            else:
                issueid = nzbiss['IssueID']
                logger.fdebug(module + ' Issueid: ' + str(issueid))
                sarc = nzbiss['SARC']
                #use issueid to get publisher, series, year, issue number

        else:
            issueid = self.issueid
            nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
            if nzbiss is None:
                logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
                self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})
                return self.queue.put(self.valreturn)

            nzbname = nzbiss['NZBName']

        # find the provider.
        self.prov = nzbiss['PROVIDER']
        logger.info(module + ' Provider: ' + self.prov)

        # grab the id.
        self.id = nzbiss['ID']
        logger.info(module + ' ID: ' + self.id)
        annchk = "no"

        if 'annual' in nzbname.lower():
            logger.info(module + ' Annual detected.')
            annchk = "yes"
            issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
        else:
            issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()

        if issuenzb is not None:
            logger.info(module + ' issuenzb found.')
            if helpers.is_number(issueid):
                sandwich = int(issuenzb['IssueID'])
        else:
            logger.info(module + ' issuenzb not found.')
            #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
            #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
            if 'S' in issueid:
                sandwich = issueid
            elif 'G' in issueid or '-' in issueid:
                sandwich = 1
        try:
            if helpers.is_number(sandwich):
                if sandwich < 900000:
            # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
                    pass
            else:
                logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
                self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
                self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})
                return self.queue.put(self.valreturn)
        except NameError:
            logger.info('sandwich was not defined. Post-processing aborted...')
            self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})

            return self.queue.put(self.valreturn)

        comicid = issuenzb['ComicID']
        issuenumOG = issuenzb['Issue_Number']
        logger.info(module + ' Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' that was downloaded using ' + self.prov)
        self._log('Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' downloaded using ' + self.prov)

        logger.info(module + ' Marking as a Failed Download.')
        self._log('Marking as a Failed Download.')

        ctrlVal = {"IssueID": issueid}
        Vals = {"Status":    'Failed'}
        myDB.upsert("issues", Vals, ctrlVal)

        ctrlVal = {"ID":       self.id,
                   "Provider": self.prov,
                   "NZBName":  nzbname}
        Vals = {"Status":       'Failed',
                "ComicName":    issuenzb['ComicName'],
                "Issue_Number": issuenzb['Issue_Number'],
                "IssueID":      issueid,
                "ComicID":      comicid,
                "DateFailed":   helpers.now()}
        myDB.upsert("failed", Vals, ctrlVal)

        logger.info(module + ' Successfully marked as Failed.')
        self._log('Successfully marked as Failed.')

        if mylar.CONFIG.FAILED_AUTO:
            logger.info(module + ' Sending back to search to see if we can find something that will not fail.')
            self._log('Sending back to search to see if we can find something better that will not fail.')
            self.valreturn.append({"self.log":    self.log,
                                   "mode":        'retry',
                                   "issueid":     issueid,
                                   "comicid":     comicid,
                                   "comicname":   issuenzb['ComicName'],
                                   "issuenumber": issuenzb['Issue_Number'],
                                   "annchk":      annchk})

            return self.queue.put(self.valreturn)
        else:
            logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
            self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})
            return self.queue.put(self.valreturn)
Beispiel #37
0
def dbUpdate(ComicIDList=None):

    myDB = db.DBConnection()
    #print "comicidlist:" + str(ComicIDList)
    if ComicIDList is None:
        comiclist = myDB.select(
            'SELECT ComicID, ComicName from comics WHERE Status="Active" or Status="Loading" order by LastUpdated ASC'
        )
    else:
        comiclist = ComicIDList

    logger.info('Starting update for %i active comics' % len(comiclist))

    for comic in comiclist:
        if ComicIDList is None:
            comicid = comic[0]
        else:
            comicid = comic
        #print "comicid:" + str(comicid)
        mismatch = "no"
        if not mylar.CV_ONLY or comicid[:1] == "G":
            CV_EXcomicid = myDB.action(
                "SELECT * from exceptions WHERE ComicID=?",
                [comicid]).fetchone()
            if CV_EXcomicid is None: pass
            else:
                if CV_EXcomicid['variloop'] == '99':
                    mismatch = "yes"
            if comicid[:1] == "G":
                mylar.importer.GCDimport(comicid)
            else:
                mylar.importer.addComictoDB(comicid, mismatch)
        else:
            if mylar.CV_ONETIMER == 1:
                logger.fdebug('CV_OneTimer option enabled...')

                #in order to update to JUST CV_ONLY, we need to delete the issues for a given series so it's a clean refresh.
                logger.fdebug(
                    'Gathering the status of all issues for the series.')
                issues = myDB.select('SELECT * FROM issues WHERE ComicID=?',
                                     [comicid])
                if mylar.ANNUALS_ON:
                    issues += myDB.select(
                        'SELECT * FROM annuals WHERE ComicID=?', [comicid])
                #store the issues' status for a given comicid, after deleting and readding, flip the status back to what it is currently.
                logger.fdebug('Deleting all issue data.')
                myDB.select('DELETE FROM issues WHERE ComicID=?', [comicid])
                myDB.select('DELETE FROM annuals WHERE ComicID=?', [comicid])
                logger.fdebug(
                    'Refreshing the series and pulling in new data using only CV.'
                )
                mylar.importer.addComictoDB(comicid,
                                            mismatch,
                                            calledfrom='dbupdate')
                issues_new = myDB.select(
                    'SELECT * FROM issues WHERE ComicID=?', [comicid])
                annuals = []
                ann_list = []
                if mylar.ANNUALS_ON:
                    annuals_list = myDB.select(
                        'SELECT * FROM annuals WHERE ComicID=?', [comicid])
                    ann_list += annuals_list
                    issues_new += annuals_list

                icount = 0
                logger.fdebug(
                    'Attempting to put the Statuses back how they were.')
                for issue in issues:
                    for issuenew in issues_new:
                        if issuenew['IssueID'] == issue['IssueID'] and issuenew[
                                'Status'] != issue['Status']:
                            #if the status is now Downloaded, keep status.
                            logger.info('existing status: ' +
                                        str(issuenew['Status']))
                            logger.info('new status: ' + str(issue['Status']))
                            if issuenew['Status'] == 'Downloaded' or issue[
                                    'Status'] == 'Snatched':
                                break
                            #change the status to the previous status
                            ctrlVAL = {'IssueID': issue['IssueID']}
                            newVAL = {'Status': issue['Status']}
                            if any(d['IssueID'] == str(issue['IssueID'])
                                   for d in ann_list):
                                logger.fdebug('annual detected for ' +
                                              str(issue['IssueID']) + ' #: ' +
                                              str(issue['Issue_Number']))
                                myDB.upsert("Annuals", newVAL, ctrlVAL)
                            else:
                                myDB.upsert("Issues", newVAL, ctrlVAL)
                            icount += 1
                            break
                logger.info(
                    'In converting data to CV only, I changed the status of ' +
                    str(icount) + ' issues.')
                mylar.CV_ONETIMER = 0
            else:
                mylar.importer.addComictoDB(comicid, mismatch)

        #check global skipped2wanted status here
        #if mylar.GLOBAL_SKIPPED2WANTED:
        #    logger.fdebug('Global change for ' + str(comicid) + ' - Marking all issues not present as Wanted.')
        #    mylar.webserve.skipped2wanted(comicid,True)

        time.sleep(5)  #pause for 5 secs so dont hammer CV and get 500 error
    logger.info('Update complete')
Beispiel #38
0
def upcoming_update(ComicID,
                    ComicName,
                    IssueNumber,
                    IssueDate,
                    forcecheck=None,
                    futurepull=None,
                    altissuenumber=None):
    # here we add to upcoming table...
    myDB = db.DBConnection()
    dspComicName = ComicName  #to make sure that the word 'annual' will be displayed on screen
    if 'annual' in ComicName.lower():
        adjComicName = re.sub("\\bannual\\b", "",
                              ComicName.lower())  # for use with comparisons.
        logger.fdebug('annual detected - adjusting name to : ' + adjComicName)
    else:
        adjComicName = ComicName
    controlValue = {"ComicID": ComicID}
    newValue = {
        "ComicName": adjComicName,
        "IssueNumber": str(IssueNumber),
        "DisplayComicName": dspComicName,
        "IssueDate": str(IssueDate)
    }

    #let's refresh the series here just to make sure if an issue is available/not.
    mismatch = "no"
    CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?",
                               [ComicID]).fetchone()
    if CV_EXcomicid is None: pass
    else:
        if CV_EXcomicid['variloop'] == '99':
            mismatch = "yes"
    lastupdatechk = myDB.action("SELECT * FROM comics WHERE ComicID=?",
                                [ComicID]).fetchone()
    if lastupdatechk is None:
        pullupd = "yes"
    else:
        c_date = lastupdatechk['LastUpdated']
        if c_date is None:
            logger.error(
                lastupdatechk['ComicName'] +
                ' failed during a previous add /refresh. Please either delete and readd the series, or try a refresh of the series.'
            )
            return
        c_obj_date = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
        # no need to hammer the refresh
        # let's check it every 5 hours (or more)
        #pullupd = "yes"
    if 'annual' in ComicName.lower():
        if mylar.ANNUALS_ON:
            issuechk = myDB.action(
                "SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?",
                [ComicID, IssueNumber]).fetchone()
        else:
            logger.fdebug(
                'Annual detected, but annuals not enabled. Ignoring result.')
            return
    else:
        issuechk = myDB.action(
            "SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?",
            [ComicID, IssueNumber]).fetchone()

    if issuechk is None and altissuenumber is not None:
        logger.info('altissuenumber is : ' + str(altissuenumber))
        issuechk = myDB.action(
            "SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?",
            [ComicID, helpers.issuedigits(altissuenumber)]).fetchone()
    if issuechk is None:
        if futurepull is None:
            logger.fdebug(
                adjComicName + ' Issue: ' + str(IssueNumber) +
                ' not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.'
            )
            # we need to either decrease the total issue count, OR indicate that an issue is upcoming.
            upco_results = myDB.action(
                "SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?",
                [ComicID]).fetchall()
            upco_iss = upco_results[0][0]
            #logger.info("upco_iss: " + str(upco_iss))
            if int(upco_iss) > 0:
                #logger.info("There is " + str(upco_iss) + " of " + str(ComicName) + " that's not accounted for")
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": str(upco_iss)}
                myDB.upsert("comics", newVal, newKey)
            elif int(upco_iss) <= 0 and lastupdatechk['not_updated_db']:
                #if not_updated_db has a value, and upco_iss is > 0, let's zero it back out cause it's updated now.
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": ""}
                myDB.upsert("comics", newVal, newKey)

            if hours > 5 or forcecheck == 'yes':
                pullupd = "yes"
                logger.fdebug('Now Refreshing comic ' + ComicName +
                              ' to make sure it is up-to-date')
                if ComicID[:1] == "G":
                    mylar.importer.GCDimport(ComicID, pullupd)
                else:
                    mylar.importer.addComictoDB(ComicID, mismatch, pullupd)
            else:
                logger.fdebug(
                    'It has not been longer than 5 hours since we last did this...we will wait so we do not hammer things.'
                )
                return
        else:
            # if futurepull is not None, let's just update the status and ComicID
            # NOTE: THIS IS CREATING EMPTY ENTRIES IN THE FUTURE TABLE. ???
            nKey = {"ComicID": ComicID}
            nVal = {"Status": "Wanted"}
            myDB.upsert("future", nVal, nKey)

    if issuechk is not None:
        if issuechk['Issue_Number'] == IssueNumber or issuechk[
                'Issue_Number'] == altissuenumber:
            logger.fdebug(
                'Comic series already up-to-date ... no need to refresh at this time.'
            )
            logger.fdebug('Available to be marked for download - checking...' +
                          adjComicName + ' Issue: ' +
                          str(issuechk['Issue_Number']))
            logger.fdebug('...Existing status: ' + str(issuechk['Status']))
            control = {"IssueID": issuechk['IssueID']}
            newValue['IssueID'] = issuechk['IssueID']
            if issuechk['Status'] == "Snatched":
                values = {"Status": "Snatched"}
                newValue['Status'] = "Snatched"
            elif issuechk['Status'] == "Downloaded":
                values = {"Status": "Downloaded"}
                newValue['Status'] = "Downloaded"
                #if the status is Downloaded and it's on the pullist - let's mark it so everyone can bask in the glory

            elif issuechk['Status'] == "Wanted":
                values = {"Status": "Wanted"}
                newValue['Status'] = "Wanted"
            elif issuechk['Status'] == "Archived":
                values = {"Status": "Archived"}
                newValue['Status'] = "Archived"
            else:
                values = {"Status": "Skipped"}
                newValue['Status'] = "Skipped"
            #was in wrong place :(
        else:
            logger.fdebug(
                'Issues do not match for some reason...weekly new issue: ' +
                str(IssueNumber))
            return

    if mylar.AUTOWANT_UPCOMING:
        #for issues not in db - to be added to Upcoming table.
        if issuechk is None:
            newValue['Status'] = "Wanted"
            logger.fdebug(
                '...Changing Status to Wanted and throwing it in the Upcoming section since it is not published yet.'
            )
        #this works for issues existing in DB...
        elif issuechk['Status'] == "Skipped":
            newValue['Status'] = "Wanted"
            values = {"Status": "Wanted"}
            logger.fdebug('...New status of Wanted')
        elif issuechk['Status'] == "Wanted":
            logger.fdebug('...Status already Wanted .. not changing.')
        else:
            logger.fdebug(
                '...Already have issue - keeping existing status of : ' +
                str(issuechk['Status']))

    if issuechk is None:
        myDB.upsert("upcoming", newValue, controlValue)
    else:
        logger.fdebug('--attempt to find errant adds to Wanted list')
        logger.fdebug('UpcomingNewValue: ' + str(newValue))
        logger.fdebug('UpcomingcontrolValue: ' + str(controlValue))
        if issuechk['IssueDate'] == '0000-00-00' and newValue[
                'IssueDate'] != '0000-00-00':
            logger.fdebug(
                'Found a 0000-00-00 issue - force updating series to try and get it proper.'
            )
            dateVal = {
                "IssueDate": newValue['IssueDate'],
                "ComicName": issuechk['ComicName'],
                "Status": newValue['Status'],
                "IssueNumber": issuechk['Issue_Number']
            }
            logger.fdebug('updating date in upcoming table to : ' +
                          str(newValue['IssueDate']))
            logger.fdebug('ComicID:' + str(controlValue))
            myDB.upsert("upcoming", dateVal, controlValue)
            logger.fdebug('Temporarily putting the Issue Date for ' +
                          str(issuechk['Issue_Number']) + ' to ' +
                          str(newValue['IssueDate']))
            values = {"IssueDate": newValue['IssueDate']}
            #if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd='yes')
            #else: mylar.importer.addComictoDB(ComicID,mismatch,pullupd='yes')

        if 'annual' in ComicName.lower():
            myDB.upsert("annuals", values, control)
        else:
            myDB.upsert("issues", values, control)

        if issuechk['Status'] == 'Downloaded' or issuechk[
                'Status'] == 'Archived' or issuechk['Status'] == 'Snatched':
            logger.fdebug('updating Pull-list to reflect status.')
            downstats = {
                "Status": issuechk['Status'],
                "ComicID": issuechk['ComicID']
            }
            return downstats
Beispiel #39
0
    def Process(self):
            self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
            self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
            logger.fdebug("nzb name: " + str(self.nzb_name))
            logger.fdebug("nzb folder: " + str(self.nzb_folder))
            if mylar.USE_SABNZBD==0:
                logger.fdebug("Not using SABNzbd")
            else:
                # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
                if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY is not 'None' and len(mylar.SAB_DIRECTORY) > 4:
                    self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
    
                #lookup nzb_name in nzblog table to get issueid
    
                #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
                #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
                querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                #logger.info("querysab_string:" + str(querysab))
                file = urllib2.urlopen(querysab)
                data = file.read()
                file.close()
                dom = parseString(data)

                try:
                    sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
                except:
                    errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
                    logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
                    return
                sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
                logger.fdebug("SAB Replace Spaces: " + str(sabreps))
                logger.fdebug("SAB Replace Dots: " + str(sabrepd))
            if mylar.USE_NZBGET==1:
                logger.fdebug("Using NZBGET")
                logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
            myDB = db.DBConnection()

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext, logger.DEBUG)
                nzbname = re.sub(str(ext), '', str(nzbname))

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:\?]', '', str(nzbname))
            nzbname = re.sub('[\&]', 'and', str(nzbname))

            logger.fdebug("After conversions, nzbname is : " + str(nzbname))
#            if mylar.USE_NZBGET==1:
#                nzbname=self.nzb_name
            self._log("nzbname: " + str(nzbname), logger.DEBUG)

            nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
                logger.fdebug("Failure - could not locate nzbfile initially.")
                # if failed on spaces, change it all to decimals and try again.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
                logger.fdebug("trying again with nzbname of : " + str(nzbname))
                nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
                    return
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
                    issueid = nzbiss['IssueID']
            else: 
                issueid = nzbiss['IssueID']
                logger.fdebug("issueid:" + str(issueid))
                sarc = nzbiss['SARC']
                #use issueid to get publisher, series, year, issue number
            issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
            if issuenzb is not None:
                if helpers.is_number(issueid):
                    sandwich = int(issuenzb['IssueID'])
            else:
                #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
                #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
                if 'S' in issueid:
                    sandwich = issueid
                elif 'G' in issueid: 
                    sandwich = 1
            if helpers.is_number(sandwich):
                if sandwich < 900000:
                    # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
                    pass
            else:
                if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
                    # this has no issueID, therefore it's a one-off or a manual post-proc.
                    # At this point, let's just drop it into the Comic Location folder and forget about it..
                    if 'S' in sandwich:
                        self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                        logger.info("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                        if mylar.STORYARCDIR:
                            storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
                            self._log("StoryArc Directory set to : " + storyarcd, logger.DEBUG)
                        else:
                            self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)

                    else:
                        self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
                        logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
                        self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)

                    for root, dirnames, filenames in os.walk(self.nzb_folder):
                        for filename in filenames:
                            if filename.lower().endswith(extensions):
                                ofilename = filename
                                path, ext = os.path.splitext(ofilename)
      
                    if 'S' in sandwich:
                        if mylar.STORYARCDIR:
                            grdst = storyarcd
                        else:
                            grdst = mylar.DESTINATION_DIR
                    else:
                        if mylar.GRABBAG_DIR:
                            grdst = mylar.GRABBAG_DIR
                        else:
                            grdst = mylar.DESTINATION_DIR

                    filechecker.validateAndCreateDirectory(grdst, True)
    
                    grab_dst = os.path.join(grdst, ofilename)
                    self._log("Destination Path : " + grab_dst, logger.DEBUG)
                    logger.info("Destination Path : " + grab_dst)
                    grab_src = os.path.join(self.nzb_folder, ofilename)
                    self._log("Source Path : " + grab_src, logger.DEBUG)
                    logger.info("Source Path : " + grab_src)

                    logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))

                    try:
                        shutil.move(grab_src, grab_dst)
                    except (OSError, IOError):
                        self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                        logger.debug("Failed to move directory - check directories and manually re-run.")
                        return
                    #tidyup old path
                    try:
                        shutil.rmtree(self.nzb_folder)
                    except (OSError, IOError):
                        self._log("Failed to remove temporary directory.", logger.DEBUG)
                        logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
                        return

                    logger.debug("Removed temporary directory : " + str(self.nzb_folder))
                    self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
                    #delete entry from nzblog table
                    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])

                    if 'S' in issueid:
                        issuearcid = re.sub('S', '', issueid)
                        logger.info("IssueArcID is : " + str(issuearcid))
                        ctrlVal = {"IssueArcID":  issuearcid}
                        newVal = {"Status":    "Downloaded",
                                  "Location":  grab_dst }
                        myDB.upsert("readinglist",newVal,ctrlVal)
                        logger.info("updated status to Downloaded")
                    return self.log

            comicid = issuenzb['ComicID']
            issuenumOG = issuenzb['Issue_Number']
            #issueno = str(issuenum).split('.')[0]
            #new CV API - removed all decimals...here we go AGAIN!
            issuenum = issuenumOG
            issue_except = 'None'
            if 'au' in issuenum.lower():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AU'
            if '.' in issuenum:
                iss_find = issuenum.find('.')
                iss_b4dec = issuenum[:iss_find]
                iss_decval = issuenum[iss_find+1:]
                if int(iss_decval) == 0:
                    iss = iss_b4dec
                    issdec = int(iss_decval)
                    issueno = str(iss)
                    self._log("Issue Number: " + str(issueno), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(issueno))
                else:
                    if len(iss_decval) == 1:
                        iss = iss_b4dec + "." + iss_decval
                        issdec = int(iss_decval) * 10
                    else:
                        iss = iss_b4dec + "." + iss_decval.rstrip('0')
                        issdec = int(iss_decval.rstrip('0')) * 10
                    issueno = iss_b4dec
                    self._log("Issue Number: " + str(iss), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(iss))
            else:
                iss = issuenum
                issueno = str(iss)
            # issue zero-suppression here
            if mylar.ZERO_LEVEL == "0": 
                zeroadd = ""
            else:
                if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
                elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
                elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

            logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

            if str(len(issueno)) > 1:
                if int(issueno) < 10:
                    self._log("issue detected less than 10", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None': 
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                elif int(issueno) >= 10 and int(issueno) < 100:
                    self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
                    if mylar.ZERO_LEVEL_N == "none":
                        zeroadd = ""
                    else:
                        zeroadd = "0"
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                           prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                else:
                    self._log("issue detected greater than 100", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
            else:
                prettycomiss = str(issueno)
                self._log("issue length error - cannot determine length. Defaulting to None:  " + str(prettycomiss), logger.DEBUG)

            logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
            issueyear = issuenzb['IssueDate'][:4]
            self._log("Issue Year: " + str(issueyear), logger.DEBUG)
            logger.fdebug("Issue Year : " + str(issueyear))
            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            publisher = comicnzb['ComicPublisher']
            self._log("Publisher: " + publisher, logger.DEBUG)
            logger.fdebug("Publisher: " + str(publisher))
            #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
            series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
            self._log("Series: " + series, logger.DEBUG)
            logger.fdebug("Series: " + str(series))
            seriesyear = comicnzb['ComicYear']
            self._log("Year: " + seriesyear, logger.DEBUG)
            logger.fdebug("Year: "  + str(seriesyear))
            comlocation = comicnzb['ComicLocation']
            self._log("Comic Location: " + comlocation, logger.DEBUG)
            logger.fdebug("Comic Location: " + str(comlocation))
            comversion = comicnzb['ComicVersion']
            self._log("Comic Version: " + str(comversion), logger.DEBUG)
            logger.fdebug("Comic Version: " + str(comversion))
            if comversion is None:
                comversion = 'None'
            #if comversion is None, remove it so it doesn't populate with 'None'
            if comversion == 'None':
                chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
                chunk_f = re.compile(r'\s+')
                chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
                logger.fdebug("No version # found for series, removing from filename")
                logger.fdebug("new format is now: " + str(chunk_file_format))
            else:
                chunk_file_format = mylar.FILE_FORMAT
            #Run Pre-script

            if mylar.ENABLE_PRE_SCRIPTS:
                nzbn = self.nzb_name #original nzb name
                nzbf = self.nzb_folder #original nzb folder
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_pre_scripts(nzbn, nzbf, seriesmetadata )

        #rename file and move to new path
        #nfilename = series + " " + issueno + " (" + seriesyear + ")"

            file_values = {'$Series':    series,
                           '$Issue':     prettycomiss,
                           '$Year':      issueyear,
                           '$series':    series.lower(),
                           '$Publisher': publisher,
                           '$publisher': publisher.lower(),
                           '$VolumeY':   'V' + str(seriesyear),
                           '$VolumeN':   comversion
                          }

            ofilename = None

            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)

            if ofilename is None:
                logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
                return
            self._log("Original Filename: " + ofilename, logger.DEBUG)
            self._log("Original Extension: " + ext, logger.DEBUG)
            logger.fdebug("Original Filname: " + str(ofilename))
            logger.fdebug("Original Extension: " + str(ext))

            if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
                self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
                logger.fdebug("Rename Files isn't enabled - keeping original filename.")
                #check if extension is in nzb_name - will screw up otherwise
                if ofilename.lower().endswith(extensions):
                    nfilename = ofilename[:-4]
                else:
                    nfilename = ofilename
            else:
                nfilename = helpers.replace_all(chunk_file_format, file_values)
                if mylar.REPLACE_SPACES:
                    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
            nfilename = re.sub('[\,\:\?]', '', nfilename)
            self._log("New Filename: " + nfilename, logger.DEBUG)
            logger.fdebug("New Filename: " + str(nfilename))

            src = os.path.join(self.nzb_folder, ofilename)

            filechecker.validateAndCreateDirectory(comlocation, True)

            if mylar.LOWERCASE_FILENAMES:
                dst = (comlocation + "/" + nfilename + ext).lower()
            else:
                dst = comlocation + "/" + nfilename + ext.lower()    
            self._log("Source:" + src, logger.DEBUG)
            self._log("Destination:" +  dst, logger.DEBUG)
            logger.fdebug("Source: " + str(src))
            logger.fdebug("Destination: " + str(dst))

            os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
            src = os.path.join(self.nzb_folder, str(nfilename + ext))
            try:
                shutil.move(src, dst)
            except (OSError, IOError):
                self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return
            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return

            self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
                    #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
                    #update snatched table to change status to Downloaded
            updater.foundsearch(comicid, issueid, down='True')
                    #force rescan of files
            updater.forceRescan(comicid)
            logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
            self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)

            if mylar.PROWL_ENABLED:
                pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
                logger.info(u"Prowl request")
                prowl = notifiers.PROWL()
                prowl.notify(pushmessage,"Download and Postprocessing completed")

            if mylar.NMA_ENABLED:
                nma = notifiers.NMA()
                nma.notify(series, str(issueyear), str(issuenumOG))

            if mylar.PUSHOVER_ENABLED:
                pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
                logger.info(u"Pushover request")
                pushover = notifiers.PUSHOVER()
                pushover.notify(pushmessage, "Download and Post-Processing completed")
             
            # retrieve/create the corresponding comic objects

            if mylar.ENABLE_EXTRA_SCRIPTS:
                folderp = str(dst) #folder location after move/rename
                nzbn = self.nzb_name #original nzb name
                filen = str(nfilename + ext) #new filename
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata )

            return self.log
Beispiel #40
0
def torsend2client(seriesname, issue, seriesyear, linkit, site):
    logger.info('matched on ' + str(seriesname))
    filename = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '', seriesname)
    filename = re.sub(' ', '_', filename)
    filename += "_" + str(issue) + "_" + str(seriesyear)
    if site == 'CBT':
        logger.info(linkit)
        linkit = str(linkit) + '&passkey=' + str(mylar.CBT_PASSKEY)

    if linkit[-7:] != "torrent":  # and site != "KAT":
        filename += ".torrent"

    if mylar.TORRENT_LOCAL and mylar.LOCAL_WATCHDIR is not None:
        filepath = os.path.join(mylar.LOCAL_WATCHDIR, filename)
        logger.fdebug('filename for torrent set to : ' + filepath)
    elif mylar.TORRENT_SEEDBOX and mylar.SEEDBOX_WATCHDIR is not None:
        filepath = os.path.join(mylar.CACHE_DIR, filename)
        logger.fdebug('filename for torrent set to : ' + filepath)
    else:
        logger.error(
            'No Local Watch Directory or Seedbox Watch Directory specified. Set it and try again.'
        )
        return "fail"

    try:
        request = urllib2.Request(linkit)
        #request.add_header('User-Agent', str(mylar.USER_AGENT))
        request.add_header('Accept-encoding', 'gzip')

        if site == 'KAT':
            stfind = linkit.find('?')
            kat_referrer = linkit[:stfind]
            request.add_header('Referer', kat_referrer)
            logger.fdebug('KAT Referer set to :' + kat_referrer)


#        response = helpers.urlretrieve(urllib2.urlopen(request), filepath)
        response = urllib2.urlopen(request)
        logger.fdebug('retrieved response.')

        if site == 'KAT':
            if response.info(
            )['content-encoding'] == 'gzip':  #.get('Content-Encoding') == 'gzip':
                logger.fdebug('gzip detected')
                buf = StringIO(response.read())
                logger.fdebug('gzip buffered')
                f = gzip.GzipFile(fileobj=buf)
                logger.fdebug('gzip filed.')
                torrent = f.read()
                logger.fdebug('gzip read.')
        else:
            torrent = response.read()

    except Exception, e:
        logger.warn('Error fetching data from %s: %s' % (site, e))
        return "fail"
Beispiel #41
0
def torrents(pickfeed=None, seriesname=None, issue=None):
    if pickfeed is None:
        pickfeed = 1
    #else:
    #    print "pickfeed is " + str(pickfeed)
    passkey = mylar.CBT_PASSKEY
    srchterm = None

    if seriesname:
        srchterm = re.sub(' ', '%20', seriesname)
    if issue:
        srchterm += '%20' + str(issue)

    if mylar.KAT_PROXY:
        if mylar.KAT_PROXY.endswith('/'):
            kat_url = mylar.KAT_PROXY
        else:
            kat_url = mylar.KAT_PROXY + '/'
    else:
        kat_url = 'http://kat.ph/'

    if pickfeed == 'KAT':
        #we need to cycle through both categories (comics & other) - so we loop.
        loopit = 2
    else:
        loopit = 1

    lp = 0

    title = []
    link = []
    description = []
    seriestitle = []
    i = 0

    feeddata = []
    myDB = db.DBConnection()
    torthekat = []
    katinfo = {}

    while (lp < loopit):
        if lp == 0 and loopit == 2:
            pickfeed = '2'
        elif lp == 1 and loopit == 2:
            pickfeed = '5'

        feedtype = None

        if pickfeed == "1":  # cbt rss feed based on followlist
            feed = "http://comicbt.com/rss.php?action=browse&passkey=" + str(
                passkey) + "&type=dl"
            feedtype = ' from the New Releases RSS Feed for comics'
        elif pickfeed == "2" and srchterm is not None:  # kat.ph search
            feed = kat_url + "usearch/" + str(
                srchterm) + "%20category%3Acomics%20seeds%3A" + str(
                    mylar.MINSEEDS) + "/?rss=1"
        elif pickfeed == "3":  # kat.ph rss feed
            feed = kat_url + "usearch/category%3Acomics%20seeds%3A" + str(
                mylar.MINSEEDS) + "/?rss=1"
            feedtype = ' from the New Releases RSS Feed for comics'
        elif pickfeed == "4":  #cbt follow link
            feed = "http://comicbt.com/rss.php?action=follow&passkey=" + str(
                passkey) + "&type=dl"
            feedtype = ' from your CBT Followlist RSS Feed'
        elif pickfeed == "5" and srchterm is not None:  # kat.ph search (category:other since some 0-day comics initially get thrown there until categorized)
            feed = kat_url + "usearch/" + str(
                srchterm) + "%20category%3Aother%20seeds%3A1/?rss=1"
        elif pickfeed == "6":  # kat.ph rss feed (category:other so that we can get them quicker if need-be)
            feed = kat_url + "usearch/.cbr%20category%3Aother%20seeds%3A" + str(
                mylar.MINSEEDS) + "/?rss=1"
            feedtype = ' from the New Releases for category Other RSS Feed that contain comics'
        elif pickfeed == "7":  # cbt series link
            #           seriespage = "http://comicbt.com/series.php?passkey=" + str(passkey)
            feed = "http://comicbt.com/rss.php?action=series&series=" + str(
                seriesno) + "&passkey=" + str(passkey)
        else:
            logger.error('invalid pickfeed denoted...')
            return

        #print 'feed URL: ' + str(feed)

        if pickfeed == "7":  # we need to get the series # first
            seriesSearch(seriespage, seriesname)

        feedme = feedparser.parse(feed)

        if pickfeed == "3" or pickfeed == "6" or pickfeed == "2" or pickfeed == "5":
            picksite = 'KAT'
        elif pickfeed == "1" or pickfeed == "4":
            picksite = 'CBT'

        for entry in feedme['entries']:
            if pickfeed == "3" or pickfeed == "6":
                tmpsz = feedme.entries[i].enclosures[0]
                feeddata.append({
                    'site': picksite,
                    'title': feedme.entries[i].title,
                    'link': tmpsz['url'],
                    'pubdate': feedme.entries[i].updated,
                    'size': tmpsz['length']
                })

                #print ("Site: KAT")
                #print ("Title: " + str(feedme.entries[i].title))
                #print ("Link: " + str(tmpsz['url']))
                #print ("pubdate: " + str(feedme.entries[i].updated))
                #print ("size: " + str(tmpsz['length']))

            elif pickfeed == "2" or pickfeed == "5":
                tmpsz = feedme.entries[i].enclosures[0]
                torthekat.append({
                    'site': picksite,
                    'title': feedme.entries[i].title,
                    'link': tmpsz['url'],
                    'pubdate': feedme.entries[i].updated,
                    'size': tmpsz['length']
                })

                #print ("Site: KAT")
                #print ("Title: " + str(feedme.entries[i].title))
                #print ("Link: " + str(tmpsz['url']))
                #print ("pubdate: " + str(feedme.entries[i].updated))
                #print ("size: " + str(tmpsz['length']))

            elif pickfeed == "1" or pickfeed == "4":
                if pickfeed == "1":
                    tmpdesc = feedme.entries[i].description
                    #break it down to get the Size since it's available on THIS CBT feed only.
                    sizestart = tmpdesc.find('Size:')
                    sizeend = tmpdesc.find('Leechers:')
                    sizestart += 5  # to get to the end of the word 'Size:'
                    tmpsize = tmpdesc[sizestart:sizeend].strip()
                    fdigits = re.sub("[^0123456789\.]", "", tmpsize).strip()
                    if '.' in fdigits:
                        decfind = fdigits.find('.')
                        wholenum = fdigits[:decfind]
                        decnum = fdigits[decfind + 1:]
                    else:
                        wholenum = fdigits
                        decnum = 0
                    if 'MB' in tmpsize:
                        wholebytes = int(wholenum) * 1048576
                        wholedecimal = (int(decnum) * 1048576) / 100
                        justdigits = wholebytes + wholedecimal
                    else:
                        #it's 'GB' then
                        wholebytes = (int(wholenum) * 1024) * 1048576
                        wholedecimal = ((int(decnum) * 1024) * 1048576) / 100
                        justdigits = wholebytes + wholedecimal
                    #Get the # of seeders.
                    seedstart = tmpdesc.find('Seeders:')
                    seedend = tmpdesc.find('Added:')
                    seedstart += 8  # to get to the end of the word 'Seeders:'
                    tmpseed = tmpdesc[seedstart:seedend].strip()
                    seeddigits = re.sub("[^0123456789\.]", "", tmpseed).strip()

                else:
                    justdigits = None  #size not available in follow-list rss feed
                    seeddigits = 0  #number of seeders not available in follow-list rss feed

                if int(mylar.MINSEEDS) >= int(seeddigits):
                    feeddata.append({
                        'site': picksite,
                        'title': feedme.entries[i].title,
                        'link': feedme.entries[i].link,
                        'pubdate': feedme.entries[i].updated,
                        'size': justdigits
                    })
                #print ("Site: CBT")
                #print ("Title: " + str(feeddata[i]['Title']))
                #print ("Link: " + str(feeddata[i]['Link']))
                #print ("pubdate: " + str(feeddata[i]['Pubdate']))

            i += 1

        if feedtype is None:
            logger.fdebug('[' + picksite + '] there were ' + str(i) +
                          ' results..')
        else:
            logger.fdebug('[' + picksite + '] there were ' + str(i) +
                          ' results ' + feedtype)
        lp += 1

    if not seriesname:
        rssdbupdate(feeddata, i, 'torrent')
    else:
        katinfo['entries'] = torthekat
        return katinfo
    return
Beispiel #42
0
    def historycheck(self, nzbinfo, roundtwo=False):
        sendresponse = nzbinfo['nzo_id']
        hist_params = {
            'mode': 'history',
            'category': mylar.CONFIG.SAB_CATEGORY,
            'failed': 0,
            'output': 'json',
            'apikey': mylar.CONFIG.SAB_APIKEY
        }

        sab_check = None
        if mylar.CONFIG.SAB_VERSION is None:
            try:
                sc = mylar.webserve.WebInterface()
                sab_check = sc.SABtest(sabhost=mylar.CONFIG.SAB_HOST,
                                       sabusername=mylar.CONFIG.SAB_USERNAME,
                                       sabpassword=mylar.CONFIG.SAB_PASSWORD,
                                       sabapikey=mylar.CONFIG.SAB_APIKEY)
            except Exception as e:
                logger.warn(
                    '[SABNZBD-VERSION-TEST] Exception encountered trying to retrieve SABnzbd version: %s. Setting history length to last 200 items.'
                    % e)
                hist_params['limit'] = 200
                sab_check = 'some value'
            else:
                sab_check = None

        if sab_check is None:
            #set min_sab to 3.2.0 since 3.2.0 beta 1 has the api call for history search by nzo_id
            try:
                sab_minimum_version = '3.2.0'
                min_sab = re.sub('[^0-9]', '', sab_minimum_version)
                sab_vers = mylar.CONFIG.SAB_VERSION
                if 'beta' in sab_vers:
                    sab_vers = re.sub('[^0-9]', '', sab_vers)
                    if len(sab_vers) > 3:
                        sab_vers = sab_vers[:
                                            -1]  # remove beta value entirely...
                if parse_version(sab_vers) >= parse_version(min_sab):
                    logger.fdebug(
                        'SABnzbd version is higher than 3.2.0. Querying history based on nzo_id directly.'
                    )
                    hist_params['nzo_ids'] = sendresponse
                else:
                    logger.fdebug(
                        'SABnzbd version is less than 3.2.0. Querying history based on history size of 200.'
                    )
                    hist_params['limit'] = 200
            except Exception as e:
                logger.warn(
                    '[SABNZBD-VERSION-CHECK] Exception encountered trying to compare installed version [%s] to [%s]. Setting history length to last 200 items. (error: %s)'
                    % (mylar.CONFIG.SAB_VERSION, sab_minimum_version, e))
                hist_params['limit'] = 200

        hist = requests.get(self.sab_url, params=hist_params, verify=False)
        historyresponse = hist.json()
        #logger.info(historyresponse)
        histqueue = historyresponse['history']
        found = {'status': False}
        nzo_exists = False

        try:
            for hq in histqueue['slots']:
                logger.fdebug('nzo_id: %s --- %s [%s]' %
                              (hq['nzo_id'], sendresponse, hq['status']))
                if hq['nzo_id'] == sendresponse and any([
                        hq['status'] == 'Completed', hq['status'] == 'Running',
                        'comicrn' in hq['script'].lower()
                ]):
                    nzo_exists = True
                    logger.info(
                        'found matching completed item in history. Job has a status of %s'
                        % hq['status'])
                    if 'comicrn' in hq['script'].lower():
                        logger.warn(
                            'ComicRN has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.'
                        )
                        logger.warn(
                            'Either disable Completed Download Handling for SABnzbd within Mylar, or remove ComicRN from your category script in SABnzbd.'
                        )
                        return {'status': 'double-pp', 'failed': False}

                    if os.path.isfile(hq['storage']):
                        logger.fdebug('location found @ %s' % hq['storage'])
                        found = {
                            'status':
                            True,
                            'name':
                            ntpath.basename(
                                hq['storage']
                            ),  #os.pathre.sub('.nzb', '', hq['nzb_name']).strip(),
                            'location':
                            os.path.abspath(
                                os.path.join(hq['storage'], os.pardir)),
                            'failed':
                            False,
                            'issueid':
                            nzbinfo['issueid'],
                            'comicid':
                            nzbinfo['comicid'],
                            'apicall':
                            True,
                            'ddl':
                            False
                        }
                        break
                    else:
                        logger.error(
                            'no file found where it should be @ %s - is there another script that moves things after completion ?'
                            % hq['storage'])
                        return {'status': 'file not found', 'failed': False}

                elif hq['nzo_id'] == sendresponse and hq['status'] == 'Failed':
                    nzo_exists = True
                    #get the stage / error message and see what we can do
                    stage = hq['stage_log']
                    logger.fdebug('stage: %s' % (stage, ))
                    for x in stage:
                        if 'Failed' in x['actions'] and any(
                            [x['name'] == 'Unpack', x['name'] == 'Repair']):
                            if 'moving' in x['actions']:
                                logger.warn(
                                    'There was a failure in SABnzbd during the unpack/repair phase that caused a failure: %s'
                                    % x['actions'])
                            else:
                                logger.warn(
                                    'Failure occured during the Unpack/Repair phase of SABnzbd. This is probably a bad file: %s'
                                    % x['actions'])
                                if mylar.FAILED_DOWNLOAD_HANDLING is True:
                                    found = {
                                        'status':
                                        True,
                                        'name':
                                        re.sub('.nzb', '',
                                               hq['nzb_name']).strip(),
                                        'location':
                                        os.path.abspath(
                                            os.path.join(
                                                hq['storage'], os.pardir)),
                                        'failed':
                                        True,
                                        'issueid':
                                        sendresponse['issueid'],
                                        'comicid':
                                        sendresponse['comicid'],
                                        'apicall':
                                        True,
                                        'ddl':
                                        False
                                    }
                            break
                    if found['status'] is False:
                        return {'status': 'failed_in_sab', 'failed': False}
                    else:
                        break
                elif hq['nzo_id'] == sendresponse:
                    nzo_exists = True
                    logger.fdebug(
                        'nzo_id: %s found while processing queue in an unhandled status: %s'
                        % (hq['nzo_id'], hq['status']))
                    if hq['status'] == 'Queued' and roundtwo is False:
                        time.sleep(4)
                        return self.historycheck(nzbinfo, roundtwo=True)
                    else:
                        return {
                            'failed': False,
                            'status':
                            'unhandled status of: %s' % (hq['status'])
                        }

            if not nzo_exists:
                logger.error(
                    'Cannot find nzb %s in the queue.  Was it removed?' %
                    sendresponse)
                time.sleep(5)
                if roundtwo is False:
                    return self.historycheck(nzbinfo, roundtwo=True)
                else:
                    return {'status': 'nzb removed', 'failed': False}
        except Exception as e:
            logger.warn('error %s' % e)
            return {'status': False, 'failed': False}

        return found
Beispiel #43
0
def torrentdbsearch(seriesname, issue, comicid=None, nzbprov=None):
    myDB = db.DBConnection()
    seriesname_alt = None
    if comicid is None or comicid == 'None':
        pass
    else:
        logger.fdebug('ComicID: ' + str(comicid))
        snm = myDB.selectone("SELECT * FROM comics WHERE comicid=?",
                             [comicid]).fetchone()
        if snm is None:
            logger.fdebug('Invalid ComicID of ' + str(comicid) +
                          '. Aborting search.')
            return
        else:
            seriesname = snm['ComicName']
            seriesname_alt = snm['AlternateSearch']

    #remove 'and' and 'the':
    tsearch_rem1 = re.sub("\\band\\b", "%", seriesname.lower())
    tsearch_rem2 = re.sub("\\bthe\\b", "%", tsearch_rem1.lower())
    tsearch_removed = re.sub('\s+', ' ', tsearch_rem2)
    tsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\-\;\/\\=\?\&\.\s]', '%',
                                tsearch_removed)
    tsearch = tsearch_seriesname + "%"
    logger.fdebug('tsearch : ' + str(tsearch))
    AS_Alt = []
    tresults = []

    if mylar.ENABLE_CBT:
        tresults = myDB.select(
            "SELECT * FROM rssdb WHERE Title like ? AND Site='CBT'", [tsearch])
    if mylar.ENABLE_KAT:
        tresults += myDB.select(
            "SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'", [tsearch])

    logger.fdebug('seriesname_alt:' + str(seriesname_alt))
    if seriesname_alt is None or seriesname_alt == 'None':
        if tresults is None:
            logger.fdebug('no Alternate name given. Aborting search.')
            return "no results"
    else:
        chkthealt = seriesname_alt.split('##')
        if chkthealt == 0:
            AS_Alternate = seriesname_alt
            AS_Alt.append(seriesname_alt)
        for calt in chkthealt:
            AS_Alter = re.sub('##', '', calt)
            u_altsearchcomic = AS_Alter.encode('ascii', 'ignore').strip()
            AS_Altrem = re.sub("\\band\\b", "", u_altsearchcomic.lower())
            AS_Altrem = re.sub("\\bthe\\b", "", AS_Altrem.lower())

            AS_Alternate = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\+\'\&\?\@\s]', '%',
                                  AS_Altrem)

            AS_Altrem_mod = re.sub('[\&]', ' ', AS_Altrem)
            AS_formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',
                                             AS_Altrem_mod)
            AS_formatrem_seriesname = re.sub('\s+', ' ',
                                             AS_formatrem_seriesname)
            if AS_formatrem_seriesname[:1] == ' ':
                AS_formatrem_seriesname = AS_formatrem_seriesname[1:]
            AS_Alt.append(AS_formatrem_seriesname)

            AS_Alternate += '%'

            if mylar.ENABLE_CBT:
                #print "AS_Alternate:" + str(AS_Alternate)
                tresults += myDB.select(
                    "SELECT * FROM rssdb WHERE Title like ? AND Site='CBT'",
                    [AS_Alternate])
            if mylar.ENABLE_KAT:
                tresults += myDB.select(
                    "SELECT * FROM rssdb WHERE Title like ? AND Site='KAT'",
                    [AS_Alternate])

    if tresults is None:
        logger.fdebug('torrent search returned no results for ' + seriesname)
        return "no results"

    extensions = ('cbr', 'cbz')
    tortheinfo = []
    torinfo = {}

    for tor in tresults:
        torsplit = tor['Title'].split('/')
        logger.fdebug('tor-Title: ' + tor['Title'])
        logger.fdebug('there are ' + str(len(torsplit)) +
                      ' sections in this title')
        i = 0
        if nzbprov is not None:
            if nzbprov != tor['Site']:
                logger.fdebug('this is a result from ' + str(tor['Site']) +
                              ', not the site I am looking for of ' +
                              str(nzbprov))
                continue
        #0 holds the title/issue and format-type.
        while (i < len(torsplit)):
            #we'll rebuild the string here so that it's formatted accordingly to be passed back to the parser.
            logger.fdebug('section(' + str(i) + '): ' + str(torsplit[i]))
            #remove extensions
            titletemp = torsplit[i]
            titletemp = re.sub('cbr', '', str(titletemp))
            titletemp = re.sub('cbz', '', str(titletemp))
            titletemp = re.sub('none', '', str(titletemp))

            if i == 0:
                rebuiltline = str(titletemp)
            else:
                rebuiltline = rebuiltline + ' (' + str(titletemp) + ')'
            i += 1

        logger.fdebug('rebuiltline is :' + str(rebuiltline))

        seriesname_mod = seriesname
        foundname_mod = torsplit[0]
        seriesname_mod = re.sub("\\band\\b", " ", seriesname_mod.lower())
        foundname_mod = re.sub("\\band\\b", " ", foundname_mod.lower())
        seriesname_mod = re.sub("\\bthe\\b", " ", seriesname_mod.lower())
        foundname_mod = re.sub("\\bthe\\b", " ", foundname_mod.lower())

        seriesname_mod = re.sub('[\&]', ' ', seriesname_mod)
        foundname_mod = re.sub('[\&]', ' ', foundname_mod)

        formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\=\?\.]', '',
                                      seriesname_mod)
        formatrem_seriesname = re.sub('[\-]', ' ', formatrem_seriesname)
        formatrem_seriesname = re.sub(
            '[\/]', ' ', formatrem_seriesname
        )  #not necessary since seriesname in a torrent file won't have /
        formatrem_seriesname = re.sub('\s+', ' ', formatrem_seriesname)
        if formatrem_seriesname[:1] == ' ':
            formatrem_seriesname = formatrem_seriesname[1:]

        formatrem_torsplit = re.sub('[\'\!\@\#\$\%\:\;\\=\?\.]', '',
                                    foundname_mod)
        formatrem_torsplit = re.sub(
            '[\-]', ' ', formatrem_torsplit
        )  #we replace the - with space so we'll get hits if differnces
        formatrem_torsplit = re.sub(
            '[\/]', ' ', formatrem_torsplit
        )  #not necessary since if has a /, should be removed in above line
        formatrem_torsplit = re.sub('\s+', ' ', formatrem_torsplit)
        logger.fdebug(
            str(len(formatrem_torsplit)) + ' - formatrem_torsplit : ' +
            formatrem_torsplit.lower())
        logger.fdebug(
            str(len(formatrem_seriesname)) + ' - formatrem_seriesname :' +
            formatrem_seriesname.lower())

        if formatrem_seriesname.lower() in formatrem_torsplit.lower() or any(
                x.lower() in formatrem_torsplit.lower() for x in AS_Alt):
            logger.fdebug('matched to : ' + tor['Title'])
            logger.fdebug('matched on series title: ' + seriesname)
            titleend = formatrem_torsplit[len(formatrem_seriesname):]
            titleend = re.sub('\-', '',
                              titleend)  #remove the '-' which is unnecessary
            #remove extensions
            titleend = re.sub('cbr', '', str(titleend))
            titleend = re.sub('cbz', '', str(titleend))
            titleend = re.sub('none', '', str(titleend))
            logger.fdebug('titleend: ' + str(titleend))

            sptitle = titleend.split()
            extra = ''
            #            for sp in sptitle:
            #                if 'v' in sp.lower() and sp[1:].isdigit():
            #                    volumeadd = sp
            #                elif 'vol' in sp.lower() and sp[3:].isdigit():
            #                    volumeadd = sp
            #                #if sp.isdigit():
            #                    #print("issue # detected : " + str(issue))
            #                elif helpers.issuedigits(issue.rstrip()) == helpers.issuedigits(sp.rstrip()):
            #                    logger.fdebug("Issue matched for : " + str(issue))
            #the title on CBT has a mix-mash of crap...ignore everything after cbz/cbr to cleanit
            ctitle = tor['Title'].find('cbr')
            if ctitle == 0:
                ctitle = tor['Title'].find('cbz')
                if ctitle == 0:
                    ctitle = tor['Title'].find('none')
                    if ctitle == 0:
                        logger.fdebug(
                            'cannot determine title properly - ignoring for now.'
                        )
                        continue
            cttitle = tor['Title'][:ctitle]
            #print("change title to : " + str(cttitle))
            #           if extra == '':
            tortheinfo.append({
                'title': rebuiltline,  #cttitle,
                'link': tor['Link'],
                'pubdate': tor['Pubdate'],
                'site': tor['Site'],
                'length': tor['Size']
            })


#                    continue
#                        #torsend2client(formatrem_seriesname,tor['Link'])
#                    else:
#                        logger.fdebug("extra info given as :" + str(extra))
#                        logger.fdebug("extra information confirmed as a match")
#                        logger.fdebug("queuing link: " + str(tor['Link']))
#                        tortheinfo.append({
#                                      'title':   cttitle, #tor['Title'],
#                                      'link':    tor['Link'],
#                                      'pubdate': tor['Pubdate'],
#                                      'site':    tor['Site'],
#                                      'length':    tor['Size']
#                                      })
#                        logger.fdebug("entered info.")
#                        continue
#torsend2client(formatrem_seriesname,tor['Link'])
#else:
#    logger.fdebug("invalid issue#: " + str(sp))
#    #extra = str(extra) + " " + str(sp)
#                else:
#                    logger.fdebug("word detected - assuming continuation of title: " + str(sp))
#                    extra = str(extra) + " " + str(sp)

    torinfo['entries'] = tortheinfo

    return torinfo
Beispiel #44
0
def foundsearch(ComicID,
                IssueID,
                mode=None,
                down=None,
                provider=None,
                SARC=None,
                IssueArcID=None):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.

    # this is all redudant code that forceRescan already does.
    # should be redone at some point so that instead of rescanning entire
    # series directory, it just scans for the issue it just downloaded and
    # and change the status to Snatched accordingly. It is not to increment the have count
    # at this stage as it's not downloaded - just the .nzb has been snatched and sent to SAB.
    myDB = db.DBConnection()

    logger.info('comicid: ' + str(ComicID))
    logger.info('issueid: ' + str(IssueID))
    if mode != 'story_arc':
        comic = myDB.action('SELECT * FROM comics WHERE ComicID=?',
                            [ComicID]).fetchone()
        ComicName = comic['ComicName']
        if mode == 'want_ann':
            issue = myDB.action('SELECT * FROM annuals WHERE IssueID=?',
                                [IssueID]).fetchone()
        else:
            issue = myDB.action('SELECT * FROM issues WHERE IssueID=?',
                                [IssueID]).fetchone()
        CYear = issue['IssueDate'][:4]

    else:
        issue = myDB.action('SELECT * FROM readinglist WHERE IssueArcID=?',
                            [IssueArcID]).fetchone()
        ComicName = issue['ComicName']
        CYear = issue['IssueYEAR']

    if down is None:
        # update the status to Snatched (so it won't keep on re-downloading!)
        logger.fdebug('updating status to snatched')
        logger.fdebug('provider is ' + provider)
        newValue = {"Status": "Snatched"}
        if mode == 'story_arc':
            cValue = {"IssueArcID": IssueArcID}
            snatchedupdate = {"IssueArcID": IssueArcID}
            myDB.upsert("readinglist", newValue, cValue)
            # update the snatched DB
            snatchedupdate = {
                "IssueID": IssueArcID,
                "Status": "Snatched",
                "Provider": provider
            }

        else:
            if mode == 'want_ann':
                controlValue = {"IssueID": IssueID}
                myDB.upsert("annuals", newValue, controlValue)
            else:
                controlValue = {"IssueID": IssueID}
                myDB.upsert("issues", newValue, controlValue)

            # update the snatched DB
            snatchedupdate = {
                "IssueID": IssueID,
                "Status": "Snatched",
                "Provider": provider
            }

        if mode == 'story_arc':
            IssueNum = issue['IssueNumber']
            newsnatchValues = {
                "ComicName": ComicName,
                "ComicID": 'None',
                "Issue_Number": IssueNum,
                "DateAdded": helpers.now(),
                "Status": "Snatched"
            }
        else:
            if mode == 'want_ann':
                IssueNum = "Annual " + issue['Issue_Number']
            else:
                IssueNum = issue['Issue_Number']

            newsnatchValues = {
                "ComicName": ComicName,
                "ComicID": ComicID,
                "Issue_Number": IssueNum,
                "DateAdded": helpers.now(),
                "Status": "Snatched"
            }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)
        logger.info("updated the snatched.")
    else:
        logger.info("updating the downloaded.")
        if mode == 'want_ann':
            IssueNum = "Annual " + issue['Issue_Number']
        elif mode == 'story_arc':
            IssueNum = issue['IssueNumber']
            IssueID = IssueArcID
        else:
            IssueNum = issue['Issue_Number']

        snatchedupdate = {
            "IssueID": IssueID,
            "Status": "Downloaded",
            "Provider": provider
        }
        newsnatchValues = {
            "ComicName": ComicName,
            "ComicID": ComicID,
            "Issue_Number": IssueNum,
            "DateAdded": helpers.now(),
            "Status": "Downloaded"
        }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)

        if mode == 'story_arc':
            cValue = {"IssueArcID": IssueArcID}
            nValue = {"Status": "Downloaded"}
            myDB.upsert("readinglist", nValue, cValue)

        else:
            controlValue = {"IssueID": IssueID}
            newValue = {"Status": "Downloaded"}

            myDB.upsert("issues", newValue, controlValue)

    #print ("finished updating snatched db.")
    logger.info('Updating now complete for ' + ComicName + ' issue: ' +
                str(IssueNum))
    return
Beispiel #45
0
    def sender(self, filename, test=False):
        if mylar.CONFIG.NZBGET_PRIORITY:
            if any([
                    mylar.CONFIG.NZBGET_PRIORITY == 'Default',
                    mylar.CONFIG.NZBGET_PRIORITY == 'Normal'
            ]):
                nzbgetpriority = 0
            elif mylar.CONFIG.NZBGET_PRIORITY == 'Low':
                nzbgetpriority = -50
            elif mylar.CONFIG.NZBGET_PRIORITY == 'High':
                nzbgetpriority = 50
            elif mylar.CONFIG.NZBGET_PRIORITY == 'Very High':
                nzbgetpriority = 100
            elif mylar.CONFIG.NZBGET_PRIORITY == 'Force':
                nzbgetpriority = 900
            #there's no priority for "paused", so set "Very Low" and deal with that later...
            elif mylar.CONFIG.NZBGET_PRIORITY == 'Paused':
                nzbgetpriority = -100
        else:
            #if nzbget priority isn't selected, default to Normal (0)
            nzbgetpriority = 0

        with open(filename, 'rb') as in_file:
            nzbcontent = in_file.read()
            nzbcontent64 = standard_b64encode(nzbcontent).decode('utf-8')

        try:
            logger.fdebug('sending now to %s' % self.display_url)
            if mylar.CONFIG.NZBGET_CATEGORY is None:
                nzb_category = ''
            else:
                nzb_category = mylar.CONFIG.NZBGET_CATEGORY
            sendresponse = self.server.append(filename, nzbcontent64,
                                              nzb_category, nzbgetpriority,
                                              False, False, '', 0, 'SCORE')
        except http.client.socket.error as e:
            nzb_url = re.sub(mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED', str(e))
            logger.error(
                'Please check your NZBget host and port (if it is running). Tested against: %s'
                % nzb_url)
            return {'status': False}
        except xmlrpc.client.ProtocolError as e:
            logger.info(e, )
            if e.errmsg == "Unauthorized":
                err = re.sub(mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED',
                             e.errmsg)
                logger.error('Unauthorized username / password provided: %s' %
                             err)
                return {'status': False}
            else:
                err = "Protocol Error: %s" % re.sub(
                    mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED', e.errmsg)
                logger.error('Protocol error returned: %s' % err)
                return {'status': False}
        except Exception as e:
            logger.warn(
                'uh-oh: %s' %
                re.sub(mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED', str(e)))
            return {'status': False}
        else:
            if sendresponse <= 0:
                logger.warn(
                    'Invalid response received after sending to NZBGet: %s' %
                    sendresponse)
                return {'status': False}
            else:
                #sendresponse is the NZBID that we use to track the progress....
                return {'status': True, 'NZBID': sendresponse}
Beispiel #46
0
def forceRescan(ComicID, archive=None):
    myDB = db.DBConnection()
    # file check to see if issue exists
    rescan = myDB.action('SELECT * FROM comics WHERE ComicID=?',
                         [ComicID]).fetchone()
    logger.info('Now checking files for ' + rescan['ComicName'] + ' (' +
                str(rescan['ComicYear']) + ') in ' + rescan['ComicLocation'])
    if archive is None:
        fc = filechecker.listFiles(dir=rescan['ComicLocation'],
                                   watchcomic=rescan['ComicName'],
                                   Publisher=rescan['ComicPublisher'],
                                   AlternateSearch=rescan['AlternateSearch'])
    else:
        fc = filechecker.listFiles(dir=archive,
                                   watchcomic=rescan['ComicName'],
                                   Publisher=rescan['ComicPublisher'],
                                   AlternateSearch=rescan['AlternateSearch'])
    iscnt = rescan['Total']
    havefiles = 0
    if mylar.ANNUALS_ON:
        an_cnt = myDB.action("SELECT COUNT(*) FROM annuals WHERE ComicID=?",
                             [ComicID]).fetchall()
        anncnt = an_cnt[0][0]
    else:
        anncnt = 0
    fccnt = int(fc['comiccount'])
    issnum = 1
    fcnew = []
    fn = 0
    issuedupechk = []
    annualdupechk = []
    issueexceptdupechk = []
    reissues = myDB.action('SELECT * FROM issues WHERE ComicID=?',
                           [ComicID]).fetchall()
    issID_to_ignore = []
    issID_to_ignore.append(str(ComicID))
    while (fn < fccnt):
        haveissue = "no"
        issuedupe = "no"
        try:
            tmpfc = fc['comiclist'][fn]
        except IndexError:
            logger.fdebug(
                'Unable to properly retrieve a file listing for the given series.'
            )
            logger.fdebug(
                'Probably because the filenames being scanned are not in a parseable format'
            )
            if fn == 0:
                return
            else:
                break
        temploc = tmpfc['JusttheDigits'].replace('_', ' ')

        #        temploc = tmpfc['ComicFilename'].replace('_', ' ')
        temploc = re.sub('[\#\']', '', temploc)
        logger.fdebug('temploc: ' + str(temploc))
        if 'annual' not in temploc.lower():
            #remove the extension here
            extensions = ('.cbr', '.cbz')
            if temploc.lower().endswith(extensions):
                logger.fdebug('removed extension for issue: ' + str(temploc))
                temploc = temploc[:-4]
#            deccnt = str(temploc).count('.')
#            if deccnt > 1:
#logger.fdebug('decimal counts are :' + str(deccnt))
#if the file is formatted with '.' in place of spaces we need to adjust.
#before replacing - check to see if digits on either side of decimal and if yes, DON'T REMOVE
#                occur=1
#                prevstart = 0
#                digitfound = "no"
#                decimalfound = "no"
#                tempreconstruct = ''
#                while (occur <= deccnt):
#                    n = occur
#                    start = temploc.find('.')
#                    while start >=0 and n > 1:
#                        start = temploc.find('.', start+len('.'))
#                        n-=1
#                    #logger.fdebug('occurance ' + str(occur) + ' of . at position: ' + str(start))
#                    if temploc[prevstart:start].isdigit():
#                        if digitfound == "yes":
#                            #logger.fdebug('this is a decimal, assuming decimal issue.')
#                            decimalfound = "yes"
#                            reconst = "." + temploc[prevstart:start] + " "
#                        else:
#                            #logger.fdebug('digit detected.')
#                            digitfound = "yes"
#                            reconst = temploc[prevstart:start]
#                    else:
#                        reconst = temploc[prevstart:start] + " "
#                    #logger.fdebug('word: ' + reconst)
#                    tempreconstruct = tempreconstruct + reconst
#                    #logger.fdebug('tempreconstruct is : ' + tempreconstruct)
#                    prevstart = (start+1)
#                    occur+=1
#                #logger.fdebug('word: ' + temploc[prevstart:])
#                tempreconstruct = tempreconstruct + " " + temploc[prevstart:]
#                #logger.fdebug('final filename to use is : ' + str(tempreconstruct))
#                temploc = tempreconstruct
#logger.fdebug("checking " + str(temploc))
#fcnew_b4 = shlex.split(str(temploc))
            fcnew_af = re.findall('[^\()]+', temploc)
            fcnew = shlex.split(fcnew_af[0])

            fcn = len(fcnew)
            n = 0
            while (n <= iscnt):
                som = 0
                try:
                    reiss = reissues[n]
                except IndexError:
                    break
#                int_iss, iss_except = helpers.decimal_issue(reiss['Issue_Number'])
                int_iss = helpers.issuedigits(reiss['Issue_Number'])
                issyear = reiss['IssueDate'][:4]
                old_status = reiss['Status']
                issname = reiss['IssueName']
                #logger.fdebug('integer_issue:' + str(int_iss) + ' ... status: ' + str(old_status))

                #if comic in format of "SomeSeries 5(c2c)(2013).cbr" whatever...it'll die.
                #can't distinguish the 5(c2c) to tell it's the issue #...
                fnd_iss_except = 'None'
                #print ("Issue, int_iss, iss_except: " + str(reiss['Issue_Number']) + "," + str(int_iss) + "," + str(iss_except))

                while (som < fcn):
                    #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
                    #logger.fdebug('checking word - ' + str(fcnew[som]))
                    if ".cbr" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbr", "")
                    elif ".cbz" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbz", "")
                    if "(c2c)" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace("(c2c)", " ")
                        get_issue = shlex.split(str(fcnew[som]))
                        if fcnew[som] != " ":
                            fcnew[som] = get_issue[0]

                    if som + 1 < len(fcnew) and len(fcnew[som + 1]) == 2:
                        #print "fcnew[som+1]: " + str(fcnew[som+1])
                        #print "fcnew[som]: " + str(fcnew[som])
                        if 'au' in fcnew[som + 1].lower():
                            #if the 'AU' is in 005AU vs 005 AU it will yield different results.
                            fcnew[som] = fcnew[som] + 'AU'
                            fcnew[som + 1] = '93939999919190933'
                            logger.info(
                                'AU Detected seperate from issue - combining and continuing'
                            )
                        elif 'ai' in fcnew[som + 1].lower():
                            #if the 'AI' is in 005AI vs 005 AI it will yield different results.
                            fcnew[som] = fcnew[som] + 'AI'
                            fcnew[som + 1] = '93939999919190933'
                            logger.info(
                                'AI Detected seperate from issue - combining and continuing'
                            )

                    #sometimes scanners refuse to use spaces between () and lump the issue right at the start
                    #mylar assumes it's all one word in this case..let's dump the brackets.

                    fcdigit = helpers.issuedigits(fcnew[som])

                    #logger.fdebug("fcdigit: " + str(fcdigit))
                    #logger.fdebug("int_iss: " + str(int_iss))

                    if int(fcdigit) == int_iss:
                        logger.fdebug('issue match - fcdigit: ' +
                                      str(fcdigit) + ' ... int_iss: ' +
                                      str(int_iss))

                        if '-' in temploc and temploc.find(
                                reiss['Issue_Number']) > temploc.find('-'):
                            logger.fdebug(
                                'I have detected a possible Title in the filename'
                            )
                            logger.fdebug(
                                'the issue # has occured after the -, so I assume that it is part of the Title'
                            )
                            break
                        for d in issuedupechk:
                            if int(d['fcdigit']) == int(fcdigit):
                                logger.fdebug(
                                    'duplicate issue detected - not counting this: '
                                    + str(tmpfc['ComicFilename']))
                                logger.fdebug('is a duplicate of ' +
                                              d['filename'])
                                logger.fdebug('fcdigit:' + str(fcdigit) +
                                              ' === dupedigit: ' +
                                              str(d['fcdigit']))
                                issuedupe = "yes"
                                break
                        if issuedupe == "no":
                            logger.fdebug('matched...issue: ' +
                                          rescan['ComicName'] + '#' +
                                          str(reiss['Issue_Number']) +
                                          ' --- ' + str(int_iss))
                            havefiles += 1
                            haveissue = "yes"
                            isslocation = str(tmpfc['ComicFilename'])
                            issSize = str(tmpfc['ComicSize'])
                            logger.fdebug('.......filename: ' +
                                          str(isslocation))
                            logger.fdebug('.......filesize: ' +
                                          str(tmpfc['ComicSize']))
                            # to avoid duplicate issues which screws up the count...let's store the filename issues then
                            # compare earlier...
                            issuedupechk.append({
                                'fcdigit':
                                int(fcdigit),
                                'filename':
                                tmpfc['ComicFilename']
                            })
                        break
                        #else:
                        # if the issue # matches, but there is no year present - still match.
                        # determine a way to match on year if present, or no year (currently).

                    if issuedupe == "yes":
                        logger.fdebug(
                            'I should break out here because of a dupe.')
                        break
                    som += 1
                if haveissue == "yes" or issuedupe == "yes": break
                n += 1
        else:
            # annual inclusion here.
            #logger.fdebug("checking " + str(temploc))
            reannuals = myDB.action('SELECT * FROM annuals WHERE ComicID=?',
                                    [ComicID]).fetchall()
            fcnew = shlex.split(str(temploc))
            fcn = len(fcnew)
            n = 0
            reann = None
            while (n < anncnt):
                som = 0
                try:
                    reann = reannuals[n]
                except IndexError:
                    break
                int_iss, iss_except = helpers.decimal_issue(
                    reann['Issue_Number'])
                issyear = reann['IssueDate'][:4]
                old_status = reann['Status']
                while (som < fcn):
                    #counts get buggered up when the issue is the last field in the filename - ie. '50$
                    #logger.fdebug('checking word - ' + str(fcnew[som]))
                    if ".cbr" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbr", "")
                    elif ".cbz" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbz", "")
                    if "(c2c)" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace("(c2c)", " ")
                        get_issue = shlex.split(str(fcnew[som]))
                        if fcnew[som] != " ":
                            fcnew[som] = get_issue[0]
                    if fcnew[som].lower() == 'annual':
                        logger.fdebug('Annual detected.')
                        if fcnew[som + 1].isdigit():
                            ann_iss = fcnew[som + 1]
                            logger.fdebug('Annual # ' + str(ann_iss) +
                                          ' detected.')
                            fcdigit = helpers.issuedigits(ann_iss)
                    logger.fdebug('fcdigit:' + str(fcdigit))
                    logger.fdebug('int_iss:' + str(int_iss))
                    if int(fcdigit) == int_iss:
                        logger.fdebug('annual match - issue : ' + str(int_iss))
                        for d in annualdupechk:
                            if int(d['fcdigit']) == int(fcdigit):
                                logger.fdebug(
                                    'duplicate annual issue detected - not counting this: '
                                    + str(tmpfc['ComicFilename']))
                                issuedupe = "yes"
                                break
                        if issuedupe == "no":
                            logger.fdebug('matched...annual issue: ' +
                                          rescan['ComicName'] + '#' +
                                          str(reann['Issue_Number']) +
                                          ' --- ' + str(int_iss))
                            havefiles += 1
                            haveissue = "yes"
                            isslocation = str(tmpfc['ComicFilename'])
                            issSize = str(tmpfc['ComicSize'])
                            logger.fdebug('.......filename: ' +
                                          str(isslocation))
                            logger.fdebug('.......filesize: ' +
                                          str(tmpfc['ComicSize']))
                            # to avoid duplicate issues which screws up the count...let's store the filename issues then
                            # compare earlier...
                            annualdupechk.append({'fcdigit': int(fcdigit)})
                        break
                    som += 1
                if haveissue == "yes": break
                n += 1

        if issuedupe == "yes": pass
        else:
            #we have the # of comics, now let's update the db.
            #even if we couldn't find the physical issue, check the status.
            #-- if annuals aren't enabled, this will bugger out.
            writeit = True
            if mylar.ANNUALS_ON:
                if 'annual' in temploc.lower():
                    if reann is None:
                        logger.fdebug(
                            'Annual present in location, but series does not have any annuals attached to it - Ignoring'
                        )
                        writeit = False
                    else:
                        iss_id = reann['IssueID']
                else:
                    iss_id = reiss['IssueID']
            else:
                if 'annual' in temploc.lower():
                    logger.fdebug(
                        'Annual support not enabled, but annual issue present within directory. Ignoring annual.'
                    )
                    writeit = False
                else:
                    iss_id = reiss['IssueID']

            if writeit == True:
                logger.fdebug('issueID to write to db:' + str(iss_id))
                controlValueDict = {"IssueID": iss_id}

                #if Archived, increase the 'Have' count.
                #if archive:
                #    issStatus = "Archived"

                if haveissue == "yes":
                    issStatus = "Downloaded"
                    newValueDict = {
                        "Location": isslocation,
                        "ComicSize": issSize,
                        "Status": issStatus
                    }

                    issID_to_ignore.append(str(iss_id))

                    if 'annual' in temploc.lower():
                        myDB.upsert("annuals", newValueDict, controlValueDict)
                    else:
                        myDB.upsert("issues", newValueDict, controlValueDict)
        fn += 1

    logger.fdebug('IssueID to ignore: ' + str(issID_to_ignore))

    #here we need to change the status of the ones we DIDN'T FIND above since the loop only hits on FOUND issues.
    update_iss = []
    tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(
        seq=','.join(['?'] * (len(issID_to_ignore) - 1)))
    chkthis = myDB.action(tmpsql, issID_to_ignore).fetchall()
    #    chkthis = None
    if chkthis is None:
        pass
    else:
        for chk in chkthis:
            old_status = chk['Status']
            #logger.fdebug('old_status:' + str(old_status))
            if old_status == "Skipped":
                if mylar.AUTOWANT_ALL:
                    issStatus = "Wanted"
                else:
                    issStatus = "Skipped"
            elif old_status == "Archived":
                issStatus = "Archived"
            elif old_status == "Downloaded":
                issStatus = "Archived"
            elif old_status == "Wanted":
                issStatus = "Wanted"
            elif old_status == "Ignored":
                issStatus = "Ignored"
            elif old_status == "Snatched":  #this is needed for torrents, or else it'll keep on queuing..
                issStatus = "Snatched"
            else:
                issStatus = "Skipped"

            #logger.fdebug("new status: " + str(issStatus))

            update_iss.append({"IssueID": chk['IssueID'], "Status": issStatus})

    if len(update_iss) > 0:
        i = 0
        #do it like this to avoid DB locks...
        for ui in update_iss:
            controlValueDict = {"IssueID": ui['IssueID']}
            newStatusValue = {"Status": ui['Status']}
            myDB.upsert("issues", newStatusValue, controlValueDict)
            i += 1
        logger.info('Updated the status of ' + str(i) + ' issues for ' +
                    rescan['ComicName'] + ' (' + str(rescan['ComicYear']) +
                    ') that were not found.')

    logger.info('Total files located: ' + str(havefiles))
    foundcount = havefiles
    arcfiles = 0
    arcanns = 0
    # if filechecker returns 0 files (it doesn't find any), but some issues have a status of 'Archived'
    # the loop below won't work...let's adjust :)
    arcissues = myDB.action(
        "SELECT count(*) FROM issues WHERE ComicID=? and Status='Archived'",
        [ComicID]).fetchall()
    if int(arcissues[0][0]) > 0:
        arcfiles = arcissues[0][0]
    arcannuals = myDB.action(
        "SELECT count(*) FROM annuals WHERE ComicID=? and Status='Archived'",
        [ComicID]).fetchall()
    if int(arcissues[0][0]) > 0:
        arcanns = arcannuals[0][0]

    if arcfiles > 0 and arcanns > 0:
        arcfiles = arcfiles + arcanns
        havefiles = havefiles + arcfiles
        logger.fdebug('Adjusting have total to ' + str(havefiles) +
                      ' because of this many archive files:' + str(arcfiles))

    ignorecount = 0
    if mylar.IGNORE_HAVETOTAL:  # if this is enabled, will increase Have total as if in Archived Status
        ignores = myDB.action(
            "SELECT count(*) FROM issues WHERE ComicID=? AND Status='Ignored'",
            [ComicID]).fetchall()
        if int(ignores[0][0]) > 0:
            ignorecount = ignores[0][0]
            havefiles = havefiles + ignorecount
            logger.fdebug('Adjusting have total to ' + str(havefiles) +
                          ' because of this many Ignored files:' +
                          str(ignorecount))

    #now that we are finished...
    #adjust for issues that have been marked as Downloaded, but aren't found/don't exist.
    #do it here, because above loop only cycles though found comics using filechecker.
    downissues = myDB.select(
        "SELECT * FROM issues WHERE ComicID=? and Status='Downloaded'",
        [ComicID])
    downissues += myDB.select(
        "SELECT * FROM annuals WHERE ComicID=? and Status='Downloaded'",
        [ComicID])
    if downissues is None:
        pass
    else:
        archivedissues = 0  #set this to 0 so it tallies correctly.
        for down in downissues:
            #print "downlocation:" + str(down['Location'])
            #remove special characters from
            #temploc = rescan['ComicLocation'].replace('_', ' ')
            #temploc = re.sub('[\#\'\/\.]', '', temploc)
            #print ("comiclocation: " + str(rescan['ComicLocation']))
            #print ("downlocation: " + str(down['Location']))
            if down['Location'] is None:
                logger.fdebug(
                    'location does not exist which means file was not downloaded successfully, or was moved.'
                )
                controlValue = {"IssueID": down['IssueID']}
                newValue = {"Status": "Archived"}
                myDB.upsert("issues", newValue, controlValue)
                archivedissues += 1
                pass
            else:
                comicpath = os.path.join(rescan['ComicLocation'],
                                         down['Location'])
                if os.path.exists(comicpath):
                    pass
                    #print "Issue exists - no need to change status."
                else:
                    #print "Changing status from Downloaded to Archived - cannot locate file"
                    controlValue = {"IssueID": down['IssueID']}
                    newValue = {"Status": "Archived"}
                    myDB.upsert("issues", newValue, controlValue)
                    archivedissues += 1
        totalarc = arcfiles + archivedissues
        havefiles = havefiles + archivedissues  #arcfiles already tallied in havefiles in above segment
        logger.fdebug(
            'I have changed the status of ' + str(archivedissues) +
            ' issues to a status of Archived, as I now cannot locate them in the series directory.'
        )

    #let's update the total count of comics that was found.
    controlValueStat = {"ComicID": rescan['ComicID']}
    newValueStat = {"Have": havefiles}

    combined_total = rescan['Total'] + anncnt

    myDB.upsert("comics", newValueStat, controlValueStat)
    logger.info('I have physically found ' + str(foundcount) +
                ' issues, ignored ' + str(ignorecount) +
                ' issues, and accounted for ' + str(totalarc) +
                ' in an Archived state. Total Issue Count: ' + str(havefiles) +
                ' / ' + str(combined_total))

    return
Beispiel #47
0
    def processor(self, nzbinfo):
        nzbid = nzbinfo['NZBID']
        try:
            logger.fdebug(
                'Now checking the active queue of nzbget for the download')
            queueinfo = self.server.listgroups()
        except Exception as e:
            logger.warn(
                'Error attempting to retrieve active queue listing: %s' % e)
            return {'status': False}
        else:
            logger.fdebug('valid queue result returned. Analyzing...')
            queuedl = [qu for qu in queueinfo if qu['NZBID'] == nzbid]
            if len(queuedl) == 0:
                logger.warn(
                    'Unable to locate NZBID %s in active queue. Could it be finished already ?'
                    % nzbid)
                return self.historycheck(nzbinfo)

            stat = False
            double_pp = False
            double_type = None
            while stat is False:
                time.sleep(10)
                queueinfo = self.server.listgroups()
                queuedl = [qu for qu in queueinfo if qu['NZBID'] == nzbid]
                if len(queuedl) == 0:
                    logger.fdebug(
                        'Item is no longer in active queue. It should be finished by my calculations'
                    )
                    stat = True
                else:
                    if 'comicrn' in queuedl[0]['PostInfoText'].lower():
                        double_pp = True
                        double_type = 'ComicRN'
                    elif 'nzbtomylar' in queuedl[0]['PostInfoText'].lower():
                        double_pp = True
                        double_type = 'nzbToMylar'

                    if all([
                            len(queuedl[0]['ScriptStatuses']) > 0,
                            double_pp is False
                    ]):
                        for x in queuedl[0]['ScriptStatuses']:
                            if 'comicrn' in x['Name'].lower():
                                double_pp = True
                                double_type = 'ComicRN'
                                break
                            elif 'nzbtomylar' in x['Name'].lower():
                                double_pp = True
                                double_type = 'nzbToMylar'
                                break

                    if all([
                            len(queuedl[0]['Parameters']) > 0,
                            double_pp is False
                    ]):
                        for x in queuedl[0]['Parameters']:
                            if all([
                                    'comicrn' in x['Name'].lower(),
                                    x['Value'] == 'yes'
                            ]):
                                double_pp = True
                                double_type = 'ComicRN'
                                break
                            elif all([
                                    'nzbtomylar' in x['Name'].lower(),
                                    x['Value'] == 'yes'
                            ]):
                                double_pp = True
                                double_type = 'nzbToMylar'
                                break

                    if double_pp is True:
                        logger.warn(
                            '%s has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.'
                            % double_type)
                        logger.warn(
                            'Either disable Completed Download Handling for NZBGet within Mylar, or remove %s from your category script in NZBGet.'
                            % double_type)
                        return {'status': 'double-pp', 'failed': False}

                    logger.fdebug('status: %s' % queuedl[0]['Status'])
                    logger.fdebug('name: %s' % queuedl[0]['NZBName'])
                    logger.fdebug('FileSize: %sMB' % queuedl[0]['FileSizeMB'])
                    logger.fdebug('Download Left: %sMB' %
                                  queuedl[0]['RemainingSizeMB'])
                    logger.fdebug('health: %s' % (queuedl[0]['Health'] / 10))
                    logger.fdebug('destination: %s' % queuedl[0]['DestDir'])

            logger.fdebug('File has now downloaded!')
            time.sleep(
                5
            )  #wait some seconds so shit can get written to history properly
            return self.historycheck(nzbinfo)
Beispiel #48
0
def storyarcinfo(xmlid):

    comicLibrary = listStoryArcs()

    arcinfo = {}

    if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
        logger.warn('You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.')
        return
    else:
        comicapi = mylar.CONFIG.COMICVINE_API

    #respawn to the exact id for the story arc and count the # of issues present.
    ARCPULL_URL = mylar.CVURL + 'story_arc/4045-' + str(xmlid) + '/?api_key=' + str(comicapi) + '&field_list=issues,publisher,name,first_appeared_in_issue,deck,image&format=xml&offset=0'
    #logger.fdebug('arcpull_url:' + str(ARCPULL_URL))

    #new CV API restriction - one api request / second.
    if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CONFIG.CVAPI_RATE)

    #download the file:
    payload = None

    try:
        r = requests.get(ARCPULL_URL, params=payload, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('While parsing data from ComicVine, got exception: %s' % e)
        return

    try:
        arcdom = parseString(r.content)
    except ExpatError:
        if '<title>Abnormal Traffic Detected' in r.content:
            logger.error('ComicVine has banned this server\'s IP address because it exceeded the API rate limit.')
        else:
            logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content))
        return
    except Exception as e:
        logger.warn('While parsing data from ComicVine, got exception: %s for data: %s' % (e, r.content))
        return

    try:
        logger.fdebug('story_arc ascension')
        issuedom = arcdom.getElementsByTagName('issue')
        issuecount = len( issuedom ) #arcdom.getElementsByTagName('issue') )
        isc = 0
        arclist = ''
        ordernum = 1
        for isd in issuedom:
            zeline = isd.getElementsByTagName('id')
            isdlen = len( zeline )
            isb = 0
            while ( isb < isdlen):
                if isc == 0:
                    arclist = str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
                else:
                    arclist += '|' + str(zeline[isb].firstChild.wholeText).strip() + ',' + str(ordernum)
                ordernum+=1 
                isb+=1

            isc+=1

    except:
        logger.fdebug('unable to retrive issue count - nullifying value.')
        issuecount = 0

    try:
        firstid = None
        arcyear = None
        fid = len ( arcdom.getElementsByTagName('id') )
        fi = 0
        while (fi < fid):
            if arcdom.getElementsByTagName('id')[fi].parentNode.nodeName == 'first_appeared_in_issue':
                if not arcdom.getElementsByTagName('id')[fi].firstChild.wholeText == xmlid:
                    logger.fdebug('hit it.')
                    firstid = arcdom.getElementsByTagName('id')[fi].firstChild.wholeText
                    break # - dont' break out here as we want to gather ALL the issue ID's since it's here
            fi+=1
        logger.fdebug('firstid: ' + str(firstid))
        if firstid is not None:
            firstdom = cv.pulldetails(comicid=None, type='firstissue', issueid=firstid)
            logger.fdebug('success')
            arcyear = cv.Getissue(firstid,firstdom,'firstissue')
    except:
        logger.fdebug('Unable to retrieve first issue details. Not caclulating at this time.')

    try:
        xmlimage = arcdom.getElementsByTagName('super_url')[0].firstChild.wholeText
    except:
        xmlimage = "cache/blankcover.jpg"

    try:
        xmldesc = arcdom.getElementsByTagName('desc')[0].firstChild.wholeText
    except:
        xmldesc = "None"

    try:
        xmlpub = arcdom.getElementsByTagName('publisher')[0].firstChild.wholeText
    except:
        xmlpub = "None"

    try:
        xmldeck = arcdom.getElementsByTagName('deck')[0].firstChild.wholeText
    except:
        xmldeck = "None"

    if xmlid in comicLibrary:
        haveit = comicLibrary[xmlid]
    else:
        haveit = "No"

    arcinfo = {
            #'name':                 xmlTag,    #theese four are passed into it only when it's a new add
            #'url':                  xmlurl,    #needs to be modified for refreshing to work completely.
            #'publisher':            xmlpub,
            'comicyear':            arcyear,
            'comicid':              xmlid,
            'issues':               issuecount,
            'comicimage':           xmlimage,
            'description':          xmldesc,
            'deck':                 xmldeck,
            'arclist':              arclist,
            'haveit':               haveit,
            'publisher':            xmlpub
            }

    return arcinfo
Beispiel #49
0
    def rename_file(self,
                    ofilename,
                    issue=None,
                    annualize=None,
                    arc=False,
                    file_format=None
                    ):  #comicname, issue, comicyear=None, issueid=None)
        comicid = self.comicid  # it's coming in unicoded...
        issueid = self.issueid

        if file_format is None:
            file_format = mylar.CONFIG.FILE_FORMAT

        logger.fdebug(type(comicid))
        logger.fdebug(type(issueid))
        logger.fdebug('comicid: %s' % comicid)
        logger.fdebug('issue# as per cv: %s' % issue)
        logger.fdebug('issueid:' + str(issueid))

        if issueid is None:
            logger.fdebug('annualize is ' + str(annualize))
            if arc:
                #this has to be adjusted to be able to include story arc issues that span multiple arcs
                chkissue = self.myDB.selectone(
                    "SELECT * from storyarcs WHERE ComicID=? AND Issue_Number=?",
                    [comicid, issue]).fetchone()
            else:
                chkissue = self.myDB.selectone(
                    "SELECT * from issues WHERE ComicID=? AND Issue_Number=?",
                    [comicid, issue]).fetchone()
                if all([
                        chkissue is None, annualize is None,
                        not mylar.CONFIG.ANNUALS_ON
                ]):
                    chkissue = self.myDB.selectone(
                        "SELECT * from annuals WHERE ComicID=? AND Issue_Number=?",
                        [comicid, issue]).fetchone()

            if chkissue is None:
                #rechk chkissue against int value of issue #
                if arc:
                    chkissue = self.myDB.selectone(
                        "SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?",
                        [comicid, issuedigits(issue)]).fetchone()
                else:
                    chkissue = self.myDB.selectone(
                        "SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?",
                        [comicid, issuedigits(issue)]).fetchone()
                    if all([
                            chkissue is None, annualize == 'yes',
                            mylar.CONFIG.ANNUALS_ON
                    ]):
                        chkissue = self.myDB.selectone(
                            "SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?",
                            [comicid, issuedigits(issue)]).fetchone()

                if chkissue is None:
                    logger.error('Invalid Issue_Number - please validate.')
                    return
                else:
                    logger.info(
                        'Int Issue_number compare found. continuing...')
                    issueid = chkissue['IssueID']
            else:
                issueid = chkissue['IssueID']

        #use issueid to get publisher, series, year, issue number
        logger.fdebug('issueid is now : ' + str(issueid))
        if arc:
            issueinfo = self.myDB.selectone(
                "SELECT * from storyarcs WHERE ComicID=? AND IssueID=? AND StoryArc=?",
                [comicid, issueid, arc]).fetchone()
        else:
            issueinfo = self.myDB.selectone(
                "SELECT * from issues WHERE ComicID=? AND IssueID=?",
                [comicid, issueid]).fetchone()
            if issueinfo is None:
                logger.fdebug('not an issue, checking against annuals')
                issueinfo = self.myDB.selectone(
                    "SELECT * from annuals WHERE ComicID=? AND IssueID=?",
                    [comicid, issueid]).fetchone()
                if issueinfo is None:
                    logger.fdebug(
                        'Unable to rename - cannot locate issue id within db')
                    return
                else:
                    annualize = True

        if issueinfo is None:
            logger.fdebug(
                'Unable to rename - cannot locate issue id within db')
            return

        #remap the variables to a common factor.
        if arc:
            issuenum = issueinfo['IssueNumber']
            issuedate = issueinfo['IssueDate']
            publisher = issueinfo['IssuePublisher']
            series = issueinfo['ComicName']
            seriesfilename = series  #Alternate FileNaming is not available with story arcs.
            seriesyear = issueinfo['SeriesYear']
            arcdir = helpers.filesafe(issueinfo['StoryArc'])
            if mylar.CONFIG.REPLACE_SPACES:
                arcdir = arcdir.replace(' ', mylar.CONFIG.REPLACE_CHAR)
            if mylar.CONFIG.STORYARCDIR:
                storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR,
                                         "StoryArcs", arcdir)
                logger.fdebug('Story Arc Directory set to : ' + storyarcd)
            else:
                logger.fdebug('Story Arc Directory set to : ' +
                              mylar.CONFIG.GRABBAG_DIR)
                storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR,
                                         mylar.CONFIG.GRABBAG_DIR)

            comlocation = storyarcd
            comversion = None  #need to populate this.

        else:
            issuenum = issueinfo['Issue_Number']
            issuedate = issueinfo['IssueDate']
            publisher = self.comic['ComicPublisher']
            series = self.comic['ComicName']
            if self.comic['AlternateFileName'] is None or self.comic[
                    'AlternateFileName'] == 'None':
                seriesfilename = series
            else:
                seriesfilename = self.comic['AlternateFileName']
                logger.fdebug(
                    'Alternate File Naming has been enabled for this series. Will rename series title to : '
                    + seriesfilename)
            seriesyear = self.comic['ComicYear']
            comlocation = self.comic['ComicLocation']
            comversion = self.comic['ComicVersion']

        unicodeissue = issuenum

        if type(issuenum) == str:
            vals = {
                '\xbd': '.5',
                '\xbc': '.25',
                '\xbe': '.75',
                '\u221e': '9999999999',
                '\xe2': '9999999999'
            }
        else:
            vals = {
                '\xbd': '.5',
                '\xbc': '.25',
                '\xbe': '.75',
                '\\u221e': '9999999999',
                '\xe2': '9999999999'
            }
        x = [vals[key] for key in vals if key in issuenum]
        if x:
            issuenum = x[0]
            logger.fdebug('issue number formatted: %s' % issuenum)

        #comicid = issueinfo['ComicID']
        #issueno = str(issuenum).split('.')[0]
        issue_except = 'None'
        issue_exceptions = [
            'AU', 'INH', 'NOW', 'AI', 'MU', 'A', 'B', 'C', 'X', 'O'
        ]
        valid_spaces = ('.', '-')
        for issexcept in issue_exceptions:
            if issexcept.lower() in issuenum.lower():
                logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
                v_chk = [v for v in valid_spaces if v in issuenum]
                if v_chk:
                    iss_space = v_chk[0]
                    logger.fdebug('character space denoted as : ' + iss_space)
                else:
                    logger.fdebug('character space not denoted.')
                    iss_space = ''
#                    if issexcept == 'INH':
#                       issue_except = '.INH'
                if issexcept == 'NOW':
                    if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
#                       issue_except = '.NOW'

                issue_except = iss_space + issexcept
                logger.fdebug('issue_except denoted as : ' + issue_except)
                issuenum = re.sub("[^0-9]", "", issuenum)
                break

#            if 'au' in issuenum.lower() and issuenum[:1].isdigit():
#                issue_except = ' AU'
#            elif 'ai' in issuenum.lower() and issuenum[:1].isdigit():
#                issuenum = re.sub("[^0-9]", "", issuenum)
#                issue_except = ' AI'
#            elif 'inh' in issuenum.lower() and issuenum[:1].isdigit():
#                issuenum = re.sub("[^0-9]", "", issuenum)
#                issue_except = '.INH'
#            elif 'now' in issuenum.lower() and issuenum[:1].isdigit():
#                if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
#                issuenum = re.sub("[^0-9]", "", issuenum)
#                issue_except = '.NOW'
        if '.' in issuenum:
            iss_find = issuenum.find('.')
            iss_b4dec = issuenum[:iss_find]
            if iss_find == 0:
                iss_b4dec = '0'
            iss_decval = issuenum[iss_find + 1:]
            if iss_decval.endswith('.'):
                iss_decval = iss_decval[:-1]
            if int(iss_decval) == 0:
                iss = iss_b4dec
                issdec = int(iss_decval)
                issueno = iss
            else:
                if len(iss_decval) == 1:
                    iss = iss_b4dec + "." + iss_decval
                    issdec = int(iss_decval) * 10
                else:
                    iss = iss_b4dec + "." + iss_decval.rstrip('0')
                    issdec = int(iss_decval.rstrip('0')) * 10
                issueno = iss_b4dec
        else:
            iss = issuenum
            issueno = iss
        # issue zero-suppression here
        if mylar.CONFIG.ZERO_LEVEL == "0":
            zeroadd = ""
        else:
            if mylar.CONFIG.ZERO_LEVEL_N == "none": zeroadd = ""
            elif mylar.CONFIG.ZERO_LEVEL_N == "0x": zeroadd = "0"
            elif mylar.CONFIG.ZERO_LEVEL_N == "00x": zeroadd = "00"

        logger.fdebug('Zero Suppression set to : ' +
                      str(mylar.CONFIG.ZERO_LEVEL_N))
        prettycomiss = None

        if issueno.isalpha():
            logger.fdebug('issue detected as an alpha.')
            prettycomiss = str(issueno)
        else:
            try:
                x = float(issuenum)
                #validity check
                if x < 0:
                    logger.info(
                        'I\'ve encountered a negative issue #: %s. Trying to accomodate.'
                        % issueno)
                    prettycomiss = '-' + str(zeroadd) + str(issueno[1:])
                elif x == 9999999999:
                    logger.fdebug('Infinity issue found.')
                    issuenum = 'infinity'
                elif x >= 0:
                    pass
                else:
                    raise ValueError
            except ValueError as e:
                logger.warn(
                    'Unable to properly determine issue number [ %s] - you should probably log this on github for help.'
                    % issueno)
                return

        if prettycomiss is None and len(str(issueno)) > 0:
            #if int(issueno) < 0:
            #    self._log("issue detected is a negative")
            #    prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
            if int(issueno) < 10:
                logger.fdebug('issue detected less than 10')
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                else:
                    prettycomiss = str(zeroadd) + str(iss)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                logger.fdebug('Zero level supplement set to ' +
                              str(mylar.CONFIG.ZERO_LEVEL_N) +
                              '. Issue will be set as : ' + str(prettycomiss))
            elif int(issueno) >= 10 and int(issueno) < 100:
                logger.fdebug(
                    'issue detected greater than 10, but less than 100')
                if mylar.CONFIG.ZERO_LEVEL_N == "none":
                    zeroadd = ""
                else:
                    zeroadd = "0"
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                else:
                    prettycomiss = str(zeroadd) + str(iss)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                logger.fdebug('Zero level supplement set to ' +
                              str(mylar.CONFIG.ZERO_LEVEL_N) +
                              '.Issue will be set as : ' + str(prettycomiss))
            else:
                logger.fdebug('issue detected greater than 100')
                if issuenum == 'infinity':
                    prettycomiss = 'infinity'
                else:
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                logger.fdebug('Zero level supplement set to ' +
                              str(mylar.CONFIG.ZERO_LEVEL_N) +
                              '. Issue will be set as : ' + str(prettycomiss))
        elif len(str(issueno)) == 0:
            prettycomiss = str(issueno)
            logger.fdebug(
                'issue length error - cannot determine length. Defaulting to None:  '
                + str(prettycomiss))

        logger.fdebug('Pretty Comic Issue is : ' + str(prettycomiss))
        if mylar.CONFIG.UNICODE_ISSUENUMBER:
            logger.fdebug('Setting this to Unicode format as requested: %s' %
                          prettycomiss)
            prettycomiss = unicodeissue

        issueyear = issuedate[:4]
        month = issuedate[5:7].replace('-', '').strip()
        month_name = helpers.fullmonth(month)
        if month_name is None:
            month_name = 'None'
        logger.fdebug('Issue Year : ' + str(issueyear))
        logger.fdebug('Publisher: ' + publisher)
        logger.fdebug('Series: ' + series)
        logger.fdebug('Year: ' + str(seriesyear))
        logger.fdebug('Comic Location: ' + comlocation)

        if self.comic['Corrected_Type'] is not None:
            if self.comic['Type'] != self.comic['Corrected_Type']:
                booktype = self.comic['Corrected_Type']
            else:
                booktype = self.comic['Type']
        else:
            booktype = self.comic['Type']

        if booktype == 'Print' or all(
            [booktype != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False]):
            chunk_fb = re.sub('\$Type', '', file_format)
            chunk_b = re.compile(r'\s+')
            chunk_file_format = chunk_b.sub(' ', chunk_fb)
        else:
            chunk_file_format = file_format

        if any([comversion is None, booktype != 'Print']):
            comversion = 'None'

        #if comversion is None, remove it so it doesn't populate with 'None'
        if comversion == 'None':
            chunk_f_f = re.sub('\$VolumeN', '', chunk_file_format)
            chunk_f = re.compile(r'\s+')
            chunk_file_format = chunk_f.sub(' ', chunk_f_f)
            logger.fdebug(
                'No version # found for series, removing from filename')
            logger.fdebug("new format: " + str(chunk_file_format))

        if annualize is None:
            chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
            chunk_f = re.compile(r'\s+')
            chunk_file_format = chunk_f.sub(' ', chunk_f_f)
            logger.fdebug('not an annual - removing from filename paramaters')
            logger.fdebug('new format: ' + str(chunk_file_format))

        else:
            logger.fdebug('chunk_file_format is: ' + str(chunk_file_format))
            if mylar.CONFIG.ANNUALS_ON:
                if 'annual' in series.lower():
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        #prettycomiss = "Annual " + str(prettycomiss)
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        #because it exists within title, strip it then use formatting tag for placement of wording.
                        chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
                        chunk_f = re.compile(r'\s+')
                        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                else:
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        prettycomiss = "Annual %s" % prettycomiss
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))

            else:
                #if annuals aren't enabled, then annuals are being tracked as independent series.
                #annualize will be true since it's an annual in the seriesname.
                if 'annual' in series.lower():
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        #prettycomiss = "Annual " + str(prettycomiss)
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        #because it exists within title, strip it then use formatting tag for placement of wording.
                        chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
                        chunk_f = re.compile(r'\s+')
                        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                else:
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        prettycomiss = "Annual %s" % prettycomiss
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))

                logger.fdebug('Annual detected within series title of ' +
                              series + '. Not auto-correcting issue #')

        seriesfilename = seriesfilename  #.encode('ascii', 'ignore').strip()
        filebad = [
            ':', ',', '/', '?', '!', '\'', '\"', '\*'
        ]  #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
        for dbd in filebad:
            if dbd in seriesfilename:
                if any([dbd == '/', dbd == '*']):
                    repthechar = '-'
                else:
                    repthechar = ''
                seriesfilename = seriesfilename.replace(dbd, repthechar)
                logger.fdebug(
                    'Altering series name due to filenaming restrictions: ' +
                    seriesfilename)

        publisher = re.sub('!', '', publisher)

        file_values = {
            '$Series': seriesfilename,
            '$Issue': prettycomiss,
            '$Year': issueyear,
            '$series': series.lower(),
            '$Publisher': publisher,
            '$publisher': publisher.lower(),
            '$VolumeY': 'V' + str(seriesyear),
            '$VolumeN': comversion,
            '$monthname': month_name,
            '$month': month,
            '$Annual': 'Annual',
            '$Type': booktype
        }

        extensions = ('.cbr', '.cbz', '.cb7')

        if ofilename.lower().endswith(extensions):
            path, ext = os.path.splitext(ofilename)

        if file_format == '':
            logger.fdebug(
                'Rename Files is not enabled - keeping original filename.')
            #check if extension is in nzb_name - will screw up otherwise
            if ofilename.lower().endswith(extensions):
                nfilename = ofilename[:-4]
            else:
                nfilename = ofilename
        else:
            chunk_file_format = re.sub('[()|[]]', '',
                                       chunk_file_format).strip()
            nfilename = helpers.replace_all(chunk_file_format, file_values)
            if mylar.CONFIG.REPLACE_SPACES:
                #mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                nfilename = nfilename.replace(' ', mylar.CONFIG.REPLACE_CHAR)

        nfilename = re.sub('[\,\:]', '', nfilename) + ext.lower()
        logger.fdebug('New Filename: ' + nfilename)

        if mylar.CONFIG.LOWERCASE_FILENAMES:
            nfilename = nfilename.lower()
            dst = os.path.join(comlocation, nfilename)
        else:
            dst = os.path.join(comlocation, nfilename)

        logger.fdebug('Source: ' + ofilename)
        logger.fdebug('Destination: ' + dst)

        rename_this = {
            "destination_dir": dst,
            "nfilename": nfilename,
            "issueid": issueid,
            "comicid": comicid
        }

        return rename_this
Beispiel #50
0
def findComic(name, mode, issue, limityear=None, type=None):

    #with mb_lock:
    comicResults = None
    comicLibrary = listLibrary()
    comiclist = []
    arcinfolist = []

    commons = ['and', 'the', '&', '-']
    for x in commons:
        cnt = 0
        for m in re.finditer(x, name.lower()):
            cnt +=1
            tehstart = m.start()
            tehend = m.end()
            if any([x == 'the', x == 'and']):
                if len(name) == tehend:
                    tehend =-1
                if not all([tehstart == 0, name[tehend] == ' ']) or not all([tehstart != 0, name[tehstart-1] == ' ', name[tehend] == ' ']):
                    continue
            else:
                name = name.replace(x, ' ', cnt)

    originalname = name
    if '+' in name:
       name = re.sub('\+', 'PLUS', name)

    pattern = re.compile(r'\w+', re.UNICODE)
    name = pattern.findall(name)

    if '+' in originalname:
        y = []
        for x in name:
            y.append(re.sub("PLUS", "%2B", x))
        name = y

    if limityear is None: limityear = 'None'

    comicquery = name

    if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
        logger.warn('You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.')
        return
    else:
        comicapi = mylar.CONFIG.COMICVINE_API

    if type is None:
        type = 'volume'

    #let's find out how many results we get from the query...
    searched = pullsearch(comicapi, comicquery, 0, type)
    if searched is None:
        return False
    totalResults = searched.getElementsByTagName('number_of_total_results')[0].firstChild.wholeText
    logger.fdebug("there are " + str(totalResults) + " search results...")
    if not totalResults:
        return False
    if int(totalResults) > 1000:
        logger.warn('Search returned more than 1000 hits [' + str(totalResults) + ']. Only displaying first 1000 results - use more specifics or the exact ComicID if required.')
        totalResults = 1000
    countResults = 0
    while (countResults < int(totalResults)):
        #logger.fdebug("querying " + str(countResults))
        if countResults > 0:
            offsetcount = countResults

            searched = pullsearch(comicapi, comicquery, offsetcount, type)
        comicResults = searched.getElementsByTagName(type)
        body = ''
        n = 0
        if not comicResults:
           break
        for result in comicResults:
                #retrieve the first xml tag (<tag>data</tag>)
                #that the parser finds with name tagName:
                arclist = []
                if type == 'story_arc':
                    #call cv.py here to find out issue count in story arc
                    try:
                        logger.fdebug('story_arc ascension')
                        names = len(result.getElementsByTagName('name'))
                        n = 0
                        logger.fdebug('length: ' + str(names))
                        xmlpub = None #set this incase the publisher field isn't populated in the xml
                        while (n < names):
                            logger.fdebug(result.getElementsByTagName('name')[n].parentNode.nodeName)
                            if result.getElementsByTagName('name')[n].parentNode.nodeName == 'story_arc':
                                logger.fdebug('yes')
                                try:
                                    xmlTag = result.getElementsByTagName('name')[n].firstChild.wholeText
                                    xmlTag = xmlTag.rstrip()
                                    logger.fdebug('name: ' + xmlTag)
                                except:
                                    logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
                                    return

                            elif result.getElementsByTagName('name')[n].parentNode.nodeName == 'publisher':
                                logger.fdebug('publisher check.')
                                xmlpub = result.getElementsByTagName('name')[n].firstChild.wholeText

                            n+=1
                    except:
                        logger.warn('error retrieving story arc search results.')
                        return

                    siteurl = len(result.getElementsByTagName('site_detail_url'))
                    s = 0
                    logger.fdebug('length: ' + str(names))
                    xmlurl = None
                    while (s < siteurl):
                        logger.fdebug(result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName)
                        if result.getElementsByTagName('site_detail_url')[s].parentNode.nodeName == 'story_arc':
                            try:
                                xmlurl = result.getElementsByTagName('site_detail_url')[s].firstChild.wholeText
                            except:
                                logger.error('There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.')
                                return
                        s+=1

                    xmlid = result.getElementsByTagName('id')[0].firstChild.wholeText

                    if xmlid is not None:
                        arcinfolist = storyarcinfo(xmlid)
                        logger.info('[IMAGE] : ' + arcinfolist['comicimage'])
                        comiclist.append({
                                'name':                 xmlTag,
                                'comicyear':            arcinfolist['comicyear'],
                                'comicid':              xmlid,
                                'cvarcid':              xmlid,
                                'url':                  xmlurl,
                                'issues':               arcinfolist['issues'],
                                'comicimage':           arcinfolist['comicimage'],
                                'publisher':            xmlpub,
                                'description':          arcinfolist['description'],
                                'deck':                 arcinfolist['deck'],
                                'arclist':              arcinfolist['arclist'],
                                'haveit':               arcinfolist['haveit']
                                })
                    else:
                        comiclist.append({
                                'name':                 xmlTag,
                                'comicyear':            arcyear,
                                'comicid':              xmlid,
                                'url':                  xmlurl,
                                'issues':               issuecount,
                                'comicimage':           xmlimage,
                                'publisher':            xmlpub,
                                'description':          xmldesc,
                                'deck':                 xmldeck,
                                'arclist':              arclist,
                                'haveit':               haveit
                                })

                        logger.fdebug('IssueID\'s that are a part of ' + xmlTag + ' : ' + str(arclist))
                else:
                    xmlcnt = result.getElementsByTagName('count_of_issues')[0].firstChild.wholeText
                    #here we can determine what called us, and either start gathering all issues or just limited ones.
                    if issue is not None and str(issue).isdigit():
                        #this gets buggered up with NEW/ONGOING series because the db hasn't been updated
                        #to reflect the proper count. Drop it by 1 to make sure.
                        limiter = int(issue) - 1
                    else: limiter = 0
                    #get the first issue # (for auto-magick calcs)

                    iss_len = len(result.getElementsByTagName('name'))
                    i=0
                    xmlfirst = '1'
                    xmllast = None
                    try:
                        while (i < iss_len):
                            if result.getElementsByTagName('name')[i].parentNode.nodeName == 'first_issue':
                                xmlfirst = result.getElementsByTagName('issue_number')[i].firstChild.wholeText
                                if '\xbd' in xmlfirst:
                                    xmlfirst = '1'  #if the first issue is 1/2, just assume 1 for logistics
                            elif result.getElementsByTagName('name')[i].parentNode.nodeName == 'last_issue':
                                xmllast = result.getElementsByTagName('issue_number')[i].firstChild.wholeText
                            if all([xmllast is not None, xmlfirst is not None]):
                                break
                            i+=1
                    except:
                        xmlfirst = '1'

                    if all([xmlfirst == xmllast, xmlfirst.isdigit(), xmlcnt == '0']):
                        xmlcnt = '1'

                    #logger.info('There are : ' + str(xmlcnt) + ' issues in this series.')
                    #logger.info('The first issue started at # ' + str(xmlfirst))
                    try:
                        d = decimal.Decimal(xmlfirst)
                    except Exception as e:
                        d = 1  # assume 1st issue as #1 if it can't be parsed.
                    if d < 1:
                        cnt_numerical = int(xmlcnt) + 1
                    else:
                        cnt_numerical = int(xmlcnt) + int(math.ceil(d)) # (of issues + start of first issue = numerical range)

                    #logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical))
                    #logger.info('The limiter (issue max that we know of) is # ' + str(limiter))
                    if cnt_numerical >= limiter:
                        cnl = len (result.getElementsByTagName('name'))
                        cl = 0
                        xmlTag = 'None'
                        xml_lastissueid = 'None'
                        while (cl < cnl):
                            if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'volume':
                                xmlTag = result.getElementsByTagName('name')[cl].firstChild.wholeText
                                #break

                            #if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'image':
                            #    xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText

                            if result.getElementsByTagName('name')[cl].parentNode.nodeName == 'last_issue':
                                xml_lastissueid = result.getElementsByTagName('id')[cl].firstChild.wholeText
                            cl+=1

                        try:
                            xmlimage = result.getElementsByTagName('super_url')[0].firstChild.wholeText
                        except Exception:
                            try:
                                xmlimage = result.getElementsByTagName('small_url')[0].firstChild.wholeText
                            except Exception:
                                xmlimage = "cache/blankcover.jpg"

                        if (result.getElementsByTagName('start_year')[0].firstChild) is not None:
                            xmlYr = result.getElementsByTagName('start_year')[0].firstChild.wholeText
                        else: xmlYr = "0000"

                        yearRange = []
                        tmpYr = re.sub('\?', '', xmlYr)

                        if tmpYr.isdigit():

                            yearRange.append(tmpYr)
                            tmpyearRange = int(xmlcnt) / 12
                            if float(tmpyearRange): tmpyearRange +1
                            possible_years = int(tmpYr) + tmpyearRange

                            for i in range(int(tmpYr), int(possible_years),1):
                                if not any(int(x) == int(i) for x in yearRange):
                                    yearRange.append(str(i))

                        logger.fdebug('[RESULT][' + str(limityear) + '] ComicName:' + xmlTag + ' -- ' + str(xmlYr) + ' [Series years: ' + str(yearRange) + ']')
                        if tmpYr != xmlYr:
                            xmlYr = tmpYr

                        if any([v in limityear for v in yearRange]) or limityear == 'None':
                            xmlurl = result.getElementsByTagName('site_detail_url')[0].firstChild.wholeText
                            idl = len (result.getElementsByTagName('id'))
                            idt = 0
                            xmlid = None
                            while (idt < idl):
                                if result.getElementsByTagName('id')[idt].parentNode.nodeName == 'volume':
                                    xmlid = result.getElementsByTagName('id')[idt].firstChild.wholeText
                                    break
                                idt+=1

                            if xmlid is None:
                                logger.error('Unable to figure out the comicid - skipping this : ' + str(xmlurl))
                                continue

                            publishers = result.getElementsByTagName('publisher')
                            if len(publishers) > 0:
                                pubnames = publishers[0].getElementsByTagName('name')
                                if len(pubnames) >0:
                                    xmlpub = pubnames[0].firstChild.wholeText
                                else:
                                    xmlpub = "Unknown"
                            else:
                                xmlpub = "Unknown"

                            #ignore specific publishers on a global scale here.
                            if mylar.CONFIG.IGNORED_PUBLISHERS is not None and any([x for x in mylar.CONFIG.IGNORED_PUBLISHERS if x.lower() == xmlpub.lower()]):
                                logger.fdebug('Ignored publisher [%s]. Ignoring this result.' % xmlpub)
                                continue

                            try:
                                xmldesc = result.getElementsByTagName('description')[0].firstChild.wholeText
                            except:
                                xmldesc = "None"

                            #this is needed to display brief synopsis for each series on search results page.
                            try:
                                xmldeck = result.getElementsByTagName('deck')[0].firstChild.wholeText
                            except:
                                xmldeck = "None"

                            xmltype = None
                            if xmldeck != 'None':
                                if any(['print' in xmldeck.lower(), 'digital' in xmldeck.lower(), 'paperback' in xmldeck.lower(), 'one shot' in re.sub('-', '', xmldeck.lower()).strip(), 'hardcover' in xmldeck.lower()]):
                                    if all(['print' in xmldeck.lower(), 'reprint' not in xmldeck.lower()]):
                                        xmltype = 'Print'
                                    elif 'digital' in xmldeck.lower():
                                        xmltype = 'Digital'
                                    elif 'paperback' in xmldeck.lower():
                                        xmltype = 'TPB'
                                    elif 'hardcover' in xmldeck.lower():
                                        xmltype = 'HC'
                                    elif 'oneshot' in re.sub('-', '', xmldeck.lower()).strip():
                                        xmltype = 'One-Shot'
                                    else:
                                        xmltype = 'Print'

                            if xmldesc != 'None' and xmltype is None:
                                if 'print' in xmldesc[:60].lower() and all(['print edition can be found' not in xmldesc.lower(), 'reprints' not in xmldesc.lower()]):
                                    xmltype = 'Print'
                                elif 'digital' in xmldesc[:60].lower() and 'digital edition can be found' not in xmldesc.lower():
                                    xmltype = 'Digital'
                                elif all(['paperback' in xmldesc[:60].lower(), 'paperback can be found' not in xmldesc.lower()]) or 'collects' in xmldesc[:60].lower():
                                    xmltype = 'TPB'
                                elif 'hardcover' in xmldesc[:60].lower() and 'hardcover can be found' not in xmldesc.lower():
                                    xmltype = 'HC'
                                elif any(['one-shot' in xmldesc[:60].lower(), 'one shot' in xmldesc[:60].lower()]) and any(['can be found' not in xmldesc.lower(), 'following the' not in xmldesc.lower()]):
                                    i = 0
                                    xmltype = 'One-Shot'
                                    avoidwords = ['preceding', 'after the special', 'following the']
                                    while i < 2:
                                        if i == 0:
                                            cbd = 'one-shot'
                                        elif i == 1:
                                            cbd = 'one shot'
                                        tmp1 = xmldesc[:60].lower().find(cbd)
                                        if tmp1 != -1:
                                            for x in avoidwords:
                                                tmp2 = xmldesc[:tmp1].lower().find(x)
                                                if tmp2 != -1:
                                                    xmltype = 'Print'
                                                    i = 3
                                                    break
                                        i+=1
                                else:
                                    xmltype = 'Print'

                            if xmlid in comicLibrary:
                                haveit = comicLibrary[xmlid]
                            else:
                                haveit = "No"
                            comiclist.append({
                                    'name':                 xmlTag,
                                    'comicyear':            xmlYr,
                                    'comicid':              xmlid,
                                    'url':                  xmlurl,
                                    'issues':               xmlcnt,
                                    'comicimage':           xmlimage,
                                    'publisher':            xmlpub,
                                    'description':          xmldesc,
                                    'deck':                 xmldeck,
                                    'type':                 xmltype,
                                    'haveit':               haveit,
                                    'lastissueid':          xml_lastissueid,
                                    'seriesrange':          yearRange  # returning additional information about series run polled from CV
                                    })
                            #logger.fdebug('year: %s - constraint met: %s [%s] --- 4050-%s' % (xmlYr,xmlTag,xmlYr,xmlid))
                        else:
                            #logger.fdebug('year: ' + str(xmlYr) + ' -  contraint not met. Has to be within ' + str(limityear))
                            pass
                n+=1
        #search results are limited to 100 and by pagination now...let's account for this.
        countResults = countResults + 100

    return comiclist
Beispiel #51
0
                logger.fdebug('gzip buffered')
                f = gzip.GzipFile(fileobj=buf)
                logger.fdebug('gzip filed.')
                torrent = f.read()
                logger.fdebug('gzip read.')
        else:
            torrent = response.read()

    except Exception, e:
        logger.warn('Error fetching data from %s: %s' % (site, e))
        return "fail"

    with open(filepath, 'wb') as the_file:
        the_file.write(torrent)

    logger.fdebug("saved.")
    #logger.fdebug('torrent file saved as : ' + str(filepath))
    if mylar.TORRENT_LOCAL:
        return "pass"
    #remote_file = urllib2.urlopen(linkit)
    #if linkit[-7:] != "torrent":
    #    filename += ".torrent"

    #local_file = open('%s' % (os.path.join(mylar.CACHE_DIR,filename)), 'w')
    #local_file.write(remote_file.read())
    #local_file.close()
    #remote_file.close()
    elif mylar.TORRENT_SEEDBOX:
        tssh = ftpsshup.putfile(filepath, filename)
        return tssh
Beispiel #52
0
    def test_notify(self):
        module = '[TEST-NOTIFIER]'
        try:
            r = self._session.get(self.TEST_NMA_URL,
                                  params={'apikey': self.apikey},
                                  verify=True)
        except requests.exceptions.RequestException as e:
            logger.error(
                module + '[' + str(e) +
                '] Unable to send via NMA. Aborting test notification - something is probably wrong...'
            )
            return {'status': False, 'message': str(e)}

        logger.fdebug('[NMA] Status code returned: ' + str(r.status_code))
        if r.status_code == 200:
            from xml.dom.minidom import parseString
            dom = parseString(r.content)
            try:
                success_info = dom.getElementsByTagName('success')
                success_code = success_info[0].getAttribute('code')
            except:
                error_info = dom.getElementsByTagName('error')
                error_code = error_info[0].getAttribute('code')
                error_message = error_info[0].childNodes[0].nodeValue
                logger.info(module + '[' + str(error_code) + '] ' +
                            error_message)
                return {
                    'status': False,
                    'message': '[' + str(error_code) + '] ' + error_message
                }

            else:
                logger.info(
                    module + '[' + str(success_code) +
                    '] NotifyMyAndroid apikey valid. Testing notification service with it.'
                )
        elif r.status_code >= 400 and r.status_code < 500:
            logger.error(module +
                         ' NotifyMyAndroid request failed: %s' % r.content)
            return {
                'status': False,
                'message':
                'Unable to send request to NMA - check your connection.'
            }
        else:
            logger.error(module +
                         ' NotifyMyAndroid notification failed serverside.')
            return {
                'status': False,
                'message': 'Internal Server Error. Try again later.'
            }

        event = 'Test Message'
        description = 'ZOMG Lazors PewPewPew!'
        data = {
            'apikey': self.apikey,
            'application': 'Mylar',
            'event': event.encode('utf-8'),
            'description': description.encode('utf-8'),
            'priority': 2
        }

        return self._send(data, '[NOTIFIER]')
Beispiel #53
0
def newpull():
    pagelinks = "http://www.previewsworld.com/Home/1/1/71/952"

    pageresponse = urllib2.urlopen(pagelinks)
    soup = BeautifulSoup(pageresponse)
    getthedate = soup.findAll("div", {"class": "Headline"})[0]
    #the date will be in the FIRST ahref
    try:
        getdate_link = getthedate('a')[0]
        newdates = getdate_link.findNext(text=True).strip()
    except IndexError:
        newdates = getthedate.findNext(text=True).strip()
    logger.fdebug('New Releases date detected as : ' +
                  re.sub('New Releases For', '', newdates).strip())
    cntlinks = soup.findAll('tr')
    lenlinks = len(cntlinks)

    publish = []
    resultURL = []
    resultmonth = []
    resultyear = []

    x = 0
    cnt = 0
    endthis = False
    pull_list = []

    publishers = {
        '914': 'DARK HORSE COMICS',
        '915': 'DC COMICS',
        '916': 'IDW PUBLISHING',
        '917': 'IMAGE COMICS',
        '918': 'MARVEL COMICS',
        '952': 'COMICS & GRAPHIC NOVELS'
    }

    while (x < lenlinks):
        headt = cntlinks[
            x]  #iterate through the hrefs pulling out only results.
        if 'STK669382' in str(headt):
            x += 1
            continue
        elif '?stockItemID=' in str(headt):
            #914 - Dark Horse Comics
            #915 - DC Comics
            #916 - IDW Publishing
            #917 - Image Comics
            #918 - Marvel Comics
            #952 - Comics & Graphic Novels
            #    - Magazines
            findurl_link = headt.findAll('a', href=True)[0]
            urlID = findurl_link.findNext(text=True)
            issue_link = findurl_link['href']
            issue_lk = issue_link.find('?stockItemID=')
            if issue_lk == -1:
                continue
            #headName = headt.findNext(text=True)
            publisher_id = issue_link[issue_lk - 3:issue_lk]
            for pub in publishers:
                if pub == publisher_id:
                    isspublisher = publishers[pub]
                    #logger.fdebug('publisher:' + str(isspublisher))
                    found_iss = headt.findAll('td')
                    if "Home/1/1/71/920" in issue_link:
                        #logger.fdebug('Ignoring - menu option.')
                        return
                    if "PREVIEWS" in headt:
                        #logger.fdebug('Ignoring: ' + found_iss[0])
                        break
                    if "MAGAZINES" in headt:
                        #logger.fdebug('End.')
                        endthis = True
                        break
                    if len(found_iss) > 0:
                        pull_list.append({
                            "iss_url":
                            found_iss[0],
                            "name":
                            found_iss[1].findNext(text=True),
                            "price":
                            found_iss[2],
                            "publisher":
                            isspublisher,
                            "ID":
                            urlID
                        })

            if endthis == True: break
        x += 1

    logger.fdebug(
        'Saving new pull-list information into local file for subsequent merge'
    )
    except_file = os.path.join(mylar.CACHE_DIR, 'newreleases.txt')
    try:
        csvfile = open(str(except_file), 'rb')
        csvfile.close()
    except (OSError, IOError):
        logger.fdebug('file does not exist - continuing.')
    else:
        logger.fdebug('file exists - removing.')
        os.remove(except_file)

    oldpub = None
    breakhtml = {"<td>", "<tr>", "</td>", "</tr>"}
    with open(str(except_file), 'wb') as f:
        f.write('%s\n' % (newdates))
        for pl in pull_list:
            if pl['publisher'] == oldpub:
                exceptln = str(pl['ID']) + "\t" + str(pl['name']) + "\t" + str(
                    pl['price'])
            else:
                exceptln = pl['publisher'] + "\n" + str(pl['ID']) + "\t" + str(
                    pl['name']) + "\t" + str(pl['price'])

            for lb in breakhtml:
                exceptln = re.sub(lb, '', exceptln).strip()

            exceptline = exceptln.decode('utf-8', 'ignore')
            f.write('%s\n' % (exceptline.encode('ascii', 'replace').strip()))
            oldpub = pl['publisher']
Beispiel #54
0
def extract_image(location, single=False, imquality=None, comicname=None):
    #location = full path to the cbr/cbz (filename included in path)
    #single = should be set to True so that a single file can have the coverfile
    #        extracted and have the cover location returned to the calling function
    #imquality = the calling function ('notif' for notifications will initiate a resize image before saving the cover)
    if PIL_Found is False:
        return
    cover = "notfound"
    pic_extensions = ('.jpg', '.png', '.webp')
    issue_ends = ('1', '0')
    modtime = os.path.getmtime(location)
    low_infile = 999999999999999999
    low_num = 1000
    local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, 'temp_notif')
    cb_filename = None
    cb_filenames = []
    metadata = None
    if single is True:
        location_in, dir_opt = open_archive(location)
        try:
            cntr = 0
            newlencnt = 0
            newlen = 0
            newlist = []
            for infile in location_in.infolist():
                cntr += 1
                basename = os.path.basename(infile.filename)
                if infile.filename == 'ComicInfo.xml':
                    logger.fdebug('Extracting ComicInfo.xml to display.')
                    metadata = location_in.read(infile.filename)
                    if cover == 'found':
                        break
                filename, extension = os.path.splitext(basename)
                tmp_infile = re.sub("[^0-9]", "", filename).strip()
                lenfile = len(infile.filename)
                if any([
                        tmp_infile == '', not getattr(infile, dir_opt), 'zzz'
                        in filename.lower(), 'logo' in filename.lower()
                ]) or ((comicname is not None) and all([
                        comicname.lower().startswith('z'),
                        filename.lower().startswith('z')
                ])):
                    continue
                if all([
                        infile.filename.lower().endswith(pic_extensions),
                        int(tmp_infile) < int(low_infile)
                ]):
                    #logger.info('cntr: %s / infolist: %s' % (cntr, len(location_in.infolist())) )
                    #get the length of the filename, compare it to others. scanner ones are always different named than the other 98% of the files.
                    if lenfile >= newlen:
                        newlen = lenfile
                        newlencnt += 1
                    newlist.append({
                        'length': lenfile,
                        'filename': infile.filename,
                        'tmp_infile': tmp_infile
                    })

                    #logger.info('newlen: %s / newlencnt: %s' % (newlen, newlencnt))
                    if newlencnt > 0 and lenfile >= newlen:
                        #logger.info('setting it to : %s' % infile.filename)
                        low_infile = tmp_infile
                        low_infile_name = infile.filename
                elif any([
                        '00a' in infile.filename, '00b' in infile.filename,
                        '00c' in infile.filename, '00d' in infile.filename,
                        '00e' in infile.filename, '00fc'
                        in infile.filename.lower()
                ]) and infile.filename.endswith(
                        pic_extensions) and cover == "notfound":
                    if cntr == 0:
                        altlist = ('00a', '00b', '00c', '00d', '00e', '00fc')
                        for alt in altlist:
                            if alt in infile.filename.lower():
                                cb_filename = infile.filename
                                cover = "found"
                                #logger.fdebug('[%s] cover found:%s' % (alt, infile.filename))
                                break
                elif all([
                        tmp_infile.endswith(issue_ends),
                        infile.filename.lower().endswith(pic_extensions),
                        int(tmp_infile) < int(low_infile), cover == 'notfound'
                ]):
                    cb_filenames.append(infile.filename)

            if cover != "found" and any(
                [len(cb_filenames) > 0, low_infile != 9999999999999]):
                logger.fdebug(
                    'Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : %s'
                    % (low_infile_name))
                # based on newlist - if issue doesn't end in 0 & 1, take the lowest numeric of the most common length of filenames within the rar
                if not any(
                    [low_infile.endswith('0'),
                     low_infile.endswith('1')]):
                    from collections import Counter
                    cnt = Counter([t['length'] for t in newlist])
                    #logger.info('cnt: %s' % (cnt,)) #cnt: Counter({15: 23, 20: 1})
                    tmpst = 999999999
                    cntkey = max(cnt.items(), key=itemgetter(1))[0]
                    #logger.info('cntkey: %s' % cntkey)
                    for x in newlist:
                        if x['length'] == cntkey and int(
                                x['tmp_infile']) < tmpst:
                            tmpst = int(x['tmp_infile'])
                            cb_filename = x['filename']
                            logger.fdebug('SETTING cb_filename set to : %s' %
                                          cb_filename)
                else:
                    cb_filename = low_infile_name
                    cover = "found"

        except Exception as e:
            logger.error(
                '[ERROR] Unable to properly retrieve the cover. It\'s probably best to re-tag this file : %s'
                % e)
            return

        logger.fdebug('cb_filename set to : %s' % cb_filename)

        if extension is not None:
            ComicImage = local_filename + extension
            try:
                insidefile = location_in.getinfo(cb_filename)
                img = Image.open(BytesIO(location_in.read(insidefile)))
                imdata = scale_image(img, "JPEG", 600)
                try:
                    ComicImage = str(base64.b64encode(imdata), 'utf-8')
                    RawImage = imdata
                except Exception as e:
                    ComicImage = str(base64.b64encode(imdata + "==="), 'utf-8')
                    RawImage = imdata + "==="

            except Exception as e:
                logger.warn('[WARNING] Unable to resize existing image: %s' %
                            e)
        else:
            ComicImage = local_filename
    return {
        'ComicImage': ComicImage,
        'metadata': metadata,
        'rawImage': RawImage
    }
Beispiel #55
0
def solicit(month, year):
    #convert to numerics just to ensure this...
    month = int(month)
    year = int(year)

    #print ( "month: " + str(month) )
    #print ( "year: " + str(year) )

    # in order to gather ALL upcoming - let's start to loop through months going ahead one at a time
    # until we get a null then break. (Usually not more than 3 months in advance is available)
    mnloop = 0
    upcoming = []

    publishers = {
        'DC Comics': 'DC Comics',
        'DC\'s': 'DC Comics',
        'Marvel': 'Marvel Comics',
        'Image': 'Image Comics',
        'IDW': 'IDW Publishing',
        'Dark Horse': 'Dark Horse'
    }

    # -- this is no longer needed (testing)
    #    while (mnloop < 5):
    #        if year == 2014:
    #            if len(str(month)) == 1:
    #                month_string = '0' + str(month)
    #            else:
    #                month_string = str(month)
    #            datestring = str(year) + str(month_string)
    #        else:
    #            datestring = str(month) + str(year)

    #        pagelinks = "http://www.comicbookresources.com/tag/solicits" + str(datestring)

    #using the solicits+datestring leaves out some entries occasionally
    #should use http://www.comicbookresources.com/tag/solicitations
    #then just use the logic below but instead of datestring, find the month term and
    #go ahead up to +5 months.

    if month > 0:
        month_start = month
        month_end = month + 5
        #if month_end > 12:
        # ms = 8, me=13  [(12-8)+(13-12)] = [4 + 1] = 5
        # [(12 - ms) + (me - 12)] = number of months (5)

        monthlist = []
        mongr = month_start

        #we need to build the months we can grab, but the non-numeric way.
        while (mongr <= month_end):
            mon = mongr
            if mon == 13:
                mon = 1
                year += 1

            if len(str(mon)) == 1:
                mon = '0' + str(mon)

            monthlist.append({
                "month": helpers.fullmonth(str(mon)).lower(),
                "num_month": mon,
                "year": str(year)
            })
            mongr += 1

        logger.info('months: ' + str(monthlist))

        pagelinks = "http://www.comicbookresources.com/tag/solicitations"

        #logger.info('datestring:' + datestring)
        #logger.info('checking:' + pagelinks)
        pageresponse = urllib2.urlopen(pagelinks)
        soup = BeautifulSoup(pageresponse)
        cntlinks = soup.findAll('h3')
        lenlinks = len(cntlinks)
        #logger.info( str(lenlinks) + ' results' )

        publish = []
        resultURL = []
        resultmonth = []
        resultyear = []

        x = 0
        cnt = 0

        while (x < lenlinks):
            headt = cntlinks[
                x]  #iterate through the hrefs pulling out only results.
            if "/?page=article&amp;id=" in str(headt):
                #print ("titlet: " + str(headt))
                headName = headt.findNext(text=True)
                #print ('headName: ' + headName)
                if 'Image' in headName: print 'IMAGE FOUND'
                if not all([
                        'Marvel' in headName, 'DC' in headName, 'Image'
                        in headName
                ]) and ('Solicitations' in headName or 'Solicits' in headName):
                    # test for month here (int(month) + 5)
                    if not any(
                            d.get('month', None) == str(headName).lower()
                            for d in monthlist):
                        for mt in monthlist:
                            if mt['month'] in headName.lower():
                                logger.info('matched on month: ' +
                                            str(mt['month']))
                                logger.info('matched on year: ' +
                                            str(mt['year']))
                                resultmonth.append(mt['num_month'])
                                resultyear.append(mt['year'])

                                pubstart = headName.find('Solicitations')
                                publishchk = False
                                for pub in publishers:
                                    if pub in headName[:pubstart]:
                                        #print 'publisher:' + str(publishers[pub])
                                        publish.append(publishers[pub])
                                        publishchk = True
                                        break
                                if publishchk == False:
                                    break
                                    #publish.append( headName[:pubstart].strip() )
                                abc = headt.findAll('a', href=True)[0]
                                ID_som = abc[
                                    'href']  #first instance will have the right link...
                                resultURL.append(ID_som)
                                #print '(' + str(cnt) + ') [ ' + publish[cnt] + '] Link URL: ' + resultURL[cnt]
                                cnt += 1

                    else:
                        logger.info('incorrect month - not using.')

            x += 1

        if cnt == 0:
            return  #break  # no results means, end it

        loopthis = (cnt - 1)
        #this loops through each 'found' solicit page
        #shipdate = str(month_string) + '-' + str(year)  - not needed.
        while (loopthis >= 0):
            #print 'loopthis is : ' + str(loopthis)
            #print 'resultURL is : ' + str(resultURL[loopthis])
            shipdate = str(resultmonth[loopthis]) + '-' + str(
                resultyear[loopthis])
            upcoming += populate(resultURL[loopthis], publish[loopthis],
                                 shipdate)
            loopthis -= 1

    logger.info(str(len(upcoming)) + ' upcoming issues discovered.')

    newfl = mylar.CACHE_DIR + "/future-releases.txt"
    newtxtfile = open(newfl, 'wb')

    cntr = 1
    for row in upcoming:
        if row['Extra'] is None or row['Extra'] == '':
            extrarow = 'N/A'
        else:
            extrarow = row['Extra']
        newtxtfile.write(
            str(row['Shipdate']) + '\t' + str(row['Publisher']) + '\t' +
            str(row['Issue']) + '\t' + str(row['Comic']) + '\t' +
            str(extrarow) + '\tSkipped' + '\t' + str(cntr) + '\n')
        cntr += 1

    newtxtfile.close()

    logger.fdebug('attempting to populate future upcoming...')

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")

    connection = sqlite3.connect(str(mylardb))
    cursor = connection.cursor()

    # we should extract the issues that are being watched, but no data is available yet ('Watch For' status)
    # once we get the data, store it, wipe the existing table, retrieve the new data, populate the data into
    # the table, recheck the series against the current watchlist and then restore the Watch For data.

    cursor.executescript('drop table if exists future;')

    cursor.execute(
        "CREATE TABLE IF NOT EXISTS future (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, FutureID text, ComicID text);"
    )
    connection.commit()

    csvfile = open(newfl, "rb")
    creader = csv.reader(csvfile, delimiter='\t')

    t = 1

    for row in creader:
        try:
            #print ("Row: %s" % row)
            cursor.execute("INSERT INTO future VALUES (?,?,?,?,?,?,?,null);",
                           row)
        except Exception, e:
            logger.fdebug("Error - invald arguments...-skipping")
            pass
        t += 1
Beispiel #56
0
def extract_image(location, single=False, imquality=None):
    #location = full path to the cbr/cbz (filename included in path)
    #single = should be set to True so that a single file can have the coverfile
    #        extracted and have the cover location returned to the calling function
    #imquality = the calling function ('notif' for notifications will initiate a resize image before saving the cover)
    if PIL_Found is False:
        return
    cover = "notfound"
    pic_extensions = ('.jpg', '.png', '.webp')
    issue_ends = ('1', '0')
    modtime = os.path.getmtime(location)
    low_infile = 9999999999999
    low_num = 1000
    local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, 'temp_notif')
    cb_filename = None
    cb_filenames = []
    metadata = None
    if single is True:
        if location.endswith(".cbz"):
            location_in = zipfile.ZipFile(location)
            dir_opt = 'is_dir'
            actual_ext = '.cbz'
        else:
            try:
                location_in = rarfile.RarFile(location)
                dir_opt = 'isdir'
                actual_ext = '.cbr'
            except rarfile.BadRarFile as e:
                logger.warn('[WARNING] %s: %s' % (location, e))
                try:
                    logger.info(
                        'Trying to see if this is a zip renamed as a rar: %s' %
                        (location))
                    location_in = zipfile.ZipFile(location)
                    dir_opt = 'is_dir'
                    actual_ext = '.cbz'
                except Exception as e:
                    logger.warn('[EXCEPTION] %s' % e)
                    return
            except:
                logger.warn('[EXCEPTION]: %s' % sys.exec_info()[0])
                return
        try:
            for infile in location_in.infolist():
                basename = os.path.basename(infile.filename)
                if infile.filename == 'ComicInfo.xml':
                    logger.fdebug('Extracting ComicInfo.xml to display.')
                    metadata = location_in.read(infile.filename)
                    if cover == 'found':
                        break
                filename, extension = os.path.splitext(basename)
                tmp_infile = re.sub("[^0-9]", "", filename).strip()
                if any([
                        tmp_infile == '', not getattr(infile, dir_opt), 'zzz'
                        in filename
                ]):
                    continue
                #logger.fdebug('[%s]issue_ends: %s' % (tmp_infile, tmp_infile.endswith(issue_ends)))
                #logger.fdebug('ext_ends: %s' % infile.filename.lower().endswith(pic_extensions))
                #logger.fdebug('(%s) < (%s) == %s' % (int(tmp_infile), int(low_infile), int(tmp_infile)<int(low_infile)))
                #logger.fdebug('is_dir == %s' % (not getattr(infile, dir_opt)))
                if all([
                        infile.filename.lower().endswith(pic_extensions),
                        int(tmp_infile) < int(low_infile)
                ]):
                    low_infile = tmp_infile
                    low_infile_name = infile.filename
                elif any([
                        '00a' in infile.filename, '00b' in infile.filename,
                        '00c' in infile.filename, '00d' in infile.filename,
                        '00e' in infile.filename, '00fc'
                        in infile.filename.lower()
                ]) and infile.filename.endswith(
                        pic_extensions) and cover == "notfound":
                    altlist = ('00a', '00b', '00c', '00d', '00e', '00fc')
                    for alt in altlist:
                        if alt in infile.filename.lower():
                            cb_filename = infile.filename
                            cover = "found"
                            #logger.fdebug('[%s] cover found:%s' % (alt, infile.filename))
                            break
                elif all([
                        tmp_infile.endswith(issue_ends),
                        infile.filename.lower().endswith(pic_extensions),
                        int(tmp_infile) < int(low_infile), cover == 'notfound'
                ]):
                    cb_filenames.append(infile.filename)
                    #logger.fdebug('filename set to: %s' % infile.filename)
                    #low_infile_name = infile.filename
                    #low_infile = tmp_infile
            if cover != "found" and any(
                [len(cb_filenames) > 0, low_infile != 9999999999999]):
                logger.fdebug(
                    'Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : %s'
                    % (low_infile_name))
                cb_filename = low_infile_name
                cover = "found"

        except Exception as e:
            logger.error(
                '[ERROR] Unable to properly retrieve the cover. It\'s probably best to re-tag this file : %s'
                % e)
            return

        logger.fdebug('cb_filename set to : %s' % cb_filename)

        if extension is not None:
            ComicImage = local_filename + extension
            try:
                insidefile = location_in.getinfo(cb_filename)
                img = Image.open(BytesIO(location_in.read(insidefile)))
                wpercent = (600 / float(img.size[0]))
                hsize = int((float(img.size[1]) * float(wpercent)))
                img = img.resize((600, hsize), Image.ANTIALIAS)
                output = BytesIO()
                img.save(output, format="JPEG")
                try:
                    ComicImage = str(base64.b64encode(output.getvalue()),
                                     'utf-8')
                except Exception as e:
                    ComicImage = str(
                        base64.b64encode(output.getvalue() + "==="), 'utf-8')
                output.close()

            except Exception as e:
                logger.warn('[WARNING] Unable to resize existing image: %s' %
                            e)
        else:
            ComicImage = local_filename
    return {'ComicImage': ComicImage, 'metadata': metadata}
Beispiel #57
0
    def processor(self):
        sendresponse = self.params['nzo_id']
        try:
            logger.fdebug('sending now to %s' % self.sab_url)
            tmp_apikey = self.params['queue'].pop('apikey')
            logger.fdebug('parameters set to %s' % self.params)
            self.params['queue']['apikey'] = tmp_apikey
            time.sleep(
                5
            )  #pause 5 seconds before monitoring just so it hits the queue
            h = requests.get(self.sab_url,
                             params=self.params['queue'],
                             verify=False)
        except Exception as e:
            logger.fdebug('uh-oh: %s' % e)
            return self.historycheck(self.params)
        else:
            queueresponse = h.json()
            logger.fdebug('successfully queried the queue for status')
            try:
                queueinfo = queueresponse['queue']
                #logger.fdebug('queue: %s' % queueinfo)
                logger.fdebug('Queue status : %s' % queueinfo['status'])
                logger.fdebug('Queue mbleft : %s' % queueinfo['mbleft'])

                if str(queueinfo['status']) == 'Paused':
                    logger.warn(
                        '[WARNING] SABnzbd has the active queue Paused. CDH will not work in this state.'
                    )
                    return {'status': 'queue_paused', 'failed': False}
                while any([
                        str(queueinfo['status']) == 'Downloading',
                        str(queueinfo['status']) == 'Idle'
                ]) and float(queueinfo['mbleft']) > 0:
                    #if 'comicrn' in queueinfo['script'].lower():
                    #    logger.warn('ComicRN has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.')
                    #    logger.warn('Either disable Completed Download Handling for SABnzbd within Mylar, or remove ComicRN from your category script in SABnzbd.')
                    #    return {'status': 'double-pp', 'failed': False}

                    #logger.fdebug('queue_params: %s' % self.params['queue'])
                    queue_resp = requests.get(self.sab_url,
                                              params=self.params['queue'],
                                              verify=False)
                    queueresp = queue_resp.json()
                    queueinfo = queueresp['queue']
                    logger.fdebug('status: %s' % queueinfo['status'])
                    logger.fdebug('mbleft: %s' % queueinfo['mbleft'])
                    logger.fdebug('timeleft: %s' % queueinfo['timeleft'])
                    logger.fdebug('eta: %s' % queueinfo['eta'])
                    time.sleep(5)
            except Exception as e:
                logger.warn('error: %s' % e)

            logger.info('File has now downloaded!')
            return self.historycheck(self.params)
Beispiel #58
0
    def the_sequence(self):
        if self.sab is True:
            self.completedir()
            self.cats()
            cat_dir = self.cat['dir']
            cat_name = self.cat['name']

            if cat_dir is None:
                logger.fdebug(
                    '[CDH MAPPING] No category defined - using %s as the base download folder with no job folder creation'
                    % self.cdir)
                self.basedir = self.cdir
            else:
                if cat_dir.endswith('*'):
                    logger.fdebug(
                        '[CDH MAPPING][%s] category defined - no job folder creation defined - using %s as base download folder'
                        % (cat_name, cat_dir))
                    self.basedir = cat_dir[:-1]
                else:
                    logger.fdebug(
                        '[CDH MAPPING][%s] category defined - job folder creation defined - using %s as based download folder with sub folder creation'
                        % (cat_name, cat_dir))
                    self.basedir = cat_dir
                    self.subdir = True

        else:
            #query nzbget for categories and if path is different
            self.send_nzbget()
            cat_dir = self.cat['dir']
            cat_name = self.cat['name']
            if cat_dir is None:
                logger.fdebug(
                    '[CDH MAPPING] No category defined - using %s as the base download folder with no job folder creation'
                    % self.cdir)
                self.basedir = self.cdir
            else:
                if self.subdir is False:
                    logger.fdebug(
                        '[CDH MAPPING][%s] category defined - no job folder creation defined - using %s as base download folder'
                        % (cat_name, self.cdir))
                    self.basedir = self.cdir
                else:
                    logger.fdebug(
                        '[CDH MAPPING][%s] category defined - job folder creation defined - using %s as based download folder with sub folder creation'
                        % (cat_name, cat_dir))
                    self.basedir = cat_dir

        logger.fdebug('[CDH MAPPING] Base directory for downloads set to: %s' %
                      (self.basedir))
        logger.fdebug('[CDH MAPPING] Downloaded file @: %s' % self.storage)
        logger.fdebug('[CDH MAPPING] Destination root directory @: %s' %
                      (self.sab_dir))

        if self.subdir is False:
            maindir = self.storage.parents[0]
            file = self.storage.name
        else:
            maindir = self.storage.parents[1]
            file = self.storage.relative_to(maindir)
        final_dst = self.sab_dir.joinpath(file)
        return final_dst
Beispiel #59
0
def nzbdbsearch(seriesname,
                issue,
                comicid=None,
                nzbprov=None,
                searchYear=None,
                ComicVersion=None):
    myDB = db.DBConnection()
    seriesname_alt = None
    if comicid is None or comicid == 'None':
        pass
    else:
        snm = myDB.selectone("SELECT * FROM comics WHERE comicid=?",
                             [comicid]).fetchone()
        if snm is None:
            logger.info('Invalid ComicID of ' + str(comicid) +
                        '. Aborting search.')
            return
        else:
            seriesname = snm['ComicName']
            seriesname_alt = snm['AlternateSearch']

    nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\-\s]', '%',
                                seriesname)
    formatrem_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',
                                  seriesname)
    nsearch = '%' + nsearch_seriesname + "%"
    nresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?",
                           [nsearch, nzbprov])
    if nresults is None:
        logger.fdebug('nzb search returned no results for ' + seriesname)
        if seriesname_alt is None:
            logger.fdebug('no nzb Alternate name given. Aborting search.')
            return "no results"
        else:
            chkthealt = seriesname_alt.split('##')
            if chkthealt == 0:
                AS_Alternate = AlternateSearch
            for calt in chkthealt:
                AS_Alternate = re.sub('##', '', calt)
                nresults += myDB.select(
                    "SELECT * FROM rssdb WHERE Title like ? AND Site=?",
                    [AS_Alternate, nzbprov])
            if nresults is None:
                logger.fdebug('nzb alternate name search returned no results.')
                return "no results"

    nzbtheinfo = []
    nzbinfo = {}

    if nzbprov == 'experimental':
        except_list = [
            'releases', 'gold line', 'distribution', '0-day', '0 day'
        ]

        if ComicVersion:
            ComVersChk = re.sub("[^0-9]", "", ComicVersion)
            if ComVersChk == '':
                ComVersChk = 0
            else:
                ComVersChk = 0
        else:
            ComVersChk = 0

        for results in nresults:
            title = results['Title']
            #logger.fdebug("titlesplit: " + str(title.split("\"")))
            splitTitle = title.split("\"")
            noYear = 'False'

            for subs in splitTitle:
                #logger.fdebug(subs)
                if len(subs) > 10 and not any(d in subs.lower()
                                              for d in except_list):
                    if ComVersChk == 0:
                        noYear = 'False'

                    if ComVersChk != 0 and searchYear not in subs:
                        noYear = 'True'
                        noYearline = subs

                    if searchYear in subs and noYear == 'True':
                        #this would occur on the next check in the line, if year exists and
                        #the noYear check in the first check came back valid append it
                        subs = noYearline + ' (' + searchYear + ')'
                        noYear = 'False'

                    if noYear == 'False':

                        nzbtheinfo.append({
                            'title':
                            subs,
                            'link':
                            re.sub('\/release\/', '/download/',
                                   results['Link']),
                            'pubdate':
                            str(results['PubDate']),
                            'site':
                            str(results['Site']),
                            'length':
                            str(results['Size'])
                        })

    else:
        for nzb in nresults:
            # no need to parse here, just compile and throw it back ....
            nzbtheinfo.append({
                'title': nzb['Title'],
                'link': nzb['Link'],
                'pubdate': nzb['Pubdate'],
                'site': nzb['Site'],
                'length': nzb['Size']
            })
            #logger.fdebug("entered info for " + nzb['Title'])

    nzbinfo['entries'] = nzbtheinfo
    return nzbinfo
Beispiel #60
0
def nzbs(provider=None):
    nzbprovider = []
    nzbp = 0
    if mylar.NZBSU == 1:
        nzbprovider.append('nzb.su')
        nzbp += 1
    if mylar.DOGNZB == 1:
        nzbprovider.append('dognzb')
        nzbp += 1
    # --------
    #  Xperimental
    if mylar.EXPERIMENTAL == 1:
        nzbprovider.append('experimental')
        nzbp += 1

    newznabs = 0

    newznab_hosts = []

    if mylar.NEWZNAB == 1:

        for newznab_host in mylar.EXTRA_NEWZNABS:
            if newznab_host[4] == '1' or newznab_host[4] == 1:
                newznab_hosts.append(newznab_host)
                nzbprovider.append('newznab')
                newznabs += 1
                logger.fdebug('newznab name:' + str(newznab_host[0]) +
                              ' - enabled: ' + str(newznab_host[4]))

    # --------
    providercount = int(nzbp + newznabs)
    logger.fdebug('there are : ' + str(providercount) +
                  ' nzb RSS search providers you have enabled.')
    nzbpr = providercount - 1
    if nzbpr < 0:
        nzbpr == 0

    feeddata = []
    feedthis = []
    ft = 0
    totNum = 0
    nonexp = "no"

    while (nzbpr >= 0):
        if nzbprovider[nzbpr] == 'experimental':
            feed = feedparser.parse(
                "http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&max=50&more=1"
            )

            totNum = len(feed.entries)
            site = 'experimental'
            keyPair = {}
            regList = []
            entries = []
            mres = {}
            countUp = 0

            i = 0
            for entry in feed['entries']:
                tmpsz = feed.entries[i].enclosures[0]
                feeddata.append({
                    'Site': site,
                    'Title': feed.entries[i].title,
                    'Link': tmpsz['url'],  #feed.entries[i].link,
                    'Pubdate': feed.entries[i].updated,
                    'Size': tmpsz['length']
                })
                #                print ("Site:" + str(site))
                #                print ("Title:" + str(feed.entries[i].title))
                #                print ("Link:" + str(feed.entries[i].link))
                #                print ("Pubdate:" + str(feed.entries[i].updated))
                #                print ("Size:" + str(tmpsz['length']))
                i += 1
            logger.info(str(i) + ' results from Experimental feed indexed.')
            nzbpr -= 1
        else:
            if nzbprovider[nzbpr] == 'newznab':
                for newznab_host in newznab_hosts:
                    if newznab_host[3] is None:
                        newznabuid = '1'
                        newznabcat = '7030'
                    else:
                        if '#' not in newznab_host[3]:
                            newznabuid = newznab_host[3]
                            newznabcat = '7030'
                        else:
                            newzst = newznab_host[3].find('#')
                            newznabuid = newznab_host[3][:newzst]
                            newznabcat = newznab_host[3][newzst + 1:]
                    feed = newznab_host[1].rstrip() + '/rss?t=' + str(
                        newznabcat) + '&dl=1&i=' + str(
                            newznabuid) + '&r=' + newznab_host[2].rstrip()
                    feedme = feedparser.parse(feed)
                    site = newznab_host[0].rstrip()
                    feedthis.append({"feed": feedme, "site": site})
                    totNum += len(feedme.entries)
                    ft += 1
                    nonexp = "yes"
                    nzbpr -= 1
            elif nzbprovider[nzbpr] == 'nzb.su':
                if mylar.NZBSU_UID is None:
                    mylar.NZBSU_UID = '1'
                feed = 'http://api.nzb.su/rss?t=7030&dl=1&i=' + mylar.NZBSU_UID + '&r=' + mylar.NZBSU_APIKEY
                feedme = feedparser.parse(feed)
                site = nzbprovider[nzbpr]
                feedthis.append({"feed": feedme, "site": site})
                totNum += len(feedme.entries)
                ft += 1
                nonexp = "yes"
                nzbpr -= 1
            elif nzbprovider[nzbpr] == 'dognzb':
                if mylar.DOGNZB_UID is None:
                    mylar.DOGNZB_UID = '1'
                feed = 'https://dognzb.cr/rss.cfm?r=' + mylar.DOGNZB_APIKEY + '&t=7030'
                feedme = feedparser.parse(feed)
                site = nzbprovider[nzbpr]
                ft += 1
                nonexp = "yes"
                feedthis.append({"feed": feedme, "site": site})
                totNum += len(feedme.entries)
                nzbpr -= 1

    i = 0
    if nonexp == "yes":
        #print str(ft) + " sites checked. There are " + str(totNum) + " entries to be updated."
        #print feedme

        for ft in feedthis:
            sitei = 0
            site = ft['site']
            logger.fdebug(str(site) + " now being updated...")
            #logger.fdebug('feedthis:' + str(ft))
            for entry in ft['feed'].entries:
                if site == 'dognzb':
                    #because the rss of dog doesn't carry the enclosure item, we'll use the newznab size value
                    tmpsz = 0
                    #for attr in entry['newznab:attrib']:
                    #    if attr('@name') == 'size':
                    #        tmpsz = attr['@value']
                    #        logger.fdebug('size retrieved as ' + str(tmpsz))
                    #        break
                    feeddata.append({
                        'Site': site,
                        'Title': entry.title,  #ft['feed'].entries[i].title,
                        'Link': entry.link,  #ft['feed'].entries[i].link,
                        'Pubdate':
                        entry.updated,  #ft['feed'].entries[i].updated,
                        'Size': tmpsz
                    })
                else:
                    #this should work for all newznabs (nzb.su included)
                    #only difference is the size of the file between this and above (which is probably the same)
                    tmpsz = entry.enclosures[
                        0]  #ft['feed'].entries[i].enclosures[0]
                    feeddata.append({
                        'Site': site,
                        'Title': entry.title,  #ft['feed'].entries[i].title,
                        'Link': entry.link,  #ft['feed'].entries[i].link,
                        'Pubdate':
                        entry.updated,  #ft['feed'].entries[i].updated,
                        'Size': tmpsz['length']
                    })

                #logger.fdebug("Site: " + str(feeddata[i]['Site']))
                #logger.fdebug("Title: " + str(feeddata[i]['Title']))
                #logger.fdebug("Link: " + str(feeddata[i]['Link']))
                #logger.fdebug("pubdate: " + str(feeddata[i]['Pubdate']))
                #logger.fdebug("size: " + str(feeddata[i]['Size']))
                sitei += 1
            logger.info('[' + str(site) + '] ' + str(sitei) +
                        ' entries indexed.')
            i += sitei
    if i > 0:
        logger.info(
            '[RSS] ' + str(i) +
            ' entries have been indexed and are now going to be stored for caching.'
        )
        rssdbupdate(feeddata, i, 'usenet')
    return