Example #1
0
    def notify(self, event, message=None, snatched_nzb=None, prov=None, sent_to=None, module=None):
        """Send a Pushover notification.

        event: notification title.
        message: body text; rebuilt from snatched_nzb/prov/sent_to when
                 snatched_nzb is supplied.
        module: optional log prefix; '[NOTIFIER]' is always appended.
        Returns True on HTTP 200, False on failure, None when disabled.
        """
        if not mylar.PUSHOVER_ENABLED:
            return
        if module is None:
            module = ''
        module += '[NOTIFIER]'

        if snatched_nzb:
            # Strip a single trailing period so the message reads cleanly.
            # (fix: the old comparison was against the two-character string
            # '\.', which can never equal a single character, so the strip
            # never happened)
            if snatched_nzb[-1] == '.':
                snatched_nzb = snatched_nzb[:-1]
            message = "Mylar has snatched: " + snatched_nzb + " from " + prov + " and has sent it to " + sent_to

        data = {'token': mylar.PUSHOVER_APIKEY,
                'user': mylar.PUSHOVER_USERKEY,
                'message': message.encode("utf-8"),
                'title': event,
                'priority': mylar.PUSHOVER_PRIORITY}

        r = self._session.post(self.PUSHOVER_URL, data=data, verify=True)

        if r.status_code == 200:
            logger.info(module + ' PushOver notifications sent.')
            return True
        elif 400 <= r.status_code < 500:
            # 4xx: bad request / bad credentials - response body explains why
            logger.error(module + ' PushOver request failed: %s' % r.content)
            return False
        else:
            logger.error(module + ' PushOver notification failed serverside.')
            return False
Example #2
0
def sendfiles(filelist):
    """Open an SFTP session to the configured seedbox/tablet host and hand
    the file list off to sendtohome() for transfer.

    filelist: list of dicts (see sendtohome) with at least 'filename',
    'filepath' and 'issueid' keys.
    Returns None when paramiko is missing or the list is empty.
    """

    try:
        # deferred import so the rest of Mylar works without paramiko
        import paramiko
    except ImportError:
        logger.fdebug('paramiko not found on system. Please install manually in order to use seedbox option')
        logger.fdebug('get it at https://github.com/paramiko/paramiko')
        logger.fdebug('to install: python setup.py install')
        logger.fdebug('aborting send.')
        return

    # TAB_HOST is stored as 'host:port' - split it apart here
    fhost = mylar.CONFIG.TAB_HOST.find(':')
    host = mylar.CONFIG.TAB_HOST[:fhost]
    port = int(mylar.CONFIG.TAB_HOST[fhost +1:])

    logger.fdebug('Destination: ' + host)
    logger.fdebug('Using SSH port : ' + str(port))

    transport = paramiko.Transport((host, port))

    password = mylar.CONFIG.TAB_PASS
    username = mylar.CONFIG.TAB_USER
    transport.connect(username = username, password = password)

    sftp = paramiko.SFTPClient.from_transport(transport)

    remotepath = mylar.CONFIG.TAB_DIRECTORY
    logger.fdebug('remote path set to ' + remotepath)

    # NOTE(review): if filelist is empty the transport is left open and
    # the function falls off the end returning None - confirm intended.
    if len(filelist) > 0:
        logger.info('Initiating send for ' + str(len(filelist)) + ' files...')
        return sendtohome(sftp, remotepath, filelist, transport)
Example #3
0
def shutdown(restart=False, update=False):
    """Tear down the webserver and background schedulers, optionally
    self-update and/or relaunch the process, then hard-exit.

    restart: relaunch Mylar with the same interpreter and arguments.
    update: attempt a versioncheck-driven self-update before exiting.
    """

    cherrypy.engine.exit()
    halt()

    if not restart and not update:
        logger.info('Mylar is shutting down...')
    if update:
        logger.info('Mylar is updating...')
        try:
            versioncheck.update()
        except Exception as e:
            # best-effort: a failed update still proceeds to restart/exit
            logger.warn('Mylar failed to update: %s. Restarting.' % e)

    if CREATEPID:
        logger.info('Removing pidfile %s' % PIDFILE)
        os.remove(PIDFILE)

    if restart:
        logger.info('Mylar is restarting...')
        # respawn with the original interpreter, entry script and args
        popen_list = [sys.executable, FULL_PATH]
        popen_list += ARGS
#        if '--nolaunch' not in popen_list:
#            popen_list += ['--nolaunch']
        logger.info('Restarting Mylar with ' + str(popen_list))
        subprocess.Popen(popen_list, cwd=os.getcwd())

    # hard exit: bypasses cleanup/atexit so the replacement process
    # isn't blocked by this one
    os._exit(0)
Example #4
0
def torsend2client(seriesname, linkit, site):
    """Download a torrent for *seriesname* from site-specific URL *linkit*
    into the configured local or seedbox watch directory.

    Returns "fail" on any error; success handling continues past this
    excerpt (view truncated).
    """
    logger.info('matched on ' + str(seriesname))
    # strip characters that are problematic in filenames
    filename = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.]', '',seriesname)
    if site == 'ComicBT':
        logger.info(linkit)
        # ComicBT requires the account passkey appended to the link
        linkit = str(linkit) + '&passkey=' + str(mylar.CBT_PASSKEY)

    # NOTE(review): this inspects the URL (linkit), not the local filename,
    # before appending the extension - confirm that is intended.
    if linkit[-7:] != "torrent":
        filename += ".torrent"

    request = urllib2.Request(linkit)
    request.add_header('User-Agent', str(mylar.USER_AGENT))
    if mylar.TORRENT_LOCAL and mylar.LOCAL_WATCHDIR is not None:
        filepath = os.path.join(mylar.LOCAL_WATCHDIR, filename)
        logger.fdebug('filename for torrent set to : ' + filepath)
    elif mylar.TORRENT_SEEDBOX and mylar.SEEDBOX_WATCHDIR is not None:
        # seedbox mode stages the file in the cache dir first
        filepath = os.path.join(mylar.CACHE_DIR, filename)
        logger.fdebug('filename for torrent set to : ' + filepath)
    else:
        logger.error('No Local Watch Directory or Seedbox Watch Directory specified. Set it and try again.')
        return "fail"

    try:
        opener = helpers.urlretrieve(urllib2.urlopen(request), filepath)
    except Exception, e:
        logger.warn('Error fetching data from %s: %s' % (site, e))
        return "fail"
Example #5
0
    def notify(self, ComicName=None, Year=None, Issue=None, sent_to=None, snatched_nzb=None, username=None, force=False):
        """
        Sends a boxcar notification based on the provided info or SB config

        title: The title of the notification to send
        message: The message string to send
        username: The username to send the notification to (optional, defaults to the username in the config)
        force: If True then the notification will be sent even if Boxcar is disabled in the config
        """

        if not mylar.BOXCAR_ENABLED and not force:
            logger.fdebug("Notification for Boxcar not enabled, skipping this notification")
            return False

        # if no username was given then use the one from the config
        if not username:
            username = mylar.BOXCAR_USERNAME

        if snatched_nzb:
            # fix: 'Sucessfully' -> 'Successfully' (user-visible title typo)
            title = "Mylar. Successfully Snatched!"
            message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
        else:
            title = "Mylar. Successfully Downloaded & Post-Processed!"
            message = "Mylar has downloaded and postprocessed: " + ComicName + ' (' + Year + ') #' + Issue

        logger.info("Sending notification to Boxcar")

        self._sendBoxcar(message, title, username)
        return True
Example #6
0
    def addfile(self, filepath=None, filename=None, bytes=None):
        """Upload the .torrent at *filepath* to the uTorrent web UI.

        Returns the torrent's hash on success (after optionally applying
        the configured label), or the string 'fail' on any error.
        """
        params = {'action': 'add-file', 'token': self.token}
        try:
            # context manager guarantees the handle is closed even if
            # read() raises (the old code leaked it); catch only the
            # file-related errors instead of a bare except
            with open(filepath, 'rb') as d:
                tordata = d.read()
        except (IOError, OSError):
            logger.warn('Unable to load torrent file. Aborting at this time.')
            return 'fail'

        files = {'torrent_file': tordata}
        try:
            r = requests.post(url=self.utorrent_url, auth=self.auth, cookies=self.cookies, params=params, files=files)
        except requests.exceptions.RequestException as err:
            logger.debug('URL: ' + str(self.utorrent_url))
            logger.debug('Error sending to uTorrent Client. uTorrent responded with error: ' + str(err))
            return 'fail'

        # (to-do) verify the hash in order to ensure it's loaded here
        if str(r.status_code) == '200':
            logger.info('Successfully added torrent to uTorrent client.')
            # renamed from 'hash' to avoid shadowing the builtin
            torrent_hash = self.calculate_torrent_hash(data=tordata)
            if mylar.UTORRENT_LABEL:
                try:
                    self.setlabel(torrent_hash)
                except Exception:
                    # labeling is cosmetic; the add itself already succeeded
                    logger.warn('Unable to set label for torrent.')
            return torrent_hash
        else:
            return 'fail'
Example #7
0
    def notify(self, ComicName=None, Year=None, Issue=None, sent_to=None, snatched_nzb=None, force=False, module=None):
        """
        Sends a boxcar notification based on the provided info or SB config

        title: The title of the notification to send
        message: The message string to send
        force: If True then the notification will be sent even if Boxcar is disabled in the config
        """
        if module is None:
            module = ''
        module += '[NOTIFIER]'

        if not mylar.BOXCAR_ENABLED and not force:
            logger.fdebug(module + ' Notification for Boxcar not enabled, skipping this notification.')
            return False

        if snatched_nzb:
            # fix: 'Sucessfully' -> 'Successfully' (user-visible title typo)
            title = "Mylar. Successfully Snatched!"
            message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
        else:
            title = "Mylar. Successfully Downloaded & Post-Processed!"
            message = "Mylar has downloaded and postprocessed: " + ComicName + ' (' + Year + ') #' + Issue

        logger.info(module + ' Sending notification to Boxcar2')

        self._sendBoxcar(message, title, module)
        return True
Example #8
0
    def markasRead(self):
        """Toggle the read-status of the issue identified by self.IssueID
        (readlist table) or self.IssueArcID (readinglist table).

        'Read' flips back to 'Added'; anything else becomes 'Read'.
        StatusChange is stamped with today's date either way.
        """
        myDB = db.DBConnection()
        if self.IssueID:
            issue = myDB.selectone('SELECT * from readlist WHERE IssueID=?', [self.IssueID]).fetchone()
            if issue['Status'] == 'Read':
                NewVal = {"Status":  "Added"}
            else:
                NewVal = {"Status":    "Read"}

            NewVal['StatusChange'] = helpers.today()

            CtrlVal = {"IssueID":  self.IssueID}
            myDB.upsert("readlist", NewVal, CtrlVal)
            logger.info(self.module + ' Marked ' + issue['ComicName'] + ' #' + str(issue['Issue_Number']) + ' as Read.')
        elif self.IssueArcID:
            # story-arc issues live in a separate table with a different
            # issue-number column name (IssueNumber vs Issue_Number)
            issue = myDB.selectone('SELECT * from readinglist WHERE IssueArcID=?', [self.IssueArcID]).fetchone()
            if issue['Status'] == 'Read':
                NewVal = {"Status":    "Added"}
            else:
                NewVal = {"Status":    "Read"}
            NewVal['StatusChange'] = helpers.today()
            CtrlVal = {"IssueArcID":  self.IssueArcID}
            myDB.upsert("readinglist", NewVal, CtrlVal)
            logger.info(self.module + ' Marked ' +  issue['ComicName'] + ' #' + str(issue['IssueNumber']) + ' as Read.')

        return
Example #9
0
    def downloadfile(self, payload, filepath):
        """Stream a torrent download from 32P into *filepath*.

        payload: query parameters for the torrents.php endpoint.
        Returns True when the file was written, False on any failure.
        """
        url = 'https://32pag.es/torrents.php'
        try:
            r = self.session.get(url, params=payload, verify=True, stream=True, allow_redirects=True)
        except Exception as e:
            logger.error('%s [%s] Could not POST URL %s' % ('[32P-DOWNLOADER]', e, url))
            return False

        if str(r.status_code) != '200':
            logger.warn('Unable to download torrent from 32P [Status Code returned: %s]' % r.status_code)
            # fix: the old 404 branch referenced the undefined names 'site'
            # and 'linkit' (parameters of a different function) and raised a
            # NameError on every 404 - just log the response body instead.
            logger.info('content: %s' % r.content)
            return False

        # stream to disk in 1KB chunks so large torrents don't sit in memory
        with open(filepath, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()

        return True
Example #10
0
    def notify(self, text, attachment_text, snatched_nzb=None, prov=None, sent_to=None, module=None):
        """Post a notification message to the configured Slack webhook.

        attachment_text: the body; provider/destination info is appended
        when available.
        module: optional log prefix; '[NOTIFIER]' is always appended.
        Returns True when the POST was issued, False when it raised.
        """
        if module is None:
            module = ''
        module += '[NOTIFIER]'

        if all([sent_to is not None, prov is not None]):
            attachment_text += ' from %s and %s' % (prov, sent_to)
        elif sent_to is None:
            attachment_text += ' from %s' % prov
        else:
            pass

        # FIX: #1861 move notif from attachment to msg body - bbq
        payload = {
            "text": attachment_text
        }

        try:
            response = requests.post(self.webhook_url, json=payload, verify=True)
        except Exception as e:
            # fix: added the missing space after the module prefix and a
            # False return so callers can tell the notify failed
            logger.info(module + u' Slack notify failed: ' + str(e))
            return False
        return True
Example #11
0
    def comic_config(self, com_location, ComicID, alt_search=None, fuzzy_year=None):
        """Persist per-comic configuration (location, alternate search
        term, fuzzy-year flag), create the comic directory if needed, and
        redirect back to the comic's page.
        """
        myDB = db.DBConnection()

        # str() never returns None, so the old 'if asearch is not None'
        # guard was always true and has been dropped; alt_search=None
        # stringifies to 'None', matching the sentinel stored below.
        # (the large commented-out multi-term search experiment that used
        # to live here was dead code and has been removed)
        asearch = str(alt_search)

        controlValueDict = {'ComicID': ComicID}
        newValues = {"ComicLocation":        com_location }

        if asearch == '':
            newValues['AlternateSearch'] = "None"
        else:
            newValues['AlternateSearch'] = str(asearch)

        if fuzzy_year is None:
            newValues['UseFuzzy'] = "0"
        else:
            newValues['UseFuzzy'] = str(fuzzy_year)

        #force the check/creation of directory com_location here
        if os.path.isdir(str(com_location)):
            logger.info(u"Validating Directory (" + str(com_location) + "). Already exists! Continuing...")
        else:
            logger.fdebug("Updated Directory doesn't exist! - attempting to create now.")
            try:
                os.makedirs(str(com_location))
                logger.info(u"Directory successfully created at: " + str(com_location))
            except OSError:
                logger.error(u"Could not create comicdir : " + str(com_location))

        myDB.upsert("comics", newValues, controlValueDict)
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
Example #12
0
 def deleteArtist(self, ComicID):
     """Remove a comic and all of its issues from the database, then
     return to the home page.
     """
     myDB = db.DBConnection()
     comic = myDB.action('SELECT * from comics WHERE ComicID=?', [ComicID]).fetchone()
     # fix: guard against an unknown ComicID - the old code subscripted a
     # None row and crashed; the DELETEs below are harmless no-ops then
     if comic is None:
         logger.warn(u"No comic found with ComicID " + str(ComicID) + " - removing any orphaned rows.")
     else:
         logger.info(u"Deleting all traces of Comic: " + comic['ComicName'])
     myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
     myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])
     raise cherrypy.HTTPRedirect("home")
Example #13
0
def versionload():
    """Populate version/branch globals, build the User-Agent string and
    optionally check GitHub for a newer release / trigger auto-update.
    """

    mylar.CURRENT_VERSION, mylar.CONFIG.GIT_BRANCH = getVersion()

    if mylar.CURRENT_VERSION is not None:
        # short (7-char) commit hash for display purposes
        hash = mylar.CURRENT_VERSION[:7]
    else:
        hash = "unknown"

    # single-letter branch tag embedded in the User-Agent below
    if mylar.CONFIG.GIT_BRANCH == 'master':
        vers = 'M'
    elif mylar.CONFIG.GIT_BRANCH == 'development':
        vers = 'D'
    else:
        vers = 'NONE'

    mylar.USER_AGENT = 'Mylar/' +str(hash) +'(' +vers +') +http://www.github.com/evilhero/mylar/'

    logger.info('Version information: %s [%s]' % (mylar.CONFIG.GIT_BRANCH, mylar.CURRENT_VERSION))

    if mylar.CONFIG.CHECK_GITHUB_ON_STARTUP:
        try:
            mylar.LATEST_VERSION = checkGithub() #(CURRENT_VERSION)
        except:
            # network/API failure: fall back to the current version so the
            # comparison below can't spuriously trigger an update
            mylar.LATEST_VERSION = mylar.CURRENT_VERSION
    else:
        mylar.LATEST_VERSION = mylar.CURRENT_VERSION

    if mylar.CONFIG.AUTO_UPDATE:
        if mylar.CURRENT_VERSION != mylar.LATEST_VERSION and mylar.INSTALL_TYPE != 'win' and mylar.COMMITS_BEHIND > 0:
             logger.info('Auto-updating has been enabled. Attempting to auto-update.')
             mylar.SIGNAL = 'update'
Example #14
0
def sendtohome(sftp, remotepath, filelist, transport):
    """Upload each file in *filelist* over the open *sftp* session,
    creating per-series folders when MAINTAINSERIESFOLDER is set.

    filelist entries are dicts with 'filename', 'filepath' and 'issueid'.
    (View truncated: the transfer/verification logic continues below.)
    """
    fhost = mylar.CONFIG.TAB_HOST.find(':')
    host = mylar.CONFIG.TAB_HOST[:fhost]
    port = int(mylar.CONFIG.TAB_HOST[fhost +1:])

    successlist = []
    filestotal = len(filelist)

    for files in filelist:
        tempfile = files['filename']
        issid = files['issueid']
        logger.fdebug('Checking filename for problematic characters: ' + tempfile)
        #we need to make the required directory(ies)/subdirectories before the get will work.
        # NOTE(review): the three replace() calls below pass the literal
        # text '0xb4'/'0xbd'/'\0ff1a' rather than the unicode characters
        # just detected, and 'filename' is overwritten by the ascii-encode
        # further down anyway - the substitutions never take effect.
        if u'\xb4' in files['filename']:
            # right quotation
            logger.fdebug('detected abnormal character in filename')
            filename = tempfile.replace('0xb4', '\'')
        if u'\xbd' in files['filename']:
            # 1/2 character
            filename = tempfile.replace('0xbd', 'half')
        if u'\uff1a' in files['filename']:
            #some unknown character
            filename = tempfile.replace('\0ff1a', '-')

        #now we encode the structure to ascii so we can write directories/filenames without error.
        filename = tempfile.encode('ascii', 'ignore')

        remdir = remotepath

        if mylar.CONFIG.MAINTAINSERIESFOLDER == 1:
            # Get folder path of issue
            comicdir = os.path.split(files['filepath'])[0]
            # Isolate comic folder name
            comicdir = os.path.split(comicdir)[1]
            logger.info('Checking for Comic Folder: ' + comicdir)
            chkdir = os.path.join(remdir, comicdir)
            try:
                sftp.stat(chkdir)
            except IOError, e:
                logger.info('Comic Folder does not Exist, creating ' + chkdir )
                try:
                    sftp.mkdir(chkdir)
                except :
                    # Fallback to default behavior
                    logger.info('Could not create Comic Folder, adding to device root')
                else :
                    remdir = chkdir
            else :
                remdir = chkdir

        localsend = files['filepath']
        logger.info('Sending : ' + localsend)
        remotesend = os.path.join(remdir, filename)
        logger.info('To : ' + remotesend)

        try:
            sftp.stat(remotesend)
        except IOError, e:
            # errno 2: remote file doesn't exist yet, so it needs sending
            if e[0] == 2:
                filechk = False
Example #15
0
 def resumeArtist(self, ComicID):
     """Flip the given comic back to 'Active' status and bounce the
     browser back to its artist page.
     """
     logger.info(u"Resuming comic: " + ComicID)
     db_conn = db.DBConnection()
     selector = {'ComicID': ComicID}
     changes = {'Status': 'Active'}
     db_conn.upsert("comics", changes, selector)
     raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
Example #16
0
File: mb.py Project: cdj/mylar
def pullsearch(comicapi,comicquery,offset,explicit,type):
    """Build and fetch a ComicVine search query.

    explicit 'all'/'loose' uses the generic /search endpoint; anything
    else uses the filtered, pluralized resource endpoint.
    (View truncated: response handling continues past this excerpt.)
    """
    u_comicquery = urllib.quote(comicquery.encode('utf-8').strip())
    u_comicquery = u_comicquery.replace(" ", "%20")

    if explicit == 'all' or explicit == 'loose':
        PULLURL = mylar.CVURL + 'search?api_key=' + str(comicapi) + '&resources=' + str(type) + '&query=' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&page=' + str(offset)

    else:
        # 02/22/2014 use the volume filter label to get the right results.
        # add the 's' to the end of type to pluralize the caption (it's needed)
        if type == 'story_arc':
            logger.info('redefining.')
            u_comicquery = re.sub("%20AND%20", "%20", u_comicquery)
        PULLURL = mylar.CVURL + str(type) + 's?api_key=' + str(comicapi) + '&filter=name:' + u_comicquery + '&field_list=id,name,start_year,site_detail_url,count_of_issues,image,publisher,description&format=xml&offset=' + str(offset) # 2012/22/02 - CVAPI flipped back to offset instead of page

    #all these imports are standard on most modern python implementations
    #CV API Check here.
    # throttle against the ComicVine API rate limit before hitting it
    if mylar.CVAPI_COUNT == 0 or mylar.CVAPI_COUNT >= mylar.CVAPI_MAX:
        cvapi_check()
    #download the file:
    try:
        file = urllib2.urlopen(PULLURL)
    except urllib2.HTTPError, err:
        logger.error('err : ' + str(err))
        logger.error("There was a major problem retrieving data from ComicVine - on their end. You'll have to try again later most likely.")
        return
Example #17
0
    def __init__(self, reauthenticate=False, searchterm=None, test=False):
        """Authenticate against 32P and stash the session keys
        (authkey/passkey/uid) on success.

        On a failed login, self.error / self.method carry the failure
        details and the key attributes are left unset.
        """
        self.module = '[32P-AUTHENTICATION]'
        self.url = 'https://32pag.es/user.php?action=notify'
        self.headers = {'Content-type': 'application/x-www-form-urlencoded',
                        'Accept-Charset': 'utf-8',
                        'User-Agent': 'Mozilla/5.0'}

        self.error = None
        self.method = None
        # fix: assign these BEFORE the login attempt - the failure branch
        # below reads self.test, which previously wasn't set until after
        # the login and raised an AttributeError on every failed login.
        self.reauthenticate = reauthenticate
        self.searchterm = searchterm
        self.test = test
        self.publisher_list = {'Entertainment', 'Press', 'Comics', 'Publishing', 'Comix', 'Studios!'}

        lses = self.LoginSession(mylar.USERNAME_32P, mylar.PASSWORD_32P)

        if not lses.login():
            if not self.test:
                logger.error(self.module + ' [LOGIN FAILED] Disabling 32P provider until login error(s) can be fixed in order to avoid temporary bans.')
            # fix: __init__ may only return None - the old
            # 'return "disable"' / 'return self.error' raised a TypeError.
            # Callers should inspect self.error / self.method instead.
            return
        else:
            logger.info(self.module + '[LOGIN SUCCESS] Now preparing for the use of 32P keyed authentication...')
            self.authkey = lses.authkey
            self.passkey = lses.passkey
            self.uid = lses.uid
Example #18
0
    def runAction(self):
        """Worker loop: execute self.action.run() every self.cycleTime,
        polling once per second, until self.abort is set.
        """

        while True:

            currentTime = datetime.datetime.now()

            if currentTime - self.lastRun > self.cycleTime:
                self.lastRun = currentTime
                try:
                    if not self.silent:
                        logger.fdebug("Starting new thread: " + self.threadName)

                    if self.delay:
                        # stagger startup so parallel jobs don't contend
                        logger.info('delaying startup thread for ' + str(self.delay) + ' seconds to avoid locks.')
                        time.sleep(self.delay)

                    self.action.run()
                except Exception, e:
                    # keep the scheduler thread alive regardless of what
                    # the job raises; just log the traceback
                    logger.fdebug("Exception generated in thread " + self.threadName + ": %s" % e)
                    logger.fdebug(repr(traceback.format_exc()))

            if self.abort:
                self.abort = False
                self.thread = None
                return

            time.sleep(1)
Example #19
0
    def notify(self, ComicName=None, Year=None, Issue=None, snatched_nzb=None, sent_to=None):
        """Push a NotifyMyAndroid event for either a snatch or a
        completed download/post-process.
        """
        if snatched_nzb:
            event = snatched_nzb + " snatched!"
            description = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
        else:
            event = ComicName + " (" + Year + ") - Issue #" + Issue + " complete!"
            description = "Mylar has downloaded and postprocessed: " + ComicName + " (" + Year + ") #" + Issue

        payload = {
            "apikey": self.apikey,
            "application": "Mylar",
            "event": event,
            "description": description,
            "priority": self.priority,
        }

        logger.info("Sending notification request to NotifyMyAndroid")
        if not self._send(payload):
            logger.warn("Error sending notification request to NotifyMyAndroid")
Example #20
0
def movefiles(comicid, comlocation, imported):
    """Move imported issue files into the series folder *comlocation*,
    optionally renaming them to the configured file format, then flag the
    matching importresults rows as 'Imported'.

    imported: dict (or its repr string) with 'ComicName', 'filelisting'
    and 'srid' keys.
    """
    #comlocation is destination
    #comicid is used for rename
    files_moved = []
    try:
        # 'imported' may arrive as a stringified dict - eval it back safely
        imported = ast.literal_eval(imported)
    except ValueError:
        pass

    myDB = db.DBConnection()

    logger.fdebug('comlocation is : ' + comlocation)
    logger.fdebug('original comicname is : ' + imported['ComicName'])

    impres = imported['filelisting']

    if impres is not None:
        for impr in impres:
            srcimp = impr['comiclocation']
            orig_filename = impr['comicfilename']
            #before moving check to see if Rename to Mylar structure is enabled.
            if mylar.IMP_RENAME and mylar.FILE_FORMAT != '':
                logger.fdebug("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
                renameit = helpers.rename_param(comicid, imported['ComicName'], impr['issuenumber'], orig_filename)
                nfilename = renameit['nfilename']
                dstimp = os.path.join(comlocation, nfilename)
            else:
                logger.fdebug("Renaming files not enabled, keeping original filename(s)")
                dstimp = os.path.join(comlocation, orig_filename)

            logger.info("moving " + srcimp + " ... to " + dstimp)
            try:
                shutil.move(srcimp, dstimp)
                files_moved.append({'srid':     imported['srid'],
                                    'filename': impr['comicfilename']})
            except (OSError, IOError):
                logger.error("Failed to move files - check directories and manually re-run.")

        logger.fdebug("all files moved.")
        #now that it's moved / renamed ... we remove it from importResults or mark as completed.

    if len(files_moved) > 0:
        logger.info('files_moved: ' + str(files_moved))
        for result in files_moved:
            try:
                res = result['import_id']
            except:
                #if it's an 'older' import that wasn't imported, just make it a basic match so things can move and update properly.
                # NOTE(review): the dicts appended above never include an
                # 'import_id' key, so this except-path is always taken and
                # the else-branch below looks unreachable - confirm.
                controlValue = {"ComicFilename": result['filename'],
                                "SRID":          result['srid']}
                newValue = {"Status":            "Imported",
                            "ComicID":           comicid}
            else:
                controlValue = {"impID":         result['import_id'],
                                "ComicFilename": result['filename']}
                newValue = {"Status":            "Imported",
                            "SRID":              result['srid'],
                            "ComicID":           comicid}
            myDB.upsert("importresults", newValue, controlValue)
    return
Example #21
0
    def notify(self, prline=None, prline2=None, sent_to=None, snatched_nzb=None, force=False, module=None, snline=None):
        """Send a Boxcar2 notification.

        prline / prline2: title and body used for non-snatch events.
        snline: title used when snatched_nzb is supplied.
        force: send even when Boxcar is disabled in the config.
        Returns True when a notification was dispatched, False otherwise.
        """
        if module is None:
            module = ''
        module += '[NOTIFIER]'

        if not force and not mylar.BOXCAR_ENABLED:
            logger.fdebug(module + ' Notification for Boxcar not enabled, skipping this notification.')
            return False

        if snatched_nzb:
            title = snline
            message = "Mylar has snatched: " + snatched_nzb + " and has sent it to " + sent_to
        else:
            title, message = prline, prline2

        logger.info(module + ' Sending notification to Boxcar2')
        self._sendBoxcar(message, title, module)
        return True
Example #22
0
 def run(self):
     """Scheduled job wrapper: run the GitHub version check while keeping
     the job-management table and mylar.VERSION_STATUS flag in sync.
     """
     logger.info('[VersionCheck] Checking for new release on Github.')
     helpers.job_management(write=True, job='Check Version', current_run=helpers.utctimestamp(), status='Running')
     mylar.VERSION_STATUS = 'Running'
     versioncheck.checkGithub()
     helpers.job_management(write=True, job='Check Version', last_run_completed=helpers.utctimestamp(), status='Waiting')
     mylar.VERSION_STATUS = 'Waiting'
Example #23
0
 def run(self):
     """Manually scan the configured check-folder for snatched downloads
     and hand anything found to the post-processor.
     """
     # deferred imports - presumably to avoid circular imports at module
     # load time; TODO confirm against the enclosing module
     import PostProcessor, logger
     #monitor a selected folder for 'snatched' files that haven't been processed
     logger.info('Checking folder ' + mylar.CHECK_FOLDER + ' for newly snatched downloads')
     PostProcess = PostProcessor.PostProcessor('Manual Run', mylar.CHECK_FOLDER)
     result = PostProcess.Process()
     logger.info('Finished checking for newly snatched downloads')
Example #24
0
def halt():
    """Stop the background scheduler and worker pools and mark the module
    uninitialized. Guarded by INIT_LOCK so concurrent calls serialize.
    """
    global _INITIALIZED, started

    with INIT_LOCK:

        if _INITIALIZED:

            logger.info('Shutting down the background schedulers...')
            SCHED.shutdown(wait=False)

            if NZBPOOL is not None:
                logger.info('Terminating the nzb auto-complete thread.')
                try:
                    # give the worker up to 10s to drain before escalating
                    NZBPOOL.join(10)
                    logger.info('Joined pool for termination -  successful')
                except KeyboardInterrupt:
                    # user interrupted the wait: tell the worker to exit
                    # via its queue, then give it a shorter grace period
                    NZB_QUEUE.put('exit')
                    NZBPOOL.join(5)
                except AssertionError:
                    # join() refused (thread state problem - TODO confirm);
                    # bail out of the process entirely
                    os._exit(0)

            if SNPOOL is not None:
                logger.info('Terminating the auto-snatch thread.')
                try:
                    SNPOOL.join(10)
                    logger.info('Joined pool for termination -  successful')
                except KeyboardInterrupt:
                    SNATCHED_QUEUE.put('exit')
                    SNPOOL.join(5)
                except AssertionError:
                    os._exit(0)
            _INITIALIZED = False
Example #25
0
def start():
    """Kick off the recurring background jobs once the app has been initialized."""
    global __INITIALIZED__, started

    if __INITIALIZED__:

        from mylar import updater, search, weeklypull

        # Recurring maintenance jobs.
        SCHED.add_interval_job(updater.dbUpdate, hours=48)
        SCHED.add_interval_job(search.searchforissue, minutes=SEARCH_INTERVAL)

        # The weekly pull list must exist before its scheduler fires or the
        # listing gets messed up: populate it now (in a thread), then recheck
        # every 24 hours.
        logger.info("Checking for existance of Weekly Comic listing...")
        PULLNEW = 'no'  # reset the indicator here.
        threading.Thread(target=weeklypull.pullit).start()
        SCHED.add_interval_job(weeklypull.pullit, hours=24)

        # Optional one-shot search of Wanted issues on startup.
        if NZB_STARTUP_SEARCH:
            threading.Thread(target=search.searchforissue).start()

        # Optional periodic version check against Github.
        if CHECK_GITHUB:
            SCHED.add_interval_job(versioncheck.checkGithub, minutes=CHECK_GITHUB_INTERVAL)

        SCHED.start()

        started = True
Example #26
0
 def get_the_hash(self, filepath):
     """Return the uppercase SHA1 info-hash of the torrent file at *filepath*."""
     # 'with' guarantees the handle is closed even if bencode decoding
     # raises (the previous version leaked the open file handle).
     with open(filepath, "rb") as torrent_file:
         metainfo = bencode.decode(torrent_file.read())
     # The info-hash is the SHA1 of the bencoded 'info' dict only.
     info = metainfo['info']
     thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
     logger.info('Hash: ' + thehash)
     return thehash
Example #27
0
 def unqueueissue(self, IssueID, ComicID):
     """Mark a single issue as 'Skipped' and redirect back to the comic's page."""
     conn = db.DBConnection()
     issue = conn.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
     logger.info(u"Marking " + issue['ComicName'] + " issue # " + issue['Issue_Number']  + " as skipped...")
     # upsert keyed on IssueID flips only the Status column.
     conn.upsert("issues", {'Status': 'Skipped'}, {'IssueID': IssueID})
     raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % ComicID)
Example #28
0
    def run(self):
        """Scheduled job: search for every issue currently marked Wanted."""
        job_name = 'Auto-Search'
        logger.info('[SEARCH] Running Search for Wanted.')
        # Mark the job active before the (potentially long) search pass.
        helpers.job_management(write=True, job=job_name, current_run=helpers.utctimestamp(), status='Running')
        mylar.SEARCH_STATUS = 'Running'
        mylar.search.searchforissue()
        # Record completion and drop back to idle.
        helpers.job_management(write=True, job=job_name, last_run_completed=helpers.utctimestamp(), status='Waiting')
        mylar.SEARCH_STATUS = 'Waiting'
Example #29
0
 def run(self):
     """Scheduled job: refresh the weekly pull-list and run the future-release check."""
     job_name = 'Weekly Pullist'
     logger.info('[WEEKLY] Checking Weekly Pull-list for new releases/updates')
     helpers.job_management(write=True, job=job_name, current_run=helpers.utctimestamp(), status='Running')
     mylar.WEEKLY_STATUS = 'Running'
     # pullit() grabs this week's list; future_check() handles not-yet-released issues.
     weeklypull.pullit()
     weeklypull.future_check()
     helpers.job_management(write=True, job=job_name, last_run_completed=helpers.utctimestamp(), status='Waiting')
     mylar.WEEKLY_STATUS = 'Waiting'
Example #30
0
 def setlabel(self, hash):
     """Apply the configured uTorrent label to the torrent identified by *hash*.

     Logs the outcome; always returns None.
     """
     params = {'token': self.token, 'action': 'setprops', 'hash': hash, 's': 'label', 'v': str(mylar.UTORRENT_LABEL)}
     r = requests.post(url=self.utorrent_url, auth=self.auth, cookies=self.cookies, params=params)
     # status_code is an int - compare it directly instead of round-tripping
     # through str() as before.
     if r.status_code == 200:
         logger.info('label ' + str(mylar.UTORRENT_LABEL) + ' successfully applied')
     else:
         logger.info('Unable to label torrent')
     return
Example #31
0
    def test_notify(self):
        """Validate the configured NMA apikey and, if valid, send a test notification.

        Returns a dict {'status': bool, 'message': str} on failure, or the
        result of self._send() when the key verifies and the test message is
        pushed.
        """
        module = '[TEST-NOTIFIER]'
        try:
            r = self._session.get(self.TEST_NMA_URL,
                                  params={'apikey': self.apikey},
                                  verify=True)
        except requests.exceptions.RequestException as e:
            logger.error(
                module + '[' + str(e) +
                '] Unable to send via NMA. Aborting test notification - something is probably wrong...'
            )
            return {'status': False, 'message': str(e)}

        logger.fdebug('[NMA] Status code returned: ' + str(r.status_code))
        if r.status_code == 200:
            from xml.dom.minidom import parseString
            dom = parseString(r.content)
            try:
                success_info = dom.getElementsByTagName('success')
                success_code = success_info[0].getAttribute('code')
            except Exception:
                # No <success> element: NMA reports problems in an <error>
                # element instead, so pull the code/message from there.
                # (Was a bare except:, which would also swallow SystemExit
                # and KeyboardInterrupt.)
                error_info = dom.getElementsByTagName('error')
                error_code = error_info[0].getAttribute('code')
                error_message = error_info[0].childNodes[0].nodeValue
                logger.info(module + '[' + str(error_code) + '] ' +
                            error_message)
                return {
                    'status': False,
                    'message': '[' + str(error_code) + '] ' + error_message
                }

            else:
                logger.info(
                    module + '[' + str(success_code) +
                    '] NotifyMyAndroid apikey valid. Testing notification service with it.'
                )
        elif r.status_code >= 400 and r.status_code < 500:
            logger.error(module +
                         ' NotifyMyAndroid request failed: %s' % r.content)
            return {
                'status': False,
                'message':
                'Unable to send request to NMA - check your connection.'
            }
        else:
            logger.error(module +
                         ' NotifyMyAndroid notification failed serverside.')
            return {
                'status': False,
                'message': 'Internal Server Error. Try again later.'
            }

        # Key verified - push an actual test message through the service.
        event = 'Test Message'
        description = 'ZOMG Lazors PewPewPew!'
        data = {
            'apikey': self.apikey,
            'application': 'Mylar',
            'event': event.encode('utf-8'),
            'description': description.encode('utf-8'),
            'priority': 2
        }

        return self._send(data, '[NOTIFIER]')
Example #32
0
def update():
    """Update Mylar in place according to the detected install type.

    'win' installs are not updatable; 'git' installs run ``git pull``;
    anything else downloads the branch tarball from Github, unpacks it over
    the program directory, and rewrites version.txt.
    """

    if mylar.INSTALL_TYPE == 'win':

        logger.info('Windows .exe updating not supported yet.')
        pass

    elif mylar.INSTALL_TYPE == 'git':

        output, err = runGit('pull origin ' + mylar.CONFIG.GIT_BRANCH)

        if output is None:
            logger.error('Couldn\'t download latest version')
            return

        # Scan git's stdout for the interesting outcomes; other lines are ignored.
        for line in output.split('\n'):

            if 'Already up-to-date.' in line:
                logger.info('No update available, not updating')
                logger.info('Output: ' + str(output))
            elif line.endswith('Aborting.'):
                logger.error('Unable to update from git: ' + line)
                logger.info('Output: ' + str(output))

    else:

        # Source/tarball install: fetch the branch tarball from Github.
        tar_download_url = 'https://github.com/%s/mylar/tarball/%s' % (
            mylar.CONFIG.GIT_USER, mylar.CONFIG.GIT_BRANCH)
        update_dir = os.path.join(mylar.PROG_DIR, 'update')
        version_path = os.path.join(mylar.PROG_DIR, 'version.txt')

        try:
            logger.info('Downloading update from: ' + tar_download_url)
            response = requests.get(tar_download_url, verify=True, stream=True)
        except (IOError, urllib2.URLError):
            logger.error("Unable to retrieve new version from " +
                         tar_download_url + ", can't update")
            return

        #try sanitizing the name here...
        download_name = mylar.CONFIG.GIT_BRANCH + '-github'  #data.geturl().split('/')[-1].split('?')[0]
        tar_download_path = os.path.join(mylar.PROG_DIR, download_name)

        # Save tar to disk, streamed in 1KB chunks to bound memory use.
        with open(tar_download_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()

        # Extract the tar to update folder
        logger.info('Extracting file' + tar_download_path)
        tar = tarfile.open(tar_download_path)
        tar.extractall(update_dir)
        tar.close()

        # Delete the tar.gz
        logger.info('Deleting file' + tar_download_path)
        os.remove(tar_download_path)

        # Find update dir name. Github tarballs wrap everything in a single
        # top-level folder; anything else means the download was bad.
        update_dir_contents = [
            x for x in os.listdir(update_dir)
            if os.path.isdir(os.path.join(update_dir, x))
        ]
        if len(update_dir_contents) != 1:
            logger.error(u"Invalid update data, update failed: " +
                         str(update_dir_contents))
            return
        content_dir = os.path.join(update_dir, update_dir_contents[0])

        # walk temp folder and move files to main folder, replacing any
        # existing copies; os.renames also prunes emptied directories.
        for dirname, dirnames, filenames in os.walk(content_dir):
            dirname = dirname[len(content_dir) + 1:]
            for curfile in filenames:
                old_path = os.path.join(content_dir, dirname, curfile)
                new_path = os.path.join(mylar.PROG_DIR, dirname, curfile)

                if os.path.isfile(new_path):
                    os.remove(new_path)
                os.renames(old_path, new_path)

        # Update version.txt so the version check knows we're current.
        try:
            with open(version_path, 'w') as f:
                f.write(str(mylar.LATEST_VERSION))
        except IOError as e:
            logger.error(
                "Unable to write current version to version.txt, update not complete: %s"
                % ex(e))
            return
Example #33
0
    def historycheck(self, nzbinfo, roundtwo=False):
        """Poll SABnzbd's history for the job described by *nzbinfo*.

        Looks up nzbinfo['nzo_id'] in SAB's history (directly by nzo_id on
        SAB >= 3.2.0, otherwise within the last 200 entries) and returns a
        dict describing the completed or failed download, or a dict with a
        status string when the job cannot be handled. Queued or missing jobs
        get one retry via roundtwo=True.
        """
        sendresponse = nzbinfo['nzo_id']
        hist_params = {
            'mode': 'history',
            'category': mylar.CONFIG.SAB_CATEGORY,
            'failed': 0,
            'output': 'json',
            'apikey': mylar.CONFIG.SAB_APIKEY
        }

        # Discover the SAB version once (if unknown) so we know whether
        # nzo_id-based history queries are available.
        sab_check = None
        if mylar.CONFIG.SAB_VERSION is None:
            try:
                sc = mylar.webserve.WebInterface()
                sab_check = sc.SABtest(sabhost=mylar.CONFIG.SAB_HOST,
                                       sabusername=mylar.CONFIG.SAB_USERNAME,
                                       sabpassword=mylar.CONFIG.SAB_PASSWORD,
                                       sabapikey=mylar.CONFIG.SAB_APIKEY)
            except Exception as e:
                logger.warn(
                    '[SABNZBD-VERSION-TEST] Exception encountered trying to retrieve SABnzbd version: %s. Setting history length to last 200 items.'
                    % e)
                hist_params['limit'] = 200
                # non-None sentinel so the version comparison below is skipped
                sab_check = 'some value'
            else:
                sab_check = None

        if sab_check is None:
            #set min_sab to 3.2.0 since 3.2.0 beta 1 has the api call for history search by nzo_id
            try:
                sab_minimum_version = '3.2.0'
                min_sab = re.sub('[^0-9]', '', sab_minimum_version)
                sab_vers = mylar.CONFIG.SAB_VERSION
                if 'beta' in sab_vers:
                    sab_vers = re.sub('[^0-9]', '', sab_vers)
                    if len(sab_vers) > 3:
                        sab_vers = sab_vers[:
                                            -1]  # remove beta value entirely...
                if parse_version(sab_vers) >= parse_version(min_sab):
                    logger.fdebug(
                        'SABnzbd version is higher than 3.2.0. Querying history based on nzo_id directly.'
                    )
                    hist_params['nzo_ids'] = sendresponse
                else:
                    logger.fdebug(
                        'SABnzbd version is less than 3.2.0. Querying history based on history size of 200.'
                    )
                    hist_params['limit'] = 200
            except Exception as e:
                logger.warn(
                    '[SABNZBD-VERSION-CHECK] Exception encountered trying to compare installed version [%s] to [%s]. Setting history length to last 200 items. (error: %s)'
                    % (mylar.CONFIG.SAB_VERSION, sab_minimum_version, e))
                hist_params['limit'] = 200

        hist = requests.get(self.sab_url, params=hist_params, verify=False)
        historyresponse = hist.json()
        #logger.info(historyresponse)
        histqueue = historyresponse['history']
        found = {'status': False}
        nzo_exists = False

        try:
            for hq in histqueue['slots']:
                logger.fdebug('nzo_id: %s --- %s [%s]' %
                              (hq['nzo_id'], sendresponse, hq['status']))
                if hq['nzo_id'] == sendresponse and any([
                        hq['status'] == 'Completed', hq['status'] == 'Running',
                        'comicrn' in hq['script'].lower()
                ]):
                    nzo_exists = True
                    logger.info(
                        'found matching completed item in history. Job has a status of %s'
                        % hq['status'])
                    # ComicRN and Mylar's own completed-download handling would
                    # double-process the same download - bail out.
                    if 'comicrn' in hq['script'].lower():
                        logger.warn(
                            'ComicRN has been detected as being active for this category & download. Completed Download Handling will NOT be performed due to this.'
                        )
                        logger.warn(
                            'Either disable Completed Download Handling for SABnzbd within Mylar, or remove ComicRN from your category script in SABnzbd.'
                        )
                        return {'status': 'double-pp', 'failed': False}

                    if os.path.isfile(hq['storage']):
                        logger.fdebug('location found @ %s' % hq['storage'])
                        found = {
                            'status':
                            True,
                            'name':
                            ntpath.basename(
                                hq['storage']
                            ),  #os.pathre.sub('.nzb', '', hq['nzb_name']).strip(),
                            'location':
                            os.path.abspath(
                                os.path.join(hq['storage'], os.pardir)),
                            'failed':
                            False,
                            'issueid':
                            nzbinfo['issueid'],
                            'comicid':
                            nzbinfo['comicid'],
                            'apicall':
                            True,
                            'ddl':
                            False
                        }
                        break
                    else:
                        logger.error(
                            'no file found where it should be @ %s - is there another script that moves things after completion ?'
                            % hq['storage'])
                        return {'status': 'file not found', 'failed': False}

                elif hq['nzo_id'] == sendresponse and hq['status'] == 'Failed':
                    nzo_exists = True
                    #get the stage / error message and see what we can do
                    stage = hq['stage_log']
                    logger.fdebug('stage: %s' % (stage, ))
                    for x in stage:
                        if 'Failed' in x['actions'] and any(
                            [x['name'] == 'Unpack', x['name'] == 'Repair']):
                            if 'moving' in x['actions']:
                                logger.warn(
                                    'There was a failure in SABnzbd during the unpack/repair phase that caused a failure: %s'
                                    % x['actions'])
                            else:
                                logger.warn(
                                    'Failure occured during the Unpack/Repair phase of SABnzbd. This is probably a bad file: %s'
                                    % x['actions'])
                                if mylar.FAILED_DOWNLOAD_HANDLING is True:
                                    found = {
                                        'status':
                                        True,
                                        'name':
                                        re.sub('.nzb', '',
                                               hq['nzb_name']).strip(),
                                        'location':
                                        os.path.abspath(
                                            os.path.join(
                                                hq['storage'], os.pardir)),
                                        'failed':
                                        True,
                                        # BUGFIX: sendresponse is the nzo_id
                                        # string, so sendresponse['issueid']
                                        # raised TypeError (silently eaten by
                                        # the outer except). Use nzbinfo, as
                                        # the Completed branch does.
                                        'issueid':
                                        nzbinfo['issueid'],
                                        'comicid':
                                        nzbinfo['comicid'],
                                        'apicall':
                                        True,
                                        'ddl':
                                        False
                                    }
                            break
                    if found['status'] is False:
                        return {'status': 'failed_in_sab', 'failed': False}
                    else:
                        break
                elif hq['nzo_id'] == sendresponse:
                    nzo_exists = True
                    logger.fdebug(
                        'nzo_id: %s found while processing queue in an unhandled status: %s'
                        % (hq['nzo_id'], hq['status']))
                    # Still queued: give SAB a moment and retry exactly once.
                    if hq['status'] == 'Queued' and roundtwo is False:
                        time.sleep(4)
                        return self.historycheck(nzbinfo, roundtwo=True)
                    else:
                        return {
                            'failed': False,
                            'status':
                            'unhandled status of: %s' % (hq['status'])
                        }

            if not nzo_exists:
                logger.error(
                    'Cannot find nzb %s in the queue.  Was it removed?' %
                    sendresponse)
                time.sleep(5)
                if roundtwo is False:
                    return self.historycheck(nzbinfo, roundtwo=True)
                else:
                    return {'status': 'nzb removed', 'failed': False}
        except Exception as e:
            logger.warn('error %s' % e)
            return {'status': False, 'failed': False}

        return found
Example #34
0
    def load_torrent(self, filepath):
        """Add a torrent (magnet URI or .torrent file path) to qBittorrent.

        Skips torrents already present on the client, optionally force-starts
        or pauses the new torrent per config, then verifies it landed and
        returns a torrent_info dict ({'status': False} on any failure).
        """
        if not filepath.startswith('magnet'):
            logger.info('filepath to torrent file set to : ' + filepath)

        if self.client._is_authenticated is True:
            logger.info('Checking if Torrent Exists!')

            # Derive the info-hash: parse it out of a magnet URI (converting
            # base32 to hex when needed), or compute it from the file.
            if filepath.startswith('magnet'):
                torrent_hash = re.findall("urn:btih:([\w]{32,40})", filepath)[0]
                if len(torrent_hash) == 32:
                    torrent_hash = b16encode(b32decode(torrent_hash)).lower()
                hash = torrent_hash.upper()
                logger.debug('Magnet (load_torrent) initiating')
            else:
                hash = self.get_the_hash(filepath)
                logger.debug('FileName (load_torrent): ' + str(os.path.basename(filepath)))

            logger.debug('Torrent Hash (load_torrent): "' + hash + '"')

            #Check if torrent already added
            if self.find_torrent(hash):
                logger.info('load_torrent: Torrent already exists!')
                return {'status': False}
                #should set something here to denote that it's already loaded, and then the failed download checker not run so it doesn't download
                #multiple copies of the same issues that's already downloaded
            else:
                logger.info('Torrent not added yet, trying to add it now!')
                if filepath.startswith('magnet'):
                    try:
                        tid = self.client.download_from_link(filepath, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
                    except Exception as e:
                        logger.debug('Torrent not added')
                        return {'status': False}
                    else:
                        logger.debug('Successfully submitted for add as a magnet. Verifying item is now on client.')
                else:
                    try:
                        # 'with' closes the handle even if the upload raises
                        # (the previous version leaked the open file).
                        with open(filepath, 'rb') as torrent_content:
                            tid = self.client.download_from_file(torrent_content, category=str(mylar.CONFIG.QBITTORRENT_LABEL))
                    except Exception as e:
                        logger.debug('Torrent not added')
                        return {'status': False}
                    else:
                        logger.debug('Successfully submitted for add via file. Verifying item is now on client.')

            if mylar.CONFIG.QBITTORRENT_STARTONLOAD:
                logger.info('attempting to start')
                startit = self.client.force_start(hash)
                logger.info('startit returned:' + str(startit))
            else:
                logger.info('attempting to pause torrent incase it starts')
                try:
                    startit = self.client.pause(hash)
                    logger.info('startit paused:' + str(startit))
                except Exception:
                    logger.warn('Unable to pause torrent - possibly already paused?')

        try:
            time.sleep(5) # wait 5 in case it's not populated yet.
            tinfo = self.get_torrent(hash)
        except Exception as e:
            logger.warn('Torrent was not added! Please check logs')
            return {'status': False}
        else:
            logger.info('Torrent successfully added!')
            filelist = self.client.get_torrent_files(hash)
            #logger.info(filelist)
            # Single-file torrents report the file name; multi-file torrents
            # report the save path as the torrent 'name'.
            if len(filelist) == 1:
                to_name = filelist[0]['name']
            else:
                to_name = tinfo['save_path']

            torrent_info = {'hash':             hash,
                            'files':            filelist,
                            'name':             to_name,
                            'total_filesize':   tinfo['total_size'],
                            'folder':           tinfo['save_path'],
                            'time_started':     tinfo['addition_date'],
                            'label':            mylar.CONFIG.QBITTORRENT_LABEL,
                            'status':           True}

            #logger.info(torrent_info)
            return torrent_info
Example #35
0
def populate(link, publisher, shipdate):
    """Scrape a ComicBookResources solicitation page (*link*) into a list of
    upcoming issues.

    Returns a list of dicts keyed Shipdate / Publisher / Issue / Comic /
    Extra, one per issue found; split solicits like '#1-2' or '#1 & #2'
    yield two dicts. *shipdate* is the default issue date; an explicit
    'on sale' date in the solicit text overrides it.
    """
    #this is the secondary url call to populate
    input = 'http://www.comicbookresources.com/' + link
    #print 'checking ' + str(input)
    response = urllib2.urlopen(input)
    soup = BeautifulSoup(response)
    abc = soup.findAll('p')
    lenabc = len(abc)
    i = 0
    resultName = []
    resultID = []
    resultURL = []
    matched = "no"
    upcome = []
    get_next = False
    prev_chk = False

    while (i < lenabc):
        titlet = abc[i]  #iterate through the p pulling out only results.
        titlet_next = titlet.findNext(text=True)
        #print ("titlet: " + str(titlet))
        if "/prev_img.php?pid" in str(titlet) and titlet_next is None:
            #solicits in 03-2014 have seperated <p> tags, so we need to take the subsequent <p>, not the initial.
            prev_chk = False
            get_next = True
            i += 1
            continue
        elif titlet_next is not None:
            #logger.fdebug('non seperated <p> tags - taking next text.')
            get_next = False
            prev_chk = True

        elif "/news/preview2.php" in str(titlet):
            prev_chk = True
            get_next = False
        elif get_next == True:
            prev_chk = True
        else:
            prev_chk = False
            get_next = False

        if prev_chk == True:
            tempName = titlet.findNext(text=True)
            # Skip collected editions / variants / posters - only single issues wanted.
            if not any([
                    ' TPB' in tempName, 'HC' in tempName, 'GN-TPB' in tempName,
                    'for $1' in tempName.lower(), 'subscription variant'
                    in tempName.lower(), 'poster' in tempName.lower()
            ]):
                if '#' in tempName[:50]:
                    #tempName = tempName.replace(u'.',u"'")
                    tempName = tempName.encode('ascii',
                                               'replace')  #.decode('utf-8')
                    if '???' in tempName:
                        tempName = tempName.replace('???', ' ')
                    # Issue number runs from '#' to the next space.
                    stissue = tempName.find('#')
                    endissue = tempName.find(' ', stissue)
                    if tempName[
                            stissue +
                            1] == ' ':  #if issue has space between # and number, adjust.
                        endissue = tempName.find(' ', stissue + 2)
                    if endissue == -1: endissue = len(tempName)
                    issue = tempName[stissue:endissue].lstrip(' ')
                    if ':' in issue: issue = re.sub(':', '', issue).rstrip()
                    exinfo = tempName[endissue:].lstrip(' ')

                    issue1 = None
                    issue2 = None

                    # '#1-2' style combined solicit: split into two issues.
                    if '-' in issue:
                        #print ('multiple issues detected. Splitting.')
                        ststart = issue.find('-')
                        issue1 = issue[:ststart]
                        issue2 = '#' + str(issue[ststart + 1:])

                    # '#1 & #2' style combined solicit: second number lives in exinfo.
                    if '&' in exinfo:
                        #print ('multiple issues detected. Splitting.')
                        ststart = exinfo.find('&')
                        issue1 = issue  # this detects fine
                        issue2 = '#' + str(exinfo[ststart + 1:])
                        if '& ' in issue2: issue2 = re.sub("&\\b", "", issue2)
                        exinfo = exinfo.replace(
                            exinfo[ststart + 1:len(issue2)], '').strip()
                        if exinfo == '&': exinfo = 'N/A'

                    comic = tempName[:stissue].strip()

                    # NOTE(review): '\$' in a plain string is a literal
                    # backslash + dollar, so this match presumably never
                    # fires - 'for $1' was likely intended; verify before
                    # changing.
                    if 'for \$1' in comic:
                        exinfo = 'for $1'
                        comic = comic.replace('for \$1\:', '').lstrip()

                    issuedate = shipdate
                    # An explicit 'on sale <Month> <day>' in the solicit
                    # overrides the default shipdate.
                    if 'on sale' in str(titlet).lower():
                        onsale_start = str(titlet).lower().find('on sale') + 8
                        onsale_end = str(titlet).lower().find(
                            '<br>', onsale_start)
                        thedate = str(titlet)[onsale_start:onsale_end]
                        m = None

                        basemonths = {
                            'january': '1',
                            'jan': '1',
                            'february': '2',
                            'feb': '2',
                            'march': '3',
                            'mar': '3',
                            'april': '4',
                            'apr': '4',
                            'may': '5',
                            'june': '6',
                            'july': '7',
                            'august': '8',
                            'aug': '8',
                            'september': '9',
                            'sept': '9',
                            'october': '10',
                            'oct': '10',
                            'november': '11',
                            'nov': '11',
                            'december': '12',
                            'dec': '12'
                        }
                        for month in basemonths:
                            if month in thedate.lower():
                                m = basemonths[month]
                                monthname = month
                                break

                        if m is not None:
                            # Rebuild the date as YYYY-MM-DD using the year
                            # from the tail of *shipdate*.
                            theday = len(
                                month
                            ) + 1  # account for space between month & day
                            thedaystart = thedate[theday:(
                                theday +
                                2)].strip()  # day numeric won't exceed 2
                            if len(str(thedaystart)) == 1:
                                thedaystart = '0' + str(thedaystart)
                            if len(str(m)) == 1:
                                m = '0' + str(m)
                            thedate = shipdate[-4:] + '-' + str(m) + '-' + str(
                                thedaystart)

                        logger.info('[' + comic + '] On sale :' + str(thedate))
                        exinfo += ' [' + str(thedate) + ']'
                        issuedate = thedate

                    if issue1:
                        upcome.append({
                            'Shipdate': issuedate,
                            'Publisher': publisher.upper(),
                            'Issue': re.sub('#', '', issue1).lstrip(),
                            'Comic': comic.upper(),
                            'Extra': exinfo.upper()
                        })
                        #print ('Comic: ' + comic)
                        #print('issue#: ' + re.sub('#', '', issue1))
                        #print ('extra info: ' + exinfo)
                        if issue2:
                            upcome.append({
                                'Shipdate':
                                issuedate,
                                'Publisher':
                                publisher.upper(),
                                'Issue':
                                re.sub('#', '', issue2).lstrip(),
                                'Comic':
                                comic.upper(),
                                'Extra':
                                exinfo.upper()
                            })
                            #print ('Comic: ' + comic)
                            #print('issue#: ' + re.sub('#', '', issue2))
                            #print ('extra info: ' + exinfo)
                    else:
                        upcome.append({
                            'Shipdate': issuedate,
                            'Publisher': publisher.upper(),
                            'Issue': re.sub('#', '', issue).lstrip(),
                            'Comic': comic.upper(),
                            'Extra': exinfo.upper()
                        })
                        #print ('Comic: ' + comic)
                        #print ('issue#: ' + re.sub('#', '', issue))
                        #print ('extra info: ' + exinfo)
                else:
                    pass
                    #print ('no issue # to retrieve.')
        i += 1
    return upcome
Example #36
0
def findComic(name, mode, issue, limityear=None, type=None):
    """Search ComicVine for a series volume or story arc matching `name`.

    The name is first loosened (common words / '&' / '-' stripped, '+'
    protected as %2B), then the ComicVine search API is queried page by
    page (100 results per page, capped at 1000 total).

    Parameters:
        name      -- series/arc title to search for.
        mode      -- accepted from callers but not referenced in this body.
        issue     -- highest known issue number (or None); used to discard
                     volumes whose issue range cannot reach it.
        limityear -- iterable of acceptable start years, or None for any.
        type      -- CV resource type: 'volume' (default) or 'story_arc'.
                     NOTE(review): shadows the `type` builtin.

    Returns:
        list of result dicts (one per accepted volume / story arc);
        False when the search yields nothing;
        None (bare return) on missing API key or CV retrieval errors.
    """
    #with mb_lock:
    comicResults = None
    # ComicIDs already on the watchlist -> used to populate 'haveit' below.
    comicLibrary = listLibrary()
    comiclist = []
    arcinfolist = []

    # Loosen the search name: strip common joining words/characters.
    commons = ['and', 'the', '&', '-']
    for x in commons:
        cnt = 0
        for m in re.finditer(x, name.lower()):
            cnt += 1
            tehstart = m.start()
            tehend = m.end()
            if any([x == 'the', x == 'and']):
                if len(name) == tehend:
                    tehend = -1
                # NOTE(review): the two all([...]) groups are mutually
                # exclusive (tehstart == 0 vs tehstart != 0), so one of the
                # `not all` halves is always True and this always continues;
                # whole-word 'the'/'and' is therefore never acted on here,
                # and no replacement happens on this branch even when it
                # would fall through. Confirm intent before changing.
                if not all([tehstart == 0, name[tehend] == ' ']) or not all([
                        tehstart != 0, name[tehstart - 1] == ' ', name[tehend]
                        == ' '
                ]):
                    continue
            else:
                # '&' and '-' become spaces (bounded by occurrence count).
                name = name.replace(x, ' ', cnt)

    originalname = name
    if '+' in name:
        # Protect a literal '+' through the \w+ tokenizer below;
        # restored as the URL escape %2B afterwards.
        name = re.sub('\+', 'PLUS', name)

    # Tokenize the cleaned name into word terms for the CV query.
    pattern = re.compile(r'\w+', re.UNICODE)
    name = pattern.findall(name)

    if '+' in originalname:
        # Swap the PLUS placeholder back in as its URL-encoded form.
        y = []
        for x in name:
            y.append(re.sub("PLUS", "%2B", x))
        name = y

    if limityear is None: limityear = 'None'

    comicquery = name

    # A personal ComicVine API key is mandatory for any search.
    if mylar.CONFIG.COMICVINE_API == 'None' or mylar.CONFIG.COMICVINE_API is None:
        logger.warn(
            'You have not specified your own ComicVine API key - this is a requirement. Get your own @ http://api.comicvine.com.'
        )
        return
    else:
        comicapi = mylar.CONFIG.COMICVINE_API

    if type is None:
        type = 'volume'

    #let's find out how many results we get from the query...
    # First page (offset 0) also carries the total-result count.
    searched = pullsearch(comicapi, comicquery, 0, type)
    if searched is None:
        return False
    totalResults = searched.getElementsByTagName(
        'number_of_total_results')[0].firstChild.wholeText
    logger.fdebug("there are " + str(totalResults) + " search results...")
    if not totalResults:
        return False
    # Hard cap: only the first 1000 hits are ever walked.
    if int(totalResults) > 1000:
        logger.warn(
            'Search returned more than 1000 hits [' + str(totalResults) +
            ']. Only displaying first 1000 results - use more specifics or the exact ComicID if required.'
        )
        totalResults = 1000
    countResults = 0
    # Paginate through the results, 100 per pullsearch call.
    while (countResults < int(totalResults)):
        #logger.fdebug("querying " + str(countResults))
        if countResults > 0:
            offsetcount = countResults

            searched = pullsearch(comicapi, comicquery, offsetcount, type)
        comicResults = searched.getElementsByTagName(type)
        body = ''
        n = 0
        if not comicResults:
            break
        for result in comicResults:
            #retrieve the first xml tag (<tag>data</tag>)
            #that the parser finds with name tagName:
            arclist = []
            if type == 'story_arc':
                #call cv.py here to find out issue count in story arc
                try:
                    logger.fdebug('story_arc ascension')
                    # Walk every <name> node and classify it by its parent
                    # element (story_arc name vs publisher name).
                    names = len(result.getElementsByTagName('name'))
                    n = 0
                    logger.fdebug('length: ' + str(names))
                    xmlpub = None  #set this incase the publisher field isn't populated in the xml
                    while (n < names):
                        logger.fdebug(
                            result.getElementsByTagName('name')
                            [n].parentNode.nodeName)
                        if result.getElementsByTagName(
                                'name')[n].parentNode.nodeName == 'story_arc':
                            logger.fdebug('yes')
                            try:
                                xmlTag = result.getElementsByTagName(
                                    'name')[n].firstChild.wholeText
                                xmlTag = xmlTag.rstrip()
                                logger.fdebug('name: ' + xmlTag)
                            except:
                                logger.error(
                                    'There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.'
                                )
                                return

                        elif result.getElementsByTagName(
                                'name')[n].parentNode.nodeName == 'publisher':
                            logger.fdebug('publisher check.')
                            xmlpub = result.getElementsByTagName(
                                'name')[n].firstChild.wholeText

                        n += 1
                except:
                    logger.warn('error retrieving story arc search results.')
                    return

                # Same parent-node scan for the arc's site_detail_url.
                siteurl = len(result.getElementsByTagName('site_detail_url'))
                s = 0
                logger.fdebug('length: ' + str(names))
                xmlurl = None
                while (s < siteurl):
                    logger.fdebug(
                        result.getElementsByTagName('site_detail_url')
                        [s].parentNode.nodeName)
                    if result.getElementsByTagName('site_detail_url')[
                            s].parentNode.nodeName == 'story_arc':
                        try:
                            xmlurl = result.getElementsByTagName(
                                'site_detail_url')[s].firstChild.wholeText
                        except:
                            logger.error(
                                'There was a problem retrieving the given data from ComicVine. Ensure that www.comicvine.com is accessible.'
                            )
                            return
                    s += 1

                xmlid = result.getElementsByTagName(
                    'id')[0].firstChild.wholeText

                if xmlid is not None:
                    # Pull full arc detail (image/year/issue list) via cv.py.
                    arcinfolist = storyarcinfo(xmlid)
                    logger.info('[IMAGE] : ' + arcinfolist['comicimage'])
                    comiclist.append({
                        'name': xmlTag,
                        'comicyear': arcinfolist['comicyear'],
                        'comicid': xmlid,
                        'cvarcid': xmlid,
                        'url': xmlurl,
                        'issues': arcinfolist['issues'],
                        'comicimage': arcinfolist['comicimage'],
                        'publisher': xmlpub,
                        'description': arcinfolist['description'],
                        'deck': arcinfolist['deck'],
                        'arclist': arcinfolist['arclist'],
                        'haveit': arcinfolist['haveit']
                    })
                else:
                    # NOTE(review): this branch references arcyear,
                    # issuecount, xmlimage, xmldesc, xmldeck and haveit,
                    # none of which are assigned on this path -- it would
                    # raise NameError. Since xmlid above comes from
                    # firstChild.wholeText it is effectively never None,
                    # so this branch appears unreachable; confirm before
                    # relying on it.
                    comiclist.append({
                        'name': xmlTag,
                        'comicyear': arcyear,
                        'comicid': xmlid,
                        'url': xmlurl,
                        'issues': issuecount,
                        'comicimage': xmlimage,
                        'publisher': xmlpub,
                        'description': xmldesc,
                        'deck': xmldeck,
                        'arclist': arclist,
                        'haveit': haveit
                    })

                    logger.fdebug('IssueID\'s that are a part of ' + xmlTag +
                                  ' : ' + str(arclist))
            else:
                # --- 'volume' (series) result handling ---
                xmlcnt = result.getElementsByTagName(
                    'count_of_issues')[0].firstChild.wholeText
                #here we can determine what called us, and either start gathering all issues or just limited ones.
                if issue is not None and str(issue).isdigit():
                    #this gets buggered up with NEW/ONGOING series because the db hasn't been updated
                    #to reflect the proper count. Drop it by 1 to make sure.
                    limiter = int(issue) - 1
                else:
                    limiter = 0
                #get the first issue # (for auto-magick calcs)

                # Scan <name> nodes to locate first/last issue numbers by
                # their parent element names.
                iss_len = len(result.getElementsByTagName('name'))
                i = 0
                xmlfirst = '1'
                xmllast = None
                try:
                    while (i < iss_len):
                        if result.getElementsByTagName('name')[
                                i].parentNode.nodeName == 'first_issue':
                            xmlfirst = result.getElementsByTagName(
                                'issue_number')[i].firstChild.wholeText
                            if '\xbd' in xmlfirst:
                                xmlfirst = '1'  #if the first issue is 1/2, just assume 1 for logistics
                        elif result.getElementsByTagName(
                                'name')[i].parentNode.nodeName == 'last_issue':
                            xmllast = result.getElementsByTagName(
                                'issue_number')[i].firstChild.wholeText
                        if all([xmllast is not None, xmlfirst is not None]):
                            break
                        i += 1
                except:
                    xmlfirst = '1'

                # A single-issue series sometimes reports a 0 count; treat
                # first==last (numeric) with count 0 as exactly one issue.
                if all(
                    [xmlfirst == xmllast,
                     xmlfirst.isdigit(), xmlcnt == '0']):
                    xmlcnt = '1'

                #logger.info('There are : ' + str(xmlcnt) + ' issues in this series.')
                #logger.info('The first issue started at # ' + str(xmlfirst))
                # Estimate the highest issue number reachable by the series.
                d = decimal.Decimal(xmlfirst)
                if d < 1:
                    cnt_numerical = int(xmlcnt) + 1
                else:
                    cnt_numerical = int(xmlcnt) + int(
                        math.ceil(d)
                    )  # (of issues + start of first issue = numerical range)

                #logger.info('The maximum issue number should be roughly # ' + str(cnt_numerical))
                #logger.info('The limiter (issue max that we know of) is # ' + str(limiter))
                if cnt_numerical >= limiter:
                    # Extract title, cover image and last-issue id by
                    # classifying each <name>/<id> node via its parent.
                    cnl = len(result.getElementsByTagName('name'))
                    cl = 0
                    xmlTag = 'None'
                    xmlimage = "cache/blankcover.jpg"
                    xml_lastissueid = 'None'
                    while (cl < cnl):
                        if result.getElementsByTagName(
                                'name')[cl].parentNode.nodeName == 'volume':
                            xmlTag = result.getElementsByTagName(
                                'name')[cl].firstChild.wholeText
                            #break

                        if result.getElementsByTagName(
                                'name')[cl].parentNode.nodeName == 'image':
                            xmlimage = result.getElementsByTagName(
                                'super_url')[0].firstChild.wholeText

                        if result.getElementsByTagName('name')[
                                cl].parentNode.nodeName == 'last_issue':
                            xml_lastissueid = result.getElementsByTagName(
                                'id')[cl].firstChild.wholeText
                        cl += 1

                    if (result.getElementsByTagName('start_year')[0].firstChild
                        ) is not None:
                        xmlYr = result.getElementsByTagName(
                            'start_year')[0].firstChild.wholeText
                    else:
                        xmlYr = "0000"

                    # Build the plausible span of publication years from the
                    # start year and the issue count (~12 issues/year).
                    yearRange = []
                    tmpYr = re.sub('\?', '', xmlYr)

                    if tmpYr.isdigit():

                        yearRange.append(tmpYr)
                        # NOTE(review): Python 2 integer division here.
                        tmpyearRange = int(xmlcnt) / 12
                        # NOTE(review): `tmpyearRange + 1` is a no-op
                        # expression statement -- almost certainly meant
                        # `tmpyearRange += 1`, which would widen the range
                        # by one year. Left as-is to preserve behavior.
                        if float(tmpyearRange): tmpyearRange + 1
                        possible_years = int(tmpYr) + tmpyearRange

                        for i in range(int(tmpYr), int(possible_years), 1):
                            if not any(int(x) == int(i) for x in yearRange):
                                yearRange.append(str(i))

                    logger.fdebug('[RESULT][' + str(limityear) +
                                  '] ComicName:' + xmlTag + ' -- ' +
                                  str(xmlYr) + ' [Series years: ' +
                                  str(yearRange) + ']')
                    if tmpYr != xmlYr:
                        xmlYr = tmpYr

                    # Keep the result only if its year range intersects the
                    # caller's limityear constraint (or none was given).
                    if any([v in limityear
                            for v in yearRange]) or limityear == 'None':
                        xmlurl = result.getElementsByTagName(
                            'site_detail_url')[0].firstChild.wholeText
                        idl = len(result.getElementsByTagName('id'))
                        idt = 0
                        xmlid = None
                        while (idt < idl):
                            if result.getElementsByTagName(
                                    'id')[idt].parentNode.nodeName == 'volume':
                                xmlid = result.getElementsByTagName(
                                    'id')[idt].firstChild.wholeText
                                break
                            idt += 1

                        if xmlid is None:
                            logger.error(
                                'Unable to figure out the comicid - skipping this : '
                                + str(xmlurl))
                            continue

                        publishers = result.getElementsByTagName('publisher')
                        if len(publishers) > 0:
                            pubnames = publishers[0].getElementsByTagName(
                                'name')
                            if len(pubnames) > 0:
                                xmlpub = pubnames[0].firstChild.wholeText
                            else:
                                xmlpub = "Unknown"
                        else:
                            xmlpub = "Unknown"

                        #ignore specific publishers on a global scale here.
                        if mylar.CONFIG.BLACKLISTED_PUBLISHERS is not None and any(
                            [
                                x for x in mylar.CONFIG.BLACKLISTED_PUBLISHERS
                                if x.lower() == xmlpub.lower()
                            ]):
                            logger.fdebug('Blacklisted publisher [' + xmlpub +
                                          ']. Ignoring this result.')
                            continue

                        try:
                            xmldesc = result.getElementsByTagName(
                                'description')[0].firstChild.wholeText
                        except:
                            xmldesc = "None"

                        #this is needed to display brief synopsis for each series on search results page.
                        try:
                            xmldeck = result.getElementsByTagName(
                                'deck')[0].firstChild.wholeText
                        except:
                            xmldeck = "None"

                        # Heuristically classify the edition type
                        # (Print / Digital / TPB / HC / One-Shot) from the
                        # deck first, then the description.
                        xmltype = None
                        if xmldeck != 'None':
                            if any([
                                    'print' in xmldeck.lower(), 'digital'
                                    in xmldeck.lower(), 'paperback'
                                    in xmldeck.lower(), 'one shot'
                                    in re.sub('-', '',
                                              xmldeck.lower()).strip(),
                                    'hardcover' in xmldeck.lower()
                            ]):
                                if all([
                                        'print' in xmldeck.lower(), 'reprint'
                                        not in xmldeck.lower()
                                ]):
                                    xmltype = 'Print'
                                elif 'digital' in xmldeck.lower():
                                    xmltype = 'Digital'
                                elif 'paperback' in xmldeck.lower():
                                    xmltype = 'TPB'
                                elif 'hardcover' in xmldeck.lower():
                                    xmltype = 'HC'
                                elif 'oneshot' in re.sub(
                                        '-', '', xmldeck.lower()).strip():
                                    xmltype = 'One-Shot'
                                else:
                                    xmltype = 'Print'

                        if xmldesc != 'None' and xmltype is None:
                            # Only scan the first 60 chars of the description
                            # so mentions of other editions later in the blurb
                            # don't mis-classify the series.
                            if 'print' in xmldesc[:60].lower() and all([
                                    'print edition can be found'
                                    not in xmldesc.lower(), 'reprints'
                                    not in xmldesc.lower()
                            ]):
                                xmltype = 'Print'
                            elif 'digital' in xmldesc[:60].lower(
                            ) and 'digital edition can be found' not in xmldesc.lower(
                            ):
                                xmltype = 'Digital'
                            elif all([
                                    'paperback' in xmldesc[:60].lower(),
                                    'paperback can be found'
                                    not in xmldesc.lower()
                            ]) or 'collects' in xmldesc[:60].lower():
                                xmltype = 'TPB'
                            elif 'hardcover' in xmldesc[:60].lower(
                            ) and 'hardcover can be found' not in xmldesc.lower(
                            ):
                                xmltype = 'HC'
                            elif any([
                                    'one-shot' in xmldesc[:60].lower(),
                                    'one shot' in xmldesc[:60].lower()
                            ]) and any([
                                    'can be found' not in xmldesc.lower(),
                                    'following the' not in xmldesc.lower()
                            ]):
                                # A 'one-shot' mention preceded by one of the
                                # avoidwords refers to another book -> Print.
                                i = 0
                                xmltype = 'One-Shot'
                                avoidwords = [
                                    'preceding', 'after the special',
                                    'following the'
                                ]
                                while i < 2:
                                    if i == 0:
                                        cbd = 'one-shot'
                                    elif i == 1:
                                        cbd = 'one shot'
                                    tmp1 = xmldesc[:60].lower().find(cbd)
                                    if tmp1 != -1:
                                        for x in avoidwords:
                                            tmp2 = xmldesc[:tmp1].lower().find(
                                                x)
                                            if tmp2 != -1:
                                                xmltype = 'Print'
                                                i = 3
                                                break
                                    i += 1
                            else:
                                xmltype = 'Print'

                        # Flag whether this series is already on the watchlist.
                        if xmlid in comicLibrary:
                            haveit = comicLibrary[xmlid]
                        else:
                            haveit = "No"
                        comiclist.append({
                            'name': xmlTag,
                            'comicyear': xmlYr,
                            'comicid': xmlid,
                            'url': xmlurl,
                            'issues': xmlcnt,
                            'comicimage': xmlimage,
                            'publisher': xmlpub,
                            'description': xmldesc,
                            'deck': xmldeck,
                            'type': xmltype,
                            'haveit': haveit,
                            'lastissueid': xml_lastissueid,
                            'seriesrange':
                            yearRange  # returning additional information about series run polled from CV
                        })
                        #logger.fdebug('year: %s - constraint met: %s [%s] --- 4050-%s' % (xmlYr,xmlTag,xmlYr,xmlid))
                    else:
                        #logger.fdebug('year: ' + str(xmlYr) + ' -  contraint not met. Has to be within ' + str(limityear))
                        pass
            n += 1
        #search results are limited to 100 and by pagination now...let's account for this.
        countResults = countResults + 100

    return comiclist
Example #37
0
def libraryScan(dir=None,
                append=False,
                ComicID=None,
                ComicName=None,
                cron=None):

    if cron and not mylar.LIBRARYSCAN:
        return

    if not dir:
        dir = mylar.COMIC_DIR

    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append:
        dir = dir.encode(mylar.SYS_ENCODING)

    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' %
                    dir.decode(mylar.SYS_ENCODING, 'replace'))
        return

    logger.info('Scanning comic directory: %s' %
                dir.decode(mylar.SYS_ENCODING, 'replace'))

    basedir = dir

    comic_list = []
    comiccnt = 0
    extensions = ('cbr', 'cbz')
    for r, d, f in os.walk(dir):
        #for directory in d[:]:
        #    if directory.startswith("."):
        #        d.remove(directory)
        for files in f:
            if any(files.lower().endswith('.' + x.lower())
                   for x in extensions):
                comic = files
                comicpath = os.path.join(r, files)
                comicsize = os.path.getsize(comicpath)
                print "Comic: " + comic
                print "Comic Path: " + comicpath
                print "Comic Size: " + str(comicsize)

                # We need the unicode path to use for logging, inserting into database
                unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING,
                                                      'replace')

                comiccnt += 1
                comic_dict = {
                    'ComicFilename': comic,
                    'ComicLocation': comicpath,
                    'ComicSize': comicsize,
                    'Unicode_ComicLocation': unicode_comic_path
                }
                comic_list.append(comic_dict)

        logger.info("I've found a total of " + str(comiccnt) +
                    " comics....analyzing now")
        logger.info("comiclist: " + str(comic_list))
    myDB = db.DBConnection()

    #let's load in the watchlist to see if we have any matches.
    logger.info(
        "loading in the watchlist to see if a series is being watched already..."
    )
    watchlist = myDB.select("SELECT * from comics")
    ComicName = []
    DisplayName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        #use the comicname_filesafe to start
        watchdisplaycomic = re.sub('[\_\#\,\/\:\;\!\$\%\&\+\'\?\@]', ' ',
                                   watch['ComicName']).encode('utf-8').strip()
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub(
            '[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ',
            watch['ComicName_Filesafe']).encode('utf-8').strip()
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if ' the ' in watchcomic.lower():
            #drop the 'the' from the watchcomic title for proper comparisons.
            watchcomic = watchcomic[-4:]

        alt_chk = "no"  # alt-checker flag (default to no)

        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch[
                'AlternateSearch'] is not 'None':
            altcomic = re.sub(
                '[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ',
                watch['AlternateSearch']).encode('utf-8').strip()
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        DisplayName.append(watchdisplaycomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt += 1

    logger.info("Successfully loaded " + str(watchcnt) +
                " series from your watchlist.")

    ripperlist = ['digital-', 'empire', 'dcp']

    watchfound = 0

    datelist = [
        'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
        'nov', 'dec'
    ]
    #    datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
    #    #search for number as text, and change to numeric
    #    for numbs in basnumbs:
    #        #print ("numbs:" + str(numbs))
    #        if numbs in ComicName.lower():
    #            numconv = basnumbs[numbs]
    #            #print ("numconv: " + str(numconv))

    for i in comic_list:
        print i['ComicFilename']

        #if mylar.IMP_METADATA:
        #logger.info('metatagging checking enabled.')
        #if read tags is enabled during import, check here.
        #if i['ComicLocation'].endswith('.cbz'):
        #    logger.info('Attempting to read tags present in filename: ' + str(i['ComicLocation']))
        #    issueinfo = helpers.IssueDetails(i['ComicLocation'])
        #    if issueinfo is None:
        #        pass
        #    else:
        #        logger.info('Successfully retrieved some tags. Lets see what I can figure out.')
        #        comicname = issueinfo[0]['series']
        #        logger.fdebug('Series Name: ' + comicname)
        #        issue_number = issueinfo[0]['issue_number']
        #        logger.fdebug('Issue Number: ' + str(issue_number))
        #        issuetitle = issueinfo[0]['title']
        #        logger.fdebug('Issue Title: ' + issuetitle)
        #        issueyear = issueinfo[0]['year']
        #        logger.fdebug('Issue Year: ' + str(issueyear))
        #        # if used by ComicTagger, Notes field will have the IssueID.
        #        issuenotes = issueinfo[0]['notes']
        #        logger.fdebug('Notes: ' + issuenotes)

        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        #let's clean up the filename for matching purposes

        cfilename = re.sub('[\_\#\,\/\:\;\-\!\$\%\&\+\'\?\@]', ' ',
                           comfilename)
        #cfilename = re.sub('\s', '_', str(cfilename))
        d_filename = re.sub('[\_\#\,\/\;\!\$\%\&\?\@]', ' ', comfilename)
        d_filename = re.sub('[\:\-\+\']', '#', d_filename)

        #strip extraspaces
        d_filename = re.sub('\s+', ' ', d_filename)
        cfilename = re.sub('\s+', ' ', cfilename)

        #versioning - remove it
        subsplit = cfilename.replace('_', ' ').split()
        volno = None
        volyr = None
        for subit in subsplit:
            if subit[0].lower() == 'v':
                vfull = 0
                if subit[1:].isdigit():
                    #if in format v1, v2009 etc...
                    if len(subit) > 3:
                        # if it's greater than 3 in length, then the format is Vyyyy
                        vfull = 1  # add on 1 character length to account for extra space
                    cfilename = re.sub(subit, '', cfilename)
                    d_filename = re.sub(subit, '', d_filename)
                    volno = re.sub("[^0-9]", " ", subit)
                elif subit.lower()[:3] == 'vol':
                    #if in format vol.2013 etc
                    #because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
                    logger.fdebug('volume indicator detected as version #:' +
                                  str(subit))
                    cfilename = re.sub(subit, '', cfilename)
                    cfilename = " ".join(cfilename.split())
                    d_filename = re.sub(subit, '', d_filename)
                    d_filename = " ".join(d_filename.split())
                    volyr = re.sub("[^0-9]", " ", subit).strip()
                    logger.fdebug('volume year set as : ' + str(volyr))
        cm_cn = 0

        #we need to track the counter to make sure we are comparing the right array parts
        #this takes care of the brackets :)
        m = re.findall('[^()]+', cfilename)
        lenm = len(m)
        logger.fdebug("there are " + str(lenm) + " words.")
        cnt = 0
        yearmatch = "false"
        foundonwatch = "False"
        issue = 999999

        while (cnt < lenm):
            if m[cnt] is None: break
            if m[cnt] == ' ':
                pass
            else:
                logger.fdebug(str(cnt) + ". Bracket Word: " + m[cnt])
                if cnt == 0:
                    comic_andiss = m[cnt]
                    logger.fdebug("Comic: " + comic_andiss)
                    # if it's not in the standard format this will bork.
                    # let's try to accomodate (somehow).
                    # first remove the extension (if any)
                    extensions = ('cbr', 'cbz')
                    if comic_andiss.lower().endswith(extensions):
                        comic_andiss = comic_andiss[:-4]
                        logger.fdebug("removed extension from filename.")
                    #now we have to break up the string regardless of formatting.
                    #let's force the spaces.
                    comic_andiss = re.sub('_', ' ', comic_andiss)
                    cs = comic_andiss.split()
                    cs_len = len(cs)
                    cn = ''
                    ydetected = 'no'
                    idetected = 'no'
                    decimaldetect = 'no'
                    for i in reversed(xrange(len(cs))):
                        #start at the end.
                        logger.fdebug("word: " + str(cs[i]))
                        #assume once we find issue - everything prior is the actual title
                        #idetected = no will ignore everything so it will assume all title
                        if cs[i][:-2] == '19' or cs[
                                i][:-2] == '20' and idetected == 'no':
                            logger.fdebug("year detected: " + str(cs[i]))
                            ydetected = 'yes'
                            result_comyear = cs[i]
                        elif cs[i].isdigit(
                        ) and idetected == 'no' or '.' in cs[i]:
                            if '.' in cs[i]:
                                #make sure it's a number on either side of decimal and assume decimal issue.
                                decst = cs[i].find('.')
                                dec_st = cs[i][:decst]
                                dec_en = cs[i][decst + 1:]
                                logger.fdebug("st: " + str(dec_st))
                                logger.fdebug("en: " + str(dec_en))
                                if dec_st.isdigit() and dec_en.isdigit():
                                    logger.fdebug(
                                        "decimal issue detected...adjusting.")
                                    issue = dec_st + "." + dec_en
                                    logger.fdebug("issue detected: " +
                                                  str(issue))
                                    idetected = 'yes'
                                else:
                                    logger.fdebug(
                                        "false decimal represent. Chunking to extra word."
                                    )
                                    cn = cn + cs[i] + " "
                                    break
                            issue = cs[i]
                            logger.fdebug("issue detected : " + str(issue))
                            idetected = 'yes'

                        elif '\#' in cs[i] or decimaldetect == 'yes':
                            logger.fdebug("issue detected: " + str(cs[i]))
                            idetected = 'yes'
                        else:
                            cn = cn + cs[i] + " "
                    if ydetected == 'no':
                        #assume no year given in filename...
                        result_comyear = "0000"
                    logger.fdebug("cm?: " + str(cn))
                    if issue is not '999999':
                        comiss = issue
                    else:
                        logger.ERROR(
                            "Invalid Issue number (none present) for " +
                            comfilename)
                        break
                    cnsplit = cn.split()
                    cname = ''
                    findcn = 0
                    while (findcn < len(cnsplit)):
                        cname = cname + cs[findcn] + " "
                        findcn += 1
                    cname = cname[:len(cname) - 1]  # drop the end space...
                    print("assuming name is : " + cname)
                    com_NAME = cname
                    print("com_NAME : " + com_NAME)
                    yearmatch = "True"
                else:
                    logger.fdebug('checking ' + m[cnt])
                    # we're assuming that the year is in brackets (and it should be damnit)
                    if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
                        print("year detected: " + str(m[cnt]))
                        ydetected = 'yes'
                        result_comyear = m[cnt]
                    elif m[cnt][:3].lower() in datelist:
                        logger.fdebug(
                            'possible issue date format given - verifying')
                        #if the date of the issue is given as (Jan 2010) or (January 2010) let's adjust.
                        #keeping in mind that ',' and '.' are already stripped from the string
                        if m[cnt][-4:].isdigit():
                            ydetected = 'yes'
                            result_comyear = m[cnt][-4:]
                            logger.fdebug('Valid Issue year of ' +
                                          str(result_comyear) +
                                          'detected in format of ' +
                                          str(m[cnt]))
            cnt += 1

        displength = len(cname)
        print 'd_filename is : ' + d_filename
        charcount = d_filename.count('#')
        print('charcount is : ' + str(charcount))
        if charcount > 0:
            print('entering loop')
            for i, m in enumerate(re.finditer('\#', d_filename)):
                if m.end() <= displength:
                    print comfilename[m.start():m.end()]
                    # find occurance in c_filename, then replace into d_filname so special characters are brought across
                    newchar = comfilename[m.start():m.end()]
                    print 'newchar:' + str(newchar)
                    d_filename = d_filename[:m.start()] + str(
                        newchar) + d_filename[m.end():]
                    print 'd_filename:' + str(d_filename)

        dispname = d_filename[:displength]
        print('dispname : ' + dispname)

        splitit = []
        watchcomic_split = []
        logger.fdebug("filename comic and issue: " + comic_andiss)

        #changed this from '' to ' '
        comic_iss_b4 = re.sub('[\-\:\,]', ' ', comic_andiss)
        comic_iss = comic_iss_b4.replace('.', ' ')
        comic_iss = re.sub('[\s+]', ' ', comic_iss).strip()
        logger.fdebug("adjusted comic and issue: " + str(comic_iss))
        #remove 'the' from here for proper comparisons.
        if ' the ' in comic_iss.lower():
            comic_iss = re.sub('\\bthe\\b', '', comic_iss).strip()
        splitit = comic_iss.split(None)
        logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " +
                      str(comic_iss))
        #here we cycle through the Watchlist looking for a match.
        while (cm_cn < watchcnt):
            #setup the watchlist
            comname = ComicName[cm_cn]
            comyear = ComicYear[cm_cn]
            compub = ComicPublisher[cm_cn]
            comtotal = ComicTotal[cm_cn]
            comicid = ComicID[cm_cn]
            watch_location = ComicLocation[cm_cn]

            # there shouldn't be an issue in the comic now, so let's just assume it's all gravy.
            splitst = len(splitit)
            watchcomic_split = helpers.cleanName(comname)
            watchcomic_split = re.sub('[\-\:\,\.]', ' ',
                                      watchcomic_split).split(None)

            logger.fdebug(
                str(splitit) + " file series word count: " + str(splitst))
            logger.fdebug(
                str(watchcomic_split) + " watchlist word count: " +
                str(len(watchcomic_split)))
            if (splitst) != len(watchcomic_split):
                logger.fdebug("incorrect comic lengths...not a match")


#                if str(splitit[0]).lower() == "the":
#                    logger.fdebug("THE word detected...attempting to adjust pattern matching")
#                    splitit[0] = splitit[4:]
            else:
                logger.fdebug("length match..proceeding")
                n = 0
                scount = 0
                logger.fdebug("search-length: " + str(splitst))
                logger.fdebug("Watchlist-length: " +
                              str(len(watchcomic_split)))
                while (n <= (splitst) - 1):
                    logger.fdebug("splitit: " + str(splitit[n]))
                    if n < (splitst) and n < len(watchcomic_split):
                        logger.fdebug(
                            str(n) + " Comparing: " +
                            str(watchcomic_split[n]) + " .to. " +
                            str(splitit[n]))
                        if '+' in watchcomic_split[n]:
                            watchcomic_split[n] = re.sub(
                                '+', '', str(watchcomic_split[n]))
                        if str(watchcomic_split[n].lower()) in str(
                                splitit[n].lower()) and len(
                                    watchcomic_split[n]) >= len(splitit[n]):
                            logger.fdebug("word matched on : " +
                                          str(splitit[n]))
                            scount += 1
                        #elif ':' in splitit[n] or '-' in splitit[n]:
                        #    splitrep = splitit[n].replace('-', '')
                        #    print ("non-character keyword...skipped on " + splitit[n])
                    elif str(splitit[n]).lower().startswith('v'):
                        logger.fdebug("possible versioning..checking")
                        #we hit a versioning # - account for it
                        if splitit[n][1:].isdigit():
                            comicversion = str(splitit[n])
                            logger.fdebug("version found: " +
                                          str(comicversion))
                    else:
                        logger.fdebug("Comic / Issue section")
                        if splitit[n].isdigit():
                            logger.fdebug("issue detected")
                        else:
                            logger.fdebug("non-match for: " + str(splitit[n]))
                            pass
                    n += 1
                #set the match threshold to 80% (for now)
                # if it's less than 80% consider it a non-match and discard.
                #splitit has to splitit-1 because last position is issue.
                wordcnt = int(scount)
                logger.fdebug("scount:" + str(wordcnt))
                totalcnt = int(splitst)
                logger.fdebug("splitit-len:" + str(totalcnt))
                spercent = (wordcnt / totalcnt) * 100
                logger.fdebug("we got " + str(spercent) + " percent.")
                if int(spercent) >= 80:
                    logger.fdebug("it's a go captain... - we matched " +
                                  str(spercent) + "%!")
                    logger.fdebug("this should be a match!")
                    logger.fdebug("issue we found for is : " + str(comiss))
                    #set the year to the series we just found ;)
                    result_comyear = comyear
                    #issue comparison now as well
                    logger.info(u"Found " + comname + " (" + str(comyear) +
                                ") issue: " + str(comiss))
                    watchmatch = str(comicid)
                    dispname = DisplayName[cm_cn]
                    foundonwatch = "True"
                    break
                elif int(spercent) < 80:
                    logger.fdebug("failure - we only got " + str(spercent) +
                                  "% right!")
            cm_cn += 1

        if foundonwatch == "False":
            watchmatch = None
        #---if it's not a match - send it to the importer.
        n = 0

        if volyr is None:
            if result_comyear is None:
                result_comyear = '0000'  #no year in filename basically.
        else:
            if result_comyear is None:
                result_comyear = volyr
        if volno is None:
            if volyr is None:
                vol_label = None
            else:
                vol_label = volyr
        else:
            vol_label = volno

        print("adding " + com_NAME + " to the import-queue!")
        impid = com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
        print("impid: " + str(impid))
        import_by_comicids.append({
            "impid":
            impid,
            "watchmatch":
            watchmatch,
            "displayname":
            dispname,
            "comicname":
            com_NAME,
            "comicyear":
            result_comyear,
            "volume":
            vol_label,
            "comfilename":
            comfilename,
            "comlocation":
            comlocation.decode(mylar.SYS_ENCODING)
        })

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        print("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " +
                    str(comiccnt) +
                    " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.IMP_MOVE:
                logger.info(
                    "You checked off Move Files...so that's what I'm going to do"
                )
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                print("Moving files into appropriate directory")
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    print("ComicLocation: " + str(watch_comlocation))
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename']
                    print("Orig. Location: " + str(orig_comlocation))
                    print("Orig. Filename: " + str(orig_filename))
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.IMP_RENAME:
                        print(
                            "Renaming files according to configuration details : "
                            + str(mylar.FILE_FORMAT))
                        renameit = helpers.rename_param(
                            watch_comicid, watch_comicname, watch_comicyear,
                            watch_comiciss)
                        nfilename = renameit['nfilename']

                        dst_path = os.path.join(watch_comlocation, nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        print(
                            "Renaming files not enabled, keeping original filename(s)"
                        )
                        dst_path = os.path.join(watch_comlocation,
                                                orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    print("I'm going to move " + str(orig_comlocation) +
                          " to .." + str(dst_path))
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info(
                            "Failed to move directory - check directories and manually re-run."
                        )
                    wat += 1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    print("ComicID: " + str(watch_comicid))
                    print("Issue#: " + str(watch_issue))
                    issuechk = myDB.selectone(
                        "SELECT * from issues where ComicID=? AND INT_IssueNumber=?",
                        [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        print("no matching issues for this comic#")
                    else:
                        print("...Existing status: " + str(issuechk['Status']))
                        control = {"IssueID": issuechk['IssueID']}
                        values = {"Status": "Archived"}
                        print("...changing status of " +
                              str(issuechk['Issue_Number']) + " to Archived ")
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat += 1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    print("Rescanning.. " + str(c))
                    updater.forceRescan(c)
        if not len(import_by_comicids):
            return "Completed"
    if len(import_by_comicids) > 0:
        import_comicids['comic_info'] = import_by_comicids
        print("import comicids: " + str(import_by_comicids))
        return import_comicids, len(import_by_comicids)
Example #38
0
def upcoming_update(ComicID,
                    ComicName,
                    IssueNumber,
                    IssueDate,
                    forcecheck=None,
                    futurepull=None,
                    altissuenumber=None):
    """Add or refresh an issue entry in the upcoming table for a watched series.

    Looks the issue up in the issues (or annuals) table, refreshes the series
    metadata when it is stale (last update > 5 hours ago) or when a forced
    check is requested, and synchronizes the issue's Status between the
    pull-list tables and the library tables.

    Parameters:
        ComicID        -- series id; a 'G' prefix indicates a GCD-sourced series.
        ComicName      -- series title as it appears on the weekly pull-list.
        IssueNumber    -- issue number from the weekly pull.
        IssueDate      -- publication date string for the issue.
        forcecheck     -- 'yes' forces a series refresh regardless of staleness.
        futurepull     -- when not None, only mark the future-table entry Wanted.
        altissuenumber -- alternate issue number to retry the lookup with.

    Returns:
        A dict of {"Status", "ComicID"} when the issue already exists with a
        Downloaded/Archived/Snatched status (so the caller can reflect it on
        the pull-list); otherwise None.
    """
    # here we add to upcoming table...
    myDB = db.DBConnection()
    dspComicName = ComicName  #to make sure that the word 'annual' will be displayed on screen
    if 'annual' in ComicName.lower():
        adjComicName = re.sub("\\bannual\\b", "",
                              ComicName.lower())  # for use with comparisons.
        logger.fdebug('annual detected - adjusting name to : ' + adjComicName)
    else:
        adjComicName = ComicName
    controlValue = {"ComicID": ComicID}
    newValue = {
        "ComicName": adjComicName,
        "IssueNumber": str(IssueNumber),
        "DisplayComicName": dspComicName,
        "IssueDate": str(IssueDate)
    }

    #let's refresh the series here just to make sure if an issue is available/not.
    mismatch = "no"
    CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?",
                               [ComicID]).fetchone()
    # variloop == '99' flags a ComicVine/GCD mismatch exception for the series.
    if CV_EXcomicid is not None and CV_EXcomicid['variloop'] == '99':
        mismatch = "yes"
    lastupdatechk = myDB.action("SELECT * FROM comics WHERE ComicID=?",
                                [ComicID]).fetchone()
    # Default the staleness counter so the 'hours > 5' check below can never
    # hit an undefined name when the series has no comics-table record yet
    # (previously this path raised NameError).
    hours = 0
    if lastupdatechk is None:
        pullupd = "yes"
    else:
        c_date = lastupdatechk['LastUpdated']
        if c_date is None:
            logger.error(
                lastupdatechk['ComicName'] +
                ' failed during a previous add /refresh. Please either delete and readd the series, or try a refresh of the series.'
            )
            return
        c_obj_date = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        # elapsed hours since the series metadata was last refreshed.
        hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
        # no need to hammer the refresh
        # let's check it every 5 hours (or more)
        #pullupd = "yes"
    if 'annual' in ComicName.lower():
        if mylar.ANNUALS_ON:
            issuechk = myDB.action(
                "SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?",
                [ComicID, IssueNumber]).fetchone()
        else:
            logger.fdebug(
                'Annual detected, but annuals not enabled. Ignoring result.')
            return
    else:
        issuechk = myDB.action(
            "SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?",
            [ComicID, IssueNumber]).fetchone()

    # retry on the normalized alternate issue number if the primary missed.
    if issuechk is None and altissuenumber is not None:
        logger.info('altissuenumber is : ' + str(altissuenumber))
        issuechk = myDB.action(
            "SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?",
            [ComicID, helpers.issuedigits(altissuenumber)]).fetchone()
    if issuechk is None:
        if futurepull is None:
            logger.fdebug(
                adjComicName + ' Issue: ' + str(IssueNumber) +
                ' not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.'
            )
            # we need to either decrease the total issue count, OR indicate that an issue is upcoming.
            upco_results = myDB.action(
                "SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?",
                [ComicID]).fetchall()
            upco_iss = upco_results[0][0]
            #logger.info("upco_iss: " + str(upco_iss))
            if int(upco_iss) > 0:
                #logger.info("There is " + str(upco_iss) + " of " + str(ComicName) + " that's not accounted for")
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": str(upco_iss)}
                myDB.upsert("comics", newVal, newKey)
            elif int(upco_iss) <= 0 and lastupdatechk is not None and lastupdatechk['not_updated_db']:
                #if not_updated_db has a value, and upco_iss is > 0, let's zero it back out cause it's updated now.
                # (guarded: lastupdatechk may be None when the series row is missing)
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": ""}
                myDB.upsert("comics", newVal, newKey)

            if hours > 5 or forcecheck == 'yes':
                pullupd = "yes"
                logger.fdebug('Now Refreshing comic ' + ComicName +
                              ' to make sure it is up-to-date')
                # 'G'-prefixed ids come from the GCD importer.
                if ComicID[:1] == "G":
                    mylar.importer.GCDimport(ComicID, pullupd)
                else:
                    mylar.importer.addComictoDB(ComicID, mismatch, pullupd)
            else:
                logger.fdebug(
                    'It has not been longer than 5 hours since we last did this...we will wait so we do not hammer things.'
                )
                return
        else:
            # if futurepull is not None, let's just update the status and ComicID
            # NOTE: THIS IS CREATING EMPTY ENTRIES IN THE FUTURE TABLE. ???
            nKey = {"ComicID": ComicID}
            nVal = {"Status": "Wanted"}
            myDB.upsert("future", nVal, nKey)

    if issuechk is not None:
        if issuechk['Issue_Number'] == IssueNumber or issuechk[
                'Issue_Number'] == altissuenumber:
            logger.fdebug(
                'Comic series already up-to-date ... no need to refresh at this time.'
            )
            logger.fdebug('Available to be marked for download - checking...' +
                          adjComicName + ' Issue: ' +
                          str(issuechk['Issue_Number']))
            logger.fdebug('...Existing status: ' + str(issuechk['Status']))
            control = {"IssueID": issuechk['IssueID']}
            newValue['IssueID'] = issuechk['IssueID']
            # carry the existing status through so the upsert below does not
            # regress an issue that is already in-flight or completed.
            if issuechk['Status'] == "Snatched":
                values = {"Status": "Snatched"}
                newValue['Status'] = "Snatched"
            elif issuechk['Status'] == "Downloaded":
                values = {"Status": "Downloaded"}
                newValue['Status'] = "Downloaded"
                #if the status is Downloaded and it's on the pullist - let's mark it so everyone can bask in the glory

            elif issuechk['Status'] == "Wanted":
                values = {"Status": "Wanted"}
                newValue['Status'] = "Wanted"
            elif issuechk['Status'] == "Archived":
                values = {"Status": "Archived"}
                newValue['Status'] = "Archived"
            else:
                values = {"Status": "Skipped"}
                newValue['Status'] = "Skipped"
            #was in wrong place :(
        else:
            logger.fdebug(
                'Issues do not match for some reason...weekly new issue: ' +
                str(IssueNumber))
            return

    if mylar.AUTOWANT_UPCOMING:
        #for issues not in db - to be added to Upcoming table.
        if issuechk is None:
            newValue['Status'] = "Wanted"
            logger.fdebug(
                '...Changing Status to Wanted and throwing it in the Upcoming section since it is not published yet.'
            )
        #this works for issues existing in DB...
        elif issuechk['Status'] == "Skipped":
            newValue['Status'] = "Wanted"
            values = {"Status": "Wanted"}
            logger.fdebug('...New status of Wanted')
        elif issuechk['Status'] == "Wanted":
            logger.fdebug('...Status already Wanted .. not changing.')
        else:
            logger.fdebug(
                '...Already have issue - keeping existing status of : ' +
                str(issuechk['Status']))

    if issuechk is None:
        myDB.upsert("upcoming", newValue, controlValue)
    else:
        logger.fdebug('--attempt to find errant adds to Wanted list')
        logger.fdebug('UpcomingNewValue: ' + str(newValue))
        logger.fdebug('UpcomingcontrolValue: ' + str(controlValue))
        if issuechk['IssueDate'] == '0000-00-00' and newValue[
                'IssueDate'] != '0000-00-00':
            logger.fdebug(
                'Found a 0000-00-00 issue - force updating series to try and get it proper.'
            )
            dateVal = {
                "IssueDate": newValue['IssueDate'],
                "ComicName": issuechk['ComicName'],
                "Status": newValue['Status'],
                "IssueNumber": issuechk['Issue_Number']
            }
            logger.fdebug('updating date in upcoming table to : ' +
                          str(newValue['IssueDate']))
            logger.fdebug('ComicID:' + str(controlValue))
            myDB.upsert("upcoming", dateVal, controlValue)
            logger.fdebug('Temporarily putting the Issue Date for ' +
                          str(issuechk['Issue_Number']) + ' to ' +
                          str(newValue['IssueDate']))
            values = {"IssueDate": newValue['IssueDate']}
            #if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd='yes')
            #else: mylar.importer.addComictoDB(ComicID,mismatch,pullupd='yes')

        if 'annual' in ComicName.lower():
            myDB.upsert("annuals", values, control)
        else:
            myDB.upsert("issues", values, control)

        if issuechk['Status'] == 'Downloaded' or issuechk[
                'Status'] == 'Archived' or issuechk['Status'] == 'Snatched':
            logger.fdebug('updating Pull-list to reflect status.')
            downstats = {
                "Status": issuechk['Status'],
                "ComicID": issuechk['ComicID']
            }
            return downstats
Example #39
0
    def Process(self):
            self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
            self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
            logger.fdebug("nzb name: " + str(self.nzb_name))
            logger.fdebug("nzb folder: " + str(self.nzb_folder))
            if mylar.USE_SABNZBD==0:
                logger.fdebug("Not using SABNzbd")
            else:
                # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
                if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY is not 'None' and len(mylar.SAB_DIRECTORY) > 4:
                    self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
    
                #lookup nzb_name in nzblog table to get issueid
    
                #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
                #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
                querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                #logger.info("querysab_string:" + str(querysab))
                file = urllib2.urlopen(querysab)
                data = file.read()
                file.close()
                dom = parseString(data)

                try:
                    sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
                except:
                    errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
                    logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
                    return
                sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
                logger.fdebug("SAB Replace Spaces: " + str(sabreps))
                logger.fdebug("SAB Replace Dots: " + str(sabrepd))
            if mylar.USE_NZBGET==1:
                logger.fdebug("Using NZBGET")
                logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
            myDB = db.DBConnection()

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext, logger.DEBUG)
                nzbname = re.sub(str(ext), '', str(nzbname))

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:\?]', '', str(nzbname))
            nzbname = re.sub('[\&]', 'and', str(nzbname))

            logger.fdebug("After conversions, nzbname is : " + str(nzbname))
#            if mylar.USE_NZBGET==1:
#                nzbname=self.nzb_name
            self._log("nzbname: " + str(nzbname), logger.DEBUG)

            nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
                logger.fdebug("Failure - could not locate nzbfile initially.")
                # if failed on spaces, change it all to decimals and try again.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
                logger.fdebug("trying again with nzbname of : " + str(nzbname))
                nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
                    return
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
                    issueid = nzbiss['IssueID']
            else: 
                issueid = nzbiss['IssueID']
                logger.fdebug("issueid:" + str(issueid))
                sarc = nzbiss['SARC']
                #use issueid to get publisher, series, year, issue number
            issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
            if issuenzb is not None:
                if helpers.is_number(issueid):
                    sandwich = int(issuenzb['IssueID'])
            else:
                #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
                #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
                if 'S' in issueid:
                    sandwich = issueid
                elif 'G' in issueid: 
                    sandwich = 1
            if helpers.is_number(sandwich):
                if sandwich < 900000:
                    # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
                    pass
            else:
                if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
                    # this has no issueID, therefore it's a one-off or a manual post-proc.
                    # At this point, let's just drop it into the Comic Location folder and forget about it..
                    if 'S' in sandwich:
                        self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                        logger.info("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                        if mylar.STORYARCDIR:
                            storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
                            self._log("StoryArc Directory set to : " + storyarcd, logger.DEBUG)
                        else:
                            self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)

                    else:
                        self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
                        logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
                        self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)

                    for root, dirnames, filenames in os.walk(self.nzb_folder):
                        for filename in filenames:
                            if filename.lower().endswith(extensions):
                                ofilename = filename
                                path, ext = os.path.splitext(ofilename)
      
                    if 'S' in sandwich:
                        if mylar.STORYARCDIR:
                            grdst = storyarcd
                        else:
                            grdst = mylar.DESTINATION_DIR
                    else:
                        if mylar.GRABBAG_DIR:
                            grdst = mylar.GRABBAG_DIR
                        else:
                            grdst = mylar.DESTINATION_DIR

                    filechecker.validateAndCreateDirectory(grdst, True)
    
                    grab_dst = os.path.join(grdst, ofilename)
                    self._log("Destination Path : " + grab_dst, logger.DEBUG)
                    logger.info("Destination Path : " + grab_dst)
                    grab_src = os.path.join(self.nzb_folder, ofilename)
                    self._log("Source Path : " + grab_src, logger.DEBUG)
                    logger.info("Source Path : " + grab_src)

                    logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))

                    try:
                        shutil.move(grab_src, grab_dst)
                    except (OSError, IOError):
                        self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                        logger.debug("Failed to move directory - check directories and manually re-run.")
                        return
                    #tidyup old path
                    try:
                        shutil.rmtree(self.nzb_folder)
                    except (OSError, IOError):
                        self._log("Failed to remove temporary directory.", logger.DEBUG)
                        logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
                        return

                    logger.debug("Removed temporary directory : " + str(self.nzb_folder))
                    self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
                    #delete entry from nzblog table
                    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])

                    if 'S' in issueid:
                        issuearcid = re.sub('S', '', issueid)
                        logger.info("IssueArcID is : " + str(issuearcid))
                        ctrlVal = {"IssueArcID":  issuearcid}
                        newVal = {"Status":    "Downloaded",
                                  "Location":  grab_dst }
                        myDB.upsert("readinglist",newVal,ctrlVal)
                        logger.info("updated status to Downloaded")
                    return self.log

            comicid = issuenzb['ComicID']
            issuenumOG = issuenzb['Issue_Number']
            #issueno = str(issuenum).split('.')[0]
            #new CV API - removed all decimals...here we go AGAIN!
            issuenum = issuenumOG
            issue_except = 'None'
            if 'au' in issuenum.lower():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AU'
            if '.' in issuenum:
                iss_find = issuenum.find('.')
                iss_b4dec = issuenum[:iss_find]
                iss_decval = issuenum[iss_find+1:]
                if int(iss_decval) == 0:
                    iss = iss_b4dec
                    issdec = int(iss_decval)
                    issueno = str(iss)
                    self._log("Issue Number: " + str(issueno), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(issueno))
                else:
                    if len(iss_decval) == 1:
                        iss = iss_b4dec + "." + iss_decval
                        issdec = int(iss_decval) * 10
                    else:
                        iss = iss_b4dec + "." + iss_decval.rstrip('0')
                        issdec = int(iss_decval.rstrip('0')) * 10
                    issueno = iss_b4dec
                    self._log("Issue Number: " + str(iss), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(iss))
            else:
                iss = issuenum
                issueno = str(iss)
            # issue zero-suppression here
            if mylar.ZERO_LEVEL == "0": 
                zeroadd = ""
            else:
                if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
                elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
                elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

            logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

            if str(len(issueno)) > 1:
                if int(issueno) < 10:
                    self._log("issue detected less than 10", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None': 
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                elif int(issueno) >= 10 and int(issueno) < 100:
                    self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
                    if mylar.ZERO_LEVEL_N == "none":
                        zeroadd = ""
                    else:
                        zeroadd = "0"
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                           prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                else:
                    self._log("issue detected greater than 100", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
            else:
                prettycomiss = str(issueno)
                self._log("issue length error - cannot determine length. Defaulting to None:  " + str(prettycomiss), logger.DEBUG)

            logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
            issueyear = issuenzb['IssueDate'][:4]
            self._log("Issue Year: " + str(issueyear), logger.DEBUG)
            logger.fdebug("Issue Year : " + str(issueyear))
            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            publisher = comicnzb['ComicPublisher']
            self._log("Publisher: " + publisher, logger.DEBUG)
            logger.fdebug("Publisher: " + str(publisher))
            #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
            series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
            self._log("Series: " + series, logger.DEBUG)
            logger.fdebug("Series: " + str(series))
            seriesyear = comicnzb['ComicYear']
            self._log("Year: " + seriesyear, logger.DEBUG)
            logger.fdebug("Year: "  + str(seriesyear))
            comlocation = comicnzb['ComicLocation']
            self._log("Comic Location: " + comlocation, logger.DEBUG)
            logger.fdebug("Comic Location: " + str(comlocation))
            comversion = comicnzb['ComicVersion']
            self._log("Comic Version: " + str(comversion), logger.DEBUG)
            logger.fdebug("Comic Version: " + str(comversion))
            if comversion is None:
                comversion = 'None'
            #if comversion is None, remove it so it doesn't populate with 'None'
            if comversion == 'None':
                chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
                chunk_f = re.compile(r'\s+')
                chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
                logger.fdebug("No version # found for series, removing from filename")
                logger.fdebug("new format is now: " + str(chunk_file_format))
            else:
                chunk_file_format = mylar.FILE_FORMAT
            #Run Pre-script

            if mylar.ENABLE_PRE_SCRIPTS:
                nzbn = self.nzb_name #original nzb name
                nzbf = self.nzb_folder #original nzb folder
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_pre_scripts(nzbn, nzbf, seriesmetadata )

        #rename file and move to new path
        #nfilename = series + " " + issueno + " (" + seriesyear + ")"

            file_values = {'$Series':    series,
                           '$Issue':     prettycomiss,
                           '$Year':      issueyear,
                           '$series':    series.lower(),
                           '$Publisher': publisher,
                           '$publisher': publisher.lower(),
                           '$VolumeY':   'V' + str(seriesyear),
                           '$VolumeN':   comversion
                          }

            ofilename = None

            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)

            if ofilename is None:
                logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
                return
            self._log("Original Filename: " + ofilename, logger.DEBUG)
            self._log("Original Extension: " + ext, logger.DEBUG)
            logger.fdebug("Original Filname: " + str(ofilename))
            logger.fdebug("Original Extension: " + str(ext))

            if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
                self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
                logger.fdebug("Rename Files isn't enabled - keeping original filename.")
                #check if extension is in nzb_name - will screw up otherwise
                if ofilename.lower().endswith(extensions):
                    nfilename = ofilename[:-4]
                else:
                    nfilename = ofilename
            else:
                nfilename = helpers.replace_all(chunk_file_format, file_values)
                if mylar.REPLACE_SPACES:
                    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
            nfilename = re.sub('[\,\:\?]', '', nfilename)
            self._log("New Filename: " + nfilename, logger.DEBUG)
            logger.fdebug("New Filename: " + str(nfilename))

            src = os.path.join(self.nzb_folder, ofilename)

            filechecker.validateAndCreateDirectory(comlocation, True)

            if mylar.LOWERCASE_FILENAMES:
                dst = (comlocation + "/" + nfilename + ext).lower()
            else:
                dst = comlocation + "/" + nfilename + ext.lower()    
            self._log("Source:" + src, logger.DEBUG)
            self._log("Destination:" +  dst, logger.DEBUG)
            logger.fdebug("Source: " + str(src))
            logger.fdebug("Destination: " + str(dst))

            os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
            src = os.path.join(self.nzb_folder, str(nfilename + ext))
            try:
                shutil.move(src, dst)
            except (OSError, IOError):
                self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return
            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return

            self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
                    #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
                    #update snatched table to change status to Downloaded
            updater.foundsearch(comicid, issueid, down='True')
                    #force rescan of files
            updater.forceRescan(comicid)
            logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
            self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)

            if mylar.PROWL_ENABLED:
                pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
                logger.info(u"Prowl request")
                prowl = notifiers.PROWL()
                prowl.notify(pushmessage,"Download and Postprocessing completed")

            if mylar.NMA_ENABLED:
                nma = notifiers.NMA()
                nma.notify(series, str(issueyear), str(issuenumOG))

            if mylar.PUSHOVER_ENABLED:
                pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
                logger.info(u"Pushover request")
                pushover = notifiers.PUSHOVER()
                pushover.notify(pushmessage, "Download and Post-Processing completed")
             
            # retrieve/create the corresponding comic objects

            if mylar.ENABLE_EXTRA_SCRIPTS:
                folderp = str(dst) #folder location after move/rename
                nzbn = self.nzb_name #original nzb name
                filen = str(nfilename + ext) #new filename
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata )

            return self.log
Example #40
0
 def run(self):
     """Kick off a check of the weekly pull-list for new releases/updates."""
     msg = '[WEEKLY] Checking Weekly Pull-list for new releases/updates'
     logger.info(msg)
     mylar.weeklypull.pullit()
Example #41
0
def nzbdbsearch(seriesname,
                issue,
                comicid=None,
                nzbprov=None,
                searchYear=None,
                ComicVersion=None):
    """Search the cached RSS results (rssdb table) for a series.

    Looks up entries previously cached from the given provider's RSS feed
    whose Title matches the series name (via SQL LIKE), retrying with any
    '##'-delimited alternate series names when the first pass finds nothing.

    Returns a dict with an 'entries' list of result dicts
    (title/link/pubdate/site/length), the string "no results" when nothing
    matches, or None for an invalid comicid.
    """
    myDB = db.DBConnection()
    seriesname_alt = None
    if comicid is not None and comicid != 'None':
        # resolve the canonical series name (and alternates) from the watchlist
        snm = myDB.selectone("SELECT * FROM comics WHERE comicid=?",
                             [comicid]).fetchone()
        if snm is None:
            logger.info('Invalid ComicID of ' + str(comicid) +
                        '. Aborting search.')
            return
        seriesname = snm['ComicName']
        seriesname_alt = snm['AlternateSearch']

    # replace punctuation/whitespace with SQL wildcards for the LIKE match
    nsearch_seriesname = re.sub('[\'\!\@\#\$\%\:\;\/\\=\?\.\-\s]', '%',
                                seriesname)
    nsearch = '%' + nsearch_seriesname + "%"
    nresults = myDB.select("SELECT * FROM rssdb WHERE Title like ? AND Site=?",
                           [nsearch, nzbprov])
    # treat both None and an empty result set as "no hits"
    if not nresults:
        logger.fdebug('nzb search returned no results for ' + seriesname)
        if seriesname_alt is None:
            logger.fdebug('no nzb Alternate name given. Aborting search.')
            return "no results"
        # retry using each alternate name.  (Previously this branch compared
        # the split list against 0 and referenced an undefined name, then
        # attempted `None += ...` - both fixed here.)
        nresults = []
        for calt in seriesname_alt.split('##'):
            AS_Alternate = re.sub('##', '', calt)
            altresults = myDB.select(
                "SELECT * FROM rssdb WHERE Title like ? AND Site=?",
                [AS_Alternate, nzbprov])
            if altresults:
                nresults += altresults
        if not nresults:
            logger.fdebug('nzb alternate name search returned no results.')
            return "no results"

    nzbtheinfo = []
    nzbinfo = {}

    if nzbprov == 'experimental':
        # substrings that mark a posting as scene chatter rather than a comic
        except_list = [
            'releases', 'gold line', 'distribution', '0-day', '0 day'
        ]

        if ComicVersion:
            # strip the volume marker down to its numeric part ('v2' -> '2')
            ComVersChk = re.sub("[^0-9]", "", ComicVersion)
            if ComVersChk == '':
                ComVersChk = 0
            else:
                # BUGFIX: this branch previously also assigned 0, which made
                # the year-matching logic below unreachable.
                ComVersChk = int(ComVersChk)
        else:
            ComVersChk = 0

        for results in nresults:
            title = results['Title']
            splitTitle = title.split("\"")
            noYear = 'False'

            for subs in splitTitle:
                if len(subs) > 10 and not any(d in subs.lower()
                                              for d in except_list):
                    if ComVersChk == 0:
                        noYear = 'False'

                    if ComVersChk != 0 and searchYear not in subs:
                        noYear = 'True'
                        noYearline = subs

                    if searchYear in subs and noYear == 'True':
                        # year was found on a later fragment of the same
                        # line - append it to the held fragment
                        subs = noYearline + ' (' + searchYear + ')'
                        noYear = 'False'

                    if noYear == 'False':
                        nzbtheinfo.append({
                            'title': subs,
                            'link': re.sub('\/release\/', '/download/',
                                           results['Link']),
                            'pubdate': str(results['PubDate']),
                            'site': str(results['Site']),
                            'length': str(results['Size'])
                        })

    else:
        for nzb in nresults:
            # no need to parse here, just compile and throw it back ....
            nzbtheinfo.append({
                'title': nzb['Title'],
                'link': nzb['Link'],
                'pubdate': nzb['Pubdate'],
                'site': nzb['Site'],
                'length': nzb['Size']
            })

    nzbinfo['entries'] = nzbtheinfo
    return nzbinfo
Example #42
0
def nzbs(provider=None):
    """Poll the RSS feed of every enabled NZB provider and cache the results.

    Builds the list of enabled providers (nzb.su, dognzb, experimental and
    any enabled newznab hosts), fetches each provider's RSS feed, normalizes
    the entries into Site/Title/Link/Pubdate/Size dicts and hands them to
    rssdbupdate() for caching.

    The ``provider`` argument is accepted for interface compatibility but
    is not used.
    """
    nzbprovider = []
    nzbp = 0
    if mylar.NZBSU == 1:
        nzbprovider.append('nzb.su')
        nzbp += 1
    if mylar.DOGNZB == 1:
        nzbprovider.append('dognzb')
        nzbp += 1
    if mylar.EXPERIMENTAL == 1:
        nzbprovider.append('experimental')
        nzbp += 1

    newznabs = 0
    newznab_hosts = []

    if mylar.NEWZNAB == 1:
        for newznab_host in mylar.EXTRA_NEWZNABS:
            # index 4 is the per-host 'enabled' flag (stored as str or int)
            if newznab_host[4] == '1' or newznab_host[4] == 1:
                newznab_hosts.append(newznab_host)
                nzbprovider.append('newznab')
                newznabs += 1
                logger.fdebug('newznab name:' + str(newznab_host[0]) +
                              ' - enabled: ' + str(newznab_host[4]))

    providercount = int(nzbp + newznabs)
    logger.fdebug('there are : ' + str(providercount) +
                  ' nzb RSS search providers you have enabled.')
    # nzbpr indexes the last enabled provider.  With nothing enabled it is
    # -1 and the while-loop below is skipped entirely.  (A previous no-op
    # comparison `nzbpr == 0` here was removed; clamping to 0 would have
    # caused an IndexError on an empty provider list.)
    nzbpr = providercount - 1

    feeddata = []
    feedthis = []
    ft = 0
    totNum = 0
    nonexp = "no"

    while nzbpr >= 0:
        if nzbprovider[nzbpr] == 'experimental':
            feed = feedparser.parse(
                "http://nzbindex.nl/rss/alt.binaries.comics.dcp/?sort=agedesc&max=50&more=1"
            )

            totNum = len(feed.entries)
            site = 'experimental'

            i = 0
            for entry in feed['entries']:
                # the enclosure carries the actual nzb url and its size
                tmpsz = feed.entries[i].enclosures[0]
                feeddata.append({
                    'Site': site,
                    'Title': feed.entries[i].title,
                    'Link': tmpsz['url'],
                    'Pubdate': feed.entries[i].updated,
                    'Size': tmpsz['length']
                })
                i += 1
            logger.info(str(i) + ' results from Experimental feed indexed.')
            nzbpr -= 1
        else:
            if nzbprovider[nzbpr] == 'newznab':
                for newznab_host in newznab_hosts:
                    # index 3 optionally holds 'uid#category'
                    if newznab_host[3] is None:
                        newznabuid = '1'
                        newznabcat = '7030'
                    else:
                        if '#' not in newznab_host[3]:
                            newznabuid = newznab_host[3]
                            newznabcat = '7030'
                        else:
                            newzst = newznab_host[3].find('#')
                            newznabuid = newznab_host[3][:newzst]
                            newznabcat = newznab_host[3][newzst + 1:]
                    feed = newznab_host[1].rstrip() + '/rss?t=' + str(
                        newznabcat) + '&dl=1&i=' + str(
                            newznabuid) + '&r=' + newznab_host[2].rstrip()
                    feedme = feedparser.parse(feed)
                    site = newznab_host[0].rstrip()
                    feedthis.append({"feed": feedme, "site": site})
                    totNum += len(feedme.entries)
                    ft += 1
                    nonexp = "yes"
                    # one 'newznab' slot was appended per host, so step
                    # past one slot per host processed
                    nzbpr -= 1
            elif nzbprovider[nzbpr] == 'nzb.su':
                if mylar.NZBSU_UID is None:
                    mylar.NZBSU_UID = '1'
                feed = 'http://api.nzb.su/rss?t=7030&dl=1&i=' + mylar.NZBSU_UID + '&r=' + mylar.NZBSU_APIKEY
                feedme = feedparser.parse(feed)
                site = nzbprovider[nzbpr]
                feedthis.append({"feed": feedme, "site": site})
                totNum += len(feedme.entries)
                ft += 1
                nonexp = "yes"
                nzbpr -= 1
            elif nzbprovider[nzbpr] == 'dognzb':
                if mylar.DOGNZB_UID is None:
                    mylar.DOGNZB_UID = '1'
                feed = 'https://dognzb.cr/rss.cfm?r=' + mylar.DOGNZB_APIKEY + '&t=7030'
                feedme = feedparser.parse(feed)
                site = nzbprovider[nzbpr]
                ft += 1
                nonexp = "yes"
                feedthis.append({"feed": feedme, "site": site})
                totNum += len(feedme.entries)
                nzbpr -= 1

    i = 0
    if nonexp == "yes":
        # normalize the non-experimental feeds collected above
        for fobj in feedthis:
            sitei = 0
            site = fobj['site']
            logger.fdebug(str(site) + " now being updated...")
            for entry in fobj['feed'].entries:
                if site == 'dognzb':
                    # dognzb's rss carries no enclosure item, so no size
                    # is available for its entries
                    tmpsz = 0
                    feeddata.append({
                        'Site': site,
                        'Title': entry.title,
                        'Link': entry.link,
                        'Pubdate': entry.updated,
                        'Size': tmpsz
                    })
                else:
                    # this should work for all newznabs (nzb.su included)
                    tmpsz = entry.enclosures[0]
                    feeddata.append({
                        'Site': site,
                        'Title': entry.title,
                        'Link': entry.link,
                        'Pubdate': entry.updated,
                        'Size': tmpsz['length']
                    })
                sitei += 1
            logger.info('[' + str(site) + '] ' + str(sitei) +
                        ' entries indexed.')
            i += sitei
    if i > 0:
        logger.info(
            '[RSS] ' + str(i) +
            ' entries have been indexed and are now going to be stored for caching.'
        )
        rssdbupdate(feeddata, i, 'usenet')
    return
Example #43
0
def tehMain(forcerss=None):
    """Scheduled entry point for the RSS feed check.

    Throttles itself via mylar.RSS_LASTRUN / mylar.RSS_CHECKINTERVAL unless
    ``forcerss`` is truthy, then runs the torrent and nzb RSS checks and a
    watchlist search against the freshly cached results.
    """
    logger.info('RSS Feed Check was last run at : ' + str(mylar.RSS_LASTRUN))
    firstrun = "no"
    #check the last run of rss to make sure it's not hammering.
    # (idiom fix: test truthiness rather than `forcerss == True`, so any
    # truthy force value bypasses the throttle)
    if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0' or forcerss:
        logger.info('RSS Feed Check First Ever Run.')
        firstrun = "yes"
        mins = 0
    else:
        c_obj_date = datetime.datetime.strptime(mylar.RSS_LASTRUN,
                                                "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        # minutes elapsed since the last run (float; 60.0 forces true division)
        mins = (absdiff.days * 24 * 60 * 60 +
                absdiff.seconds) / 60.0

    if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL):
        logger.fdebug(
            'RSS Check has taken place less than the threshold - not initiating at this time.'
        )
        return

    mylar.RSS_LASTRUN = helpers.now()
    logger.fdebug('Updating RSS Run time to : ' + str(mylar.RSS_LASTRUN))
    mylar.config_write()

    #function for looping through nzbs/torrent feed
    if mylar.ENABLE_TORRENTS:
        logger.info('[RSS] Initiating Torrent RSS Check.')
        if mylar.ENABLE_KAT:
            logger.info('[RSS] Initiating Torrent RSS Feed Check on KAT.')
            torrents(pickfeed='3')
            torrents(pickfeed='6')
        if mylar.ENABLE_CBT:
            logger.info('[RSS] Initiating Torrent RSS Feed Check on CBT.')
            torrents(pickfeed='1')
            torrents(pickfeed='4')
    logger.info('[RSS] Initiating RSS Feed Check for NZB Providers.')
    nzbs()
    logger.info('[RSS] RSS Feed Check/Update Complete')
    logger.info('[RSS] Watchlist Check for new Releases')
    mylar.search.searchforissue(rsscheck='yes')
    logger.info('[RSS] Watchlist Check complete.')
    if forcerss:
        logger.info('Successfully ran RSS Force Check.')

    return
Example #44
0
def NZB_SEARCH(ComicName, IssueNumber, ComicYear, SeriesYear, nzbprov, nzbpr):
    """Manually search a single NZB provider for one comic issue.

    Builds provider-specific search URLs from ComicName/IssueNumber
    (trying zero-padded issue variants 00X / 0X / X), parses the feed
    results, fuzzy-matches title words / year / issue number, and on a
    match either drops the .nzb into the blackhole directory or hands
    it to SABnzbd (pausing the queue, optionally renaming the queued
    item, then resuming).  Returns the string "yes" or "no" depending
    on whether a matching release was snatched.

    NOTE(review): legacy Python 2 code - print statements,
    urllib/urllib2, the `<>` operator and mixed tab/space indentation
    are used throughout; do not run under Python 3 without porting.
    """
    logger.info(u"Shhh be very quiet...I'm looking for " + ComicName + " issue: " + str(IssueNumber) + " using " + str(nzbprov))
    if nzbprov == 'nzb.su':
        apikey = mylar.NZBSU_APIKEY
    elif nzbprov == 'dognzb':
        apikey = mylar.DOGNZB_APIKEY
    elif nzbprov == 'experimental':
        apikey = 'none'
    #print ("-------------------------")

    #preferred quality maps to a filename-extension hint appended to the query.
    if mylar.PREFERRED_QUALITY == 0: filetype = ""
    elif mylar.PREFERRED_QUALITY == 1: filetype = ".cbr"
    elif mylar.PREFERRED_QUALITY == 2: filetype = ".cbz"

    # figure out what was missed via rss feeds and do a manual search via api
    #tsc = int(tot-1)
    findcomic = []
    findcomiciss = []
    findcount = 0
    ci = ""
    comsearch = []
    isssearch = []
    comyear = str(ComicYear)

    #print ("-------SEARCH FOR MISSING------------------")
    findcomic.append(str(ComicName))
    #strip a trailing ".00" and any non-digits from the issue number for searching.
    IssueNumber = str(re.sub("\.00", "", str(IssueNumber)))
    #print ("issueNumber" + str(IssueNumber))
    findcomiciss.append(str(re.sub("\D", "", str(IssueNumber))))
    
    #print ("we need : " + str(findcomic[findcount]) + " issue: #" + str(findcomiciss[findcount]))
    # replace whitespace in comic name with %20 for api search
    cm = re.sub(" ", "%20", str(findcomic[findcount]))
    #print (cmi)
    #cmloopit = how many zero-padding variants to try (3 for 1-digit issues: 00X/0X/X).
    if len(str(findcomiciss[findcount])) == 1:
        cmloopit = 3
    elif len(str(findcomiciss[findcount])) == 2:
        cmloopit = 2
    else:
        cmloopit = 1
    isssearch.append(str(findcomiciss[findcount]))
    comsearch.append(cm)
    findcount+=1

    # ----

    #print ("------RESULTS OF SEARCH-------------------")
    findloop = 0
    foundcomic = []

    #---issue problem
    # if issue is '011' instead of '11' in nzb search results, will not have same
    # results. '011' will return different than '11', as will '009' and '09'.

    while (findloop < (findcount) ):
        comsrc = comsearch[findloop]
        #print (str(comsearch[findloop]))
        while (cmloopit >= 1 ):
                # here we account for issue pattern variations
            if cmloopit == 3:
                comsearch[findloop] = comsrc + "%2000" + isssearch[findloop] + "%20" + str(filetype)
                #print (comsearch[findloop])
            elif cmloopit == 2:
                comsearch[findloop] = comsrc + "%200" + isssearch[findloop] + "%20" + str(filetype)
                #print (comsearch[findloop])
            elif cmloopit == 1:
                comsearch[findloop] = comsrc + "%20" + isssearch[findloop] + "%20" + str(filetype)
                #print (comsearch[findloop])
            #print ("NZB Provider set to: " + nzbprov)
            #build the provider search URL (newznab-style api for dognzb/nzb.su).
            if nzbprov != 'experimental':
                if nzbprov == 'dognzb':
                    #print ("dog-search.")
                    findurl = "http://dognzb.cr/api?t=search&apikey=" + str(apikey) + "&q=" + str(comsearch[findloop]) + "&o=xml&cat=7030"
                elif nzbprov == 'nzb.su':
                    #print ("nzb.su search")
                    findurl = "http://nzb.su/api?t=search&q=" + str(comsearch[findloop]) + "&apikey=" + str(apikey) + "&o=xml&cat=7030"
                bb = feedparser.parse(findurl)
                #print (findurl)
            elif nzbprov == 'experimental':
                #print ("experimental raw search")
                bb = parseit.MysterBinScrape(comsearch[findloop], comyear)
            done = False
            foundc = "no"
            if bb == "no results":               
                #print ("no results found...attempting alternate search")
                pass
            elif (len(bb['entries']) == 0):
                #print ("Nothing found for : " + str(findcomic[findloop]) + " Issue: #" + str(findcomiciss[findloop]))
                foundc = "no"
            else:
                #print ("Found for: " + str(findcomic[findloop]))
                for entry in bb['entries']:
                    #print str(entry['title'])
                    cleantitle = helpers.cleanName(str(entry['title']))
                    if done:
                        break
                    #print ("title: " + str(cleantitle))
                    #print ("link: " + entry['link'])
                #let's narrow search down - take out year (2010), (2011), etc
                #let's check for first occurance of '(' as generally indicates
                #that the 'title' has ended

                    ripperlist=['digital-',
                                'empire',
                                'dcp']
                    #this takes care of the brackets :)                    
#                    m = re.findall(r"\((\w+)\)", cleantitle)                 
                    m = re.findall('[^()]+', cleantitle)
                    lenm = len(m)
                    #print ("there are " + str(lenm) + " words.")
                    cnt = 0
                    #classify each bracketed token: year, 'digital', 'of' (mini-series),
                    #'cover', or a known scanner/ripper tag.
                    while (cnt < lenm):
                        if m[cnt] is None: break
                        #if m[cnt] == ' ': print ("space detected")
                        #print (str(cnt) + ". Bracket Word: " + m[cnt] )                        
                        if cnt == 0:
                            comic_andiss = m[cnt]
                            print ("Comic:" + str(comic_andiss))
                        # NOTE(review): m[cnt][:-2] equals the century prefix only for
                        # 4-char tokens ('2010'[:-2] == '20'); for longer tokens this
                        # misfires - presumably m[cnt][:2] was intended. TODO confirm.
                        if m[cnt][:-2] == '19' or m[cnt][:-2] == '20': 
                            print ("year detected!")
                            result_comyear = m[cnt]
                            if str(comyear) in result_comyear:
                                print (str(comyear) + " - right - years match baby!")
                                yearmatch = "true"
                            else:
                                print (str(comyear) + " - not right - years don't match ")
                                yearmatch = "false"
                        if 'digital' in m[cnt] and len(m[cnt]) == 7: 
                            pass
                            #print ("digital edition")
                        if ' of ' in m[cnt]:
                            #print ("mini-series detected : " + str(m[cnt]))
                            result_of = m[cnt]
                        if 'cover' in m[cnt]: 
                            #print ("covers detected")
                            result_comcovers = m[cnt]
                        for ripper in ripperlist:
                            if ripper in m[cnt]:
                                #print ("Scanner detected:" + str(m[cnt]))
                                result_comscanner = m[cnt]
                        cnt+=1

                    # NOTE(review): yearmatch is only assigned inside the year branch
                    # above - if no year token was seen, this raises NameError (or
                    # reuses the value from a previous entry iteration). Verify.
                    if yearmatch == "false": break
                    
                    splitit = []   
                    watchcomic_split = []
                    comic_iss = re.sub('[\-\:\,]', '', str(comic_andiss))
                    splitit = comic_iss.split(None)
                    watchcomic_split = findcomic[findloop].split(None)

                    #a 'v#' token means the second-to-last word is a volume marker,
                    #so exclude it (and the issue number) from the name-word count.
                    bmm = re.findall('v\d', comic_iss)
                    #print ("vers - " + str(bmm))
                    if len(bmm) > 0: splitst = len(splitit) - 2
                    else: splitst = len(splitit) - 1
                    if (splitst) != len(watchcomic_split):
                        print ("incorrect comic lengths...not a match")
                        if str(splitit[0]).lower() == "the":
                            print ("THE word detected...attempting to adjust pattern matching")
                            # NOTE(review): this assigns a *list slice* into element 0
                            # (splitit[0] becomes a list) - likely meant
                            # splitit = splitit[1:] to drop the leading 'the'. Verify.
                            splitit[0] = splitit[4:]
                    else:
                        print ("length match..proceeding")
                        n = 0
                        scount = 0
                        #print ("length:" + str(len(splitit)))
                        #compare the result title word-by-word against the watchlist
                        #name; the final word is expected to be the issue number.
                        while ( n <= len(splitit)-1 ):
                            if n < len(splitit)-1:
                                #print ( str(n) + ". Comparing: " + watchcomic_split[n] + " .to. " + splitit[n] )
                                if str(watchcomic_split[n].lower()) in str(splitit[n].lower()):
                                    #print ("word matched on : " + splitit[n])
                                    scount+=1
                                #elif ':' in splitit[n] or '-' in splitit[n]:
                                #    splitrep = splitit[n].replace('-', '')
                                #    print ("non-character keyword...skipped on " + splitit[n])
                                elif len(splitit[n]) < 3 or (splitit[n][1:]) == "v":
                                    #print ("possible verisoning..checking")
                                    #we hit a versioning # - account for it
                                    if splitit[n][2:].isdigit():
                                        comicversion = str(splitit[n])
                                        #print ("version found:" + str(comicversion))
                            else:
                                if splitit[n].isdigit():
                                    print ("issue detected")
                                    comiss = splitit[n]
                                    comicNAMER = n - 1
                                    comNAME = splitit[0]
                                    cmnam = 1
                                    while (cmnam < comicNAMER):
                                        comNAME = str(comNAME) + " " + str(splitit[cmnam])
                                        cmnam+=1
                                    #print ("comic: " + str(comNAME))
                                else:
                                    #print ("non-match for: " + splitit[n])
                                    pass
                            n+=1
                        #percentage of name words matched (computed but not acted on).
                        spercent = ( scount/int(len(splitit)) ) * 100
                        #print (str(spercent) + "% match")
                        #if spercent >= 75: print ("it's a go captain...")
                        #if spercent < 75: print ("failure - we only got " + str(spercent) + "% right!")
                        print ("this should be a match!")
                        #issue comparison now as well
                        if int(findcomiciss[findloop]) == int(comiss):
                            print ("issues match!")

                        ## -- inherit issue. Comic year is non-standard. nzb year is the year
                        ## -- comic was printed, not the start year of the comic series and
                        ## -- thus the deciding component if matches are correct or not
                            linkstart = os.path.splitext(entry['link'])[0]
                        #following is JUST for nzb.su
                            if nzbprov == 'nzb.su':
                                linkit = os.path.splitext(entry['link'])[1]
                                #print ("linkit: " + str(linkit))
                                linkit = linkit.replace("&", "%26")
                                linkapi = str(linkstart) + str(linkit)
                            else:
                                # this should work for every other provider
                                linkstart = linkstart.replace("&", "%26")
                                linkapi = str(linkstart)
                            #here we distinguish between rename and not.
                            #blackhole functinality---
                            #let's download the file to a temporary cache.

                            if mylar.BLACKHOLE:
                                if os.path.exists(mylar.BLACKHOLE_DIR):
                                    filenamenzb = str(ComicName) + " " + str(IssueNumber) + " (" + str(comyear) + ").nzb"
                                    urllib.urlretrieve(linkapi, str(mylar.BLACKHOLE_DIR) + str(filenamenzb))
                                    logger.info(u"Successfully sent .nzb to your Blackhole directory : " + str(mylar.BLACKHOLE_DIR) + str(filenamenzb) )
                            #end blackhole

                            else:
                                tmppath = mylar.CACHE_DIR
                                print ("cache directory set to: " + str(tmppath))
                                if os.path.exists(tmppath):
                                    filenamenzb = os.path.split(linkapi)[1]
                                    #filenzb = os.path.join(tmppath,filenamenzb)
                                    if nzbprov == 'nzb.su':
                                        filenzb = linkstart[21:]
                                    if nzbprov == 'experimental':
                                        filenzb = filenamenzb[6:]
                                    # NOTE(review): '==' below is a comparison, not an
                                    # assignment - filenzb is left unset on the dognzb
                                    # path; should read filenzb = str(filenamenzb).
                                    if nzbprov == 'dognzb':
                                        filenzb == str(filenamenzb)
                                    savefile = str(tmppath) + "/" + str(filenzb) + ".nzb"
                                else:
                                #let's make the dir.
                                    try:
                                        os.makedirs(str(mylar.CACHE_DIR))
                                        logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
                                        savefile = str(mylar.CACHE_DIR) + "/" + str(filenzb) + ".nzb"

                                    # NOTE(review): 'except OSError.e' is invalid syntax;
                                    # the Python 2 form would be 'except OSError, e:' (and
                                    # savefile/filenzb may be unbound on this path).
                                    except OSError.e:
                                        if e.errno != errno.EEXIST:
                                            raise
                                print ("savefile set to: " + str(savefile))
                                urllib.urlretrieve(linkapi, str(savefile))
								#print (str(mylar.RENAME_FILES))
				print ("sucessfully retrieve nzb to : " + str(savefile))			
								#check sab for current pause status
                                print ("sab host set to :" + str(mylar.SAB_HOST))
                                #query SABnzbd's qstatus api to see whether the queue is paused.
                                sabqstatusapi = str(mylar.SAB_HOST) + "/api?mode=qstatus&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                                from xml.dom.minidom import parseString
                                import urllib2
                                file = urllib2.urlopen(sabqstatusapi);
                                data = file.read()
                                file.close()
                                dom = parseString(data)
                                for node in dom.getElementsByTagName('paused'):
									pausestatus = node.firstChild.wholeText
									#print pausestatus
                                if pausestatus != 'True':
									#pause sab first because it downloads too quick (cbr's are small!)
                                    pauseapi = str(mylar.SAB_HOST) + "/api?mode=pause&apikey=" + str(mylar.SAB_APIKEY)
                                    urllib.urlopen(pauseapi);
                                    print "Queue paused"
                                else:
				    print "Queue already paused"
                                
                                #hand the nzb to SAB: addlocalfile when renaming, addurl otherwise.
                                if mylar.RENAME_FILES == 1:
                                    #print ("Saved file to: " + str(savefile))
                                    tmpapi = str(mylar.SAB_HOST) + "/api?mode=addlocalfile&name=" + str(savefile) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
                                else:
                                    tmpapi = str(mylar.SAB_HOST) + "/api?mode=addurl&name=" + str(linkapi) + "&pp=3&cat=" + str(mylar.SAB_CATEGORY) + "&script=ComicRN.py&apikey=" + str(mylar.SAB_APIKEY)
                                print ("sab api string:" + str(tmpapi))
                                time.sleep(5)
                                urllib.urlopen(tmpapi);
                                if mylar.RENAME_FILES == 1:
                                    #let's give it 5 extra seconds to retrieve the nzb data...

                                    time.sleep(5)
                              
                                    outqueue = str(mylar.SAB_HOST) + "/api?mode=queue&start=START&limit=LIMIT&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                                    print ("outqueue line generated")
                                    urllib.urlopen(outqueue);
                                    time.sleep(5)
                                    print ("passed api request to SAB")
                                #<slots><slot><filename>.nzb filename
                                #chang nzbfilename to include series(SAB will auto rename based on this)
                                #api?mode=queue&name=rename&value=<filename_nzi22ks>&value2=NEWNAME
                                    from xml.dom.minidom import parseString
                                    import urllib2
                                    file = urllib2.urlopen(outqueue);
                                    data = file.read()
                                    file.close()
                                    dom = parseString(data)
                                    queue_slots = dom.getElementsByTagName('filename')
                                    queue_cnt = len(queue_slots)
                                    print ("there are " + str(queue_cnt) + " things in SABnzbd's queue")
                                    que = 0
                                    slotmatch = "no"
                                    #scan the queue for the slot whose filename matches ours.
                                    for queue in queue_slots:
                                    #retrieve the first xml tag (<tag>data</tag>)
                                    #that the parser finds with name tagName:
                                        queue_file = dom.getElementsByTagName('filename')[que].firstChild.wholeText
                                        while ('Trying to fetch NZB' in queue_file):
                                            #let's keep waiting until nzbname is resolved by SABnzbd
                                            time.sleep(5)
                                            file = urllib2.urlopen(outqueue);
                                            data = file.read()
                                            file.close()
                                            dom = parseString(data)
                                            queue_file = dom.getElementsByTagName('filename')[que].firstChild.wholeText
                                        print (str(queue_file))
                                        print (str(filenzb))                              
                                        queue_file = queue_file.replace("_", " ")
                                        if str(queue_file) in str(filenzb):
                                            print ("matched")
                                            slotmatch = "yes"
                                            slot_nzoid = dom.getElementsByTagName('nzo_id')[que].firstChild.wholeText
                                            print ("slot_nzoid: " + str(slot_nzoid))
                                            break
                                        que+=1
                                    if slotmatch == "yes":
                                        if mylar.REPLACE_SPACES:
                                            repchar = mylar.REPLACE_CHAR
                                        else:
                                            repchar = ' '
                                        #let's make sure there's no crap in the ComicName since it's O.G.
                                        ComicNM = re.sub('[\:\,]', '', str(ComicName))
                                        renameit = str(ComicNM) + " " + str(IssueNumber) + " (" + str(SeriesYear) + ")" + " " + "(" + str(comyear) + ")"
                                        renameit = renameit.replace(' ', repchar)
                                        nzo_ren = str(mylar.SAB_HOST) + "/api?mode=queue&name=rename&apikey=" + str(mylar.SAB_APIKEY) + "&value=" + str(slot_nzoid) + "&value2=" + str(renameit)
                                        print ("attempting to rename queue to " + str(nzo_ren))
                                        urllib2.urlopen(nzo_ren);
                                        print ("renamed!")
                                        #delete the .nzb now.
                                        #delnzb = str(mylar.PROG_DIR) + "/" + str(filenzb) + ".nzb"
                                        #if mylar.PROG_DIR is not "/":
                                             #os.remove(delnzb)
                                            #we need to track nzo_id to make sure finished downloaded with SABnzbd.
                                            #controlValueDict = {"nzo_id":      str(slot_nzoid)}
                                            #newValueDict = {"ComicName":       str(ComicName),
                                            #                "ComicYEAR":       str(comyear),
                                            #                "ComicIssue":      str(IssueNumber),
                                            #                "name":            str(filenamenzb)}
                                            #print ("updating SABLOG")
                                            #myDB = db.DBConnection()
                                            #myDB.upsert("sablog", newValueDict, controlValueDict)
                                    else: logger.info(u"Couldn't locate file in SAB - are you sure it's being downloaded?")
                                #resume sab if it was running before we started
                                if pausestatus != 'True':
                                    #let's unpause queue now that we did our jobs.
                                    resumeapi = str(mylar.SAB_HOST) + "/api?mode=resume&apikey=" + str(mylar.SAB_APIKEY)
                                    urllib.urlopen(resumeapi);
                                    #print "Queue resumed"
                                #else:
									#print "Queue already paused"

                            #raise an exception to break out of loop
                            foundc = "yes"
                            done = True
                            break
                        else:
                            #print ("issues don't match..")
                            foundc = "no"
                if done == True: break
            cmloopit-=1
        findloop+=1
        if foundc == "yes":
            foundcomic.append("yes")
            logger.info(u"Found :" + str(ComicName) + " (" + str(comyear) + ") issue: " + str(IssueNumber) + " using " + str(nzbprov))
            break
        # NOTE(review): '<>' is the Python 2 inequality operator (use '!=' when porting).
        elif foundc == "no" and nzbpr <> 0:
            logger.info(u"More than one search provider given - trying next one.")
        elif foundc == "no" and nzbpr == 0:
            foundcomic.append("no")
            logger.info(u"Couldn't find Issue " + str(IssueNumber) + " of " + str(ComicName) + "(" + str(comyear) + "). Status kept as wanted." )
            break
    return foundc
Example #45
0
    def rename_file(self,
                    ofilename,
                    issue=None,
                    annualize=None,
                    arc=False,
                    file_format=None
                    ):  #comicname, issue, comicyear=None, issueid=None)
        comicid = self.comicid  # it's coming in unicoded...
        issueid = self.issueid

        if file_format is None:
            file_format = mylar.CONFIG.FILE_FORMAT

        logger.fdebug(type(comicid))
        logger.fdebug(type(issueid))
        logger.fdebug('comicid: %s' % comicid)
        logger.fdebug('issue# as per cv: %s' % issue)
        logger.fdebug('issueid:' + str(issueid))

        if issueid is None:
            logger.fdebug('annualize is ' + str(annualize))
            if arc:
                #this has to be adjusted to be able to include story arc issues that span multiple arcs
                chkissue = self.myDB.selectone(
                    "SELECT * from storyarcs WHERE ComicID=? AND Issue_Number=?",
                    [comicid, issue]).fetchone()
            else:
                chkissue = self.myDB.selectone(
                    "SELECT * from issues WHERE ComicID=? AND Issue_Number=?",
                    [comicid, issue]).fetchone()
                if all([
                        chkissue is None, annualize is None,
                        not mylar.CONFIG.ANNUALS_ON
                ]):
                    chkissue = self.myDB.selectone(
                        "SELECT * from annuals WHERE ComicID=? AND Issue_Number=?",
                        [comicid, issue]).fetchone()

            if chkissue is None:
                #rechk chkissue against int value of issue #
                if arc:
                    chkissue = self.myDB.selectone(
                        "SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?",
                        [comicid, issuedigits(issue)]).fetchone()
                else:
                    chkissue = self.myDB.selectone(
                        "SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?",
                        [comicid, issuedigits(issue)]).fetchone()
                    if all([
                            chkissue is None, annualize == 'yes',
                            mylar.CONFIG.ANNUALS_ON
                    ]):
                        chkissue = self.myDB.selectone(
                            "SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?",
                            [comicid, issuedigits(issue)]).fetchone()

                if chkissue is None:
                    logger.error('Invalid Issue_Number - please validate.')
                    return
                else:
                    logger.info(
                        'Int Issue_number compare found. continuing...')
                    issueid = chkissue['IssueID']
            else:
                issueid = chkissue['IssueID']

        #use issueid to get publisher, series, year, issue number
        logger.fdebug('issueid is now : ' + str(issueid))
        if arc:
            issueinfo = self.myDB.selectone(
                "SELECT * from storyarcs WHERE ComicID=? AND IssueID=? AND StoryArc=?",
                [comicid, issueid, arc]).fetchone()
        else:
            issueinfo = self.myDB.selectone(
                "SELECT * from issues WHERE ComicID=? AND IssueID=?",
                [comicid, issueid]).fetchone()
            if issueinfo is None:
                logger.fdebug('not an issue, checking against annuals')
                issueinfo = self.myDB.selectone(
                    "SELECT * from annuals WHERE ComicID=? AND IssueID=?",
                    [comicid, issueid]).fetchone()
                if issueinfo is None:
                    logger.fdebug(
                        'Unable to rename - cannot locate issue id within db')
                    return
                else:
                    annualize = True

        if issueinfo is None:
            logger.fdebug(
                'Unable to rename - cannot locate issue id within db')
            return

        #remap the variables to a common factor.
        if arc:
            issuenum = issueinfo['IssueNumber']
            issuedate = issueinfo['IssueDate']
            publisher = issueinfo['IssuePublisher']
            series = issueinfo['ComicName']
            seriesfilename = series  #Alternate FileNaming is not available with story arcs.
            seriesyear = issueinfo['SeriesYear']
            arcdir = helpers.filesafe(issueinfo['StoryArc'])
            if mylar.CONFIG.REPLACE_SPACES:
                arcdir = arcdir.replace(' ', mylar.CONFIG.REPLACE_CHAR)
            if mylar.CONFIG.STORYARCDIR:
                storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR,
                                         "StoryArcs", arcdir)
                logger.fdebug('Story Arc Directory set to : ' + storyarcd)
            else:
                logger.fdebug('Story Arc Directory set to : ' +
                              mylar.CONFIG.GRABBAG_DIR)
                storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR,
                                         mylar.CONFIG.GRABBAG_DIR)

            comlocation = storyarcd
            comversion = None  #need to populate this.

        else:
            issuenum = issueinfo['Issue_Number']
            issuedate = issueinfo['IssueDate']
            publisher = self.comic['ComicPublisher']
            series = self.comic['ComicName']
            if self.comic['AlternateFileName'] is None or self.comic[
                    'AlternateFileName'] == 'None':
                seriesfilename = series
            else:
                seriesfilename = self.comic['AlternateFileName']
                logger.fdebug(
                    'Alternate File Naming has been enabled for this series. Will rename series title to : '
                    + seriesfilename)
            seriesyear = self.comic['ComicYear']
            comlocation = self.comic['ComicLocation']
            comversion = self.comic['ComicVersion']

        unicodeissue = issuenum

        if type(issuenum) == str:
            vals = {
                '\xbd': '.5',
                '\xbc': '.25',
                '\xbe': '.75',
                '\u221e': '9999999999',
                '\xe2': '9999999999'
            }
        else:
            vals = {
                '\xbd': '.5',
                '\xbc': '.25',
                '\xbe': '.75',
                '\\u221e': '9999999999',
                '\xe2': '9999999999'
            }
        x = [vals[key] for key in vals if key in issuenum]
        if x:
            issuenum = x[0]
            logger.fdebug('issue number formatted: %s' % issuenum)

        #comicid = issueinfo['ComicID']
        #issueno = str(issuenum).split('.')[0]
        issue_except = 'None'
        issue_exceptions = [
            'AU', 'INH', 'NOW', 'AI', 'MU', 'A', 'B', 'C', 'X', 'O'
        ]
        valid_spaces = ('.', '-')
        for issexcept in issue_exceptions:
            if issexcept.lower() in issuenum.lower():
                logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
                v_chk = [v for v in valid_spaces if v in issuenum]
                if v_chk:
                    iss_space = v_chk[0]
                    logger.fdebug('character space denoted as : ' + iss_space)
                else:
                    logger.fdebug('character space not denoted.')
                    iss_space = ''
#                    if issexcept == 'INH':
#                       issue_except = '.INH'
                if issexcept == 'NOW':
                    if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
#                       issue_except = '.NOW'

                issue_except = iss_space + issexcept
                logger.fdebug('issue_except denoted as : ' + issue_except)
                issuenum = re.sub("[^0-9]", "", issuenum)
                break

#            if 'au' in issuenum.lower() and issuenum[:1].isdigit():
#                issue_except = ' AU'
#            elif 'ai' in issuenum.lower() and issuenum[:1].isdigit():
#                issuenum = re.sub("[^0-9]", "", issuenum)
#                issue_except = ' AI'
#            elif 'inh' in issuenum.lower() and issuenum[:1].isdigit():
#                issuenum = re.sub("[^0-9]", "", issuenum)
#                issue_except = '.INH'
#            elif 'now' in issuenum.lower() and issuenum[:1].isdigit():
#                if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
#                issuenum = re.sub("[^0-9]", "", issuenum)
#                issue_except = '.NOW'
        if '.' in issuenum:
            iss_find = issuenum.find('.')
            iss_b4dec = issuenum[:iss_find]
            if iss_find == 0:
                iss_b4dec = '0'
            iss_decval = issuenum[iss_find + 1:]
            if iss_decval.endswith('.'):
                iss_decval = iss_decval[:-1]
            if int(iss_decval) == 0:
                iss = iss_b4dec
                issdec = int(iss_decval)
                issueno = iss
            else:
                if len(iss_decval) == 1:
                    iss = iss_b4dec + "." + iss_decval
                    issdec = int(iss_decval) * 10
                else:
                    iss = iss_b4dec + "." + iss_decval.rstrip('0')
                    issdec = int(iss_decval.rstrip('0')) * 10
                issueno = iss_b4dec
        else:
            iss = issuenum
            issueno = iss
        # issue zero-suppression here
        if mylar.CONFIG.ZERO_LEVEL == "0":
            zeroadd = ""
        else:
            if mylar.CONFIG.ZERO_LEVEL_N == "none": zeroadd = ""
            elif mylar.CONFIG.ZERO_LEVEL_N == "0x": zeroadd = "0"
            elif mylar.CONFIG.ZERO_LEVEL_N == "00x": zeroadd = "00"

        logger.fdebug('Zero Suppression set to : ' +
                      str(mylar.CONFIG.ZERO_LEVEL_N))
        prettycomiss = None

        if issueno.isalpha():
            logger.fdebug('issue detected as an alpha.')
            prettycomiss = str(issueno)
        else:
            try:
                x = float(issuenum)
                #validity check
                if x < 0:
                    logger.info(
                        'I\'ve encountered a negative issue #: %s. Trying to accomodate.'
                        % issueno)
                    prettycomiss = '-' + str(zeroadd) + str(issueno[1:])
                elif x == 9999999999:
                    logger.fdebug('Infinity issue found.')
                    issuenum = 'infinity'
                elif x >= 0:
                    pass
                else:
                    raise ValueError
            except ValueError as e:
                logger.warn(
                    'Unable to properly determine issue number [ %s] - you should probably log this on github for help.'
                    % issueno)
                return

        if prettycomiss is None and len(str(issueno)) > 0:
            #if int(issueno) < 0:
            #    self._log("issue detected is a negative")
            #    prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
            if int(issueno) < 10:
                logger.fdebug('issue detected less than 10')
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                else:
                    prettycomiss = str(zeroadd) + str(iss)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                logger.fdebug('Zero level supplement set to ' +
                              str(mylar.CONFIG.ZERO_LEVEL_N) +
                              '. Issue will be set as : ' + str(prettycomiss))
            elif int(issueno) >= 10 and int(issueno) < 100:
                logger.fdebug(
                    'issue detected greater than 10, but less than 100')
                if mylar.CONFIG.ZERO_LEVEL_N == "none":
                    zeroadd = ""
                else:
                    zeroadd = "0"
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                else:
                    prettycomiss = str(zeroadd) + str(iss)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                logger.fdebug('Zero level supplement set to ' +
                              str(mylar.CONFIG.ZERO_LEVEL_N) +
                              '.Issue will be set as : ' + str(prettycomiss))
            else:
                logger.fdebug('issue detected greater than 100')
                if issuenum == 'infinity':
                    prettycomiss = 'infinity'
                else:
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                logger.fdebug('Zero level supplement set to ' +
                              str(mylar.CONFIG.ZERO_LEVEL_N) +
                              '. Issue will be set as : ' + str(prettycomiss))
        elif len(str(issueno)) == 0:
            prettycomiss = str(issueno)
            logger.fdebug(
                'issue length error - cannot determine length. Defaulting to None:  '
                + str(prettycomiss))

        logger.fdebug('Pretty Comic Issue is : ' + str(prettycomiss))
        if mylar.CONFIG.UNICODE_ISSUENUMBER:
            logger.fdebug('Setting this to Unicode format as requested: %s' %
                          prettycomiss)
            prettycomiss = unicodeissue

        issueyear = issuedate[:4]
        month = issuedate[5:7].replace('-', '').strip()
        month_name = helpers.fullmonth(month)
        if month_name is None:
            month_name = 'None'
        logger.fdebug('Issue Year : ' + str(issueyear))
        logger.fdebug('Publisher: ' + publisher)
        logger.fdebug('Series: ' + series)
        logger.fdebug('Year: ' + str(seriesyear))
        logger.fdebug('Comic Location: ' + comlocation)

        if self.comic['Corrected_Type'] is not None:
            if self.comic['Type'] != self.comic['Corrected_Type']:
                booktype = self.comic['Corrected_Type']
            else:
                booktype = self.comic['Type']
        else:
            booktype = self.comic['Type']

        if booktype == 'Print' or all(
            [booktype != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False]):
            chunk_fb = re.sub('\$Type', '', file_format)
            chunk_b = re.compile(r'\s+')
            chunk_file_format = chunk_b.sub(' ', chunk_fb)
        else:
            chunk_file_format = file_format

        if any([comversion is None, booktype != 'Print']):
            comversion = 'None'

        #if comversion is None, remove it so it doesn't populate with 'None'
        if comversion == 'None':
            chunk_f_f = re.sub('\$VolumeN', '', chunk_file_format)
            chunk_f = re.compile(r'\s+')
            chunk_file_format = chunk_f.sub(' ', chunk_f_f)
            logger.fdebug(
                'No version # found for series, removing from filename')
            logger.fdebug("new format: " + str(chunk_file_format))

        if annualize is None:
            chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
            chunk_f = re.compile(r'\s+')
            chunk_file_format = chunk_f.sub(' ', chunk_f_f)
            logger.fdebug('not an annual - removing from filename paramaters')
            logger.fdebug('new format: ' + str(chunk_file_format))

        else:
            logger.fdebug('chunk_file_format is: ' + str(chunk_file_format))
            if mylar.CONFIG.ANNUALS_ON:
                if 'annual' in series.lower():
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        #prettycomiss = "Annual " + str(prettycomiss)
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        #because it exists within title, strip it then use formatting tag for placement of wording.
                        chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
                        chunk_f = re.compile(r'\s+')
                        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                else:
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        prettycomiss = "Annual %s" % prettycomiss
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        logger.fdebug(
                            '[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))

            else:
                #if annuals aren't enabled, then annuals are being tracked as independent series.
                #annualize will be true since it's an annual in the seriesname.
                if 'annual' in series.lower():
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        #prettycomiss = "Annual " + str(prettycomiss)
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        #because it exists within title, strip it then use formatting tag for placement of wording.
                        chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
                        chunk_f = re.compile(r'\s+')
                        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                else:
                    if '$Annual' not in chunk_file_format:  # and 'annual' not in ofilename.lower():
                        #if it's an annual, but $annual isn't specified in file_format, we need to
                        #force it in there, by default in the format of $Annual $Issue
                        prettycomiss = "Annual %s" % prettycomiss
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))
                    else:
                        logger.fdebug(
                            '[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s'
                            % (series, prettycomiss))

                logger.fdebug('Annual detected within series title of ' +
                              series + '. Not auto-correcting issue #')

        seriesfilename = seriesfilename  #.encode('ascii', 'ignore').strip()
        filebad = [
            ':', ',', '/', '?', '!', '\'', '\"', '\*'
        ]  #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
        for dbd in filebad:
            if dbd in seriesfilename:
                if any([dbd == '/', dbd == '*']):
                    repthechar = '-'
                else:
                    repthechar = ''
                seriesfilename = seriesfilename.replace(dbd, repthechar)
                logger.fdebug(
                    'Altering series name due to filenaming restrictions: ' +
                    seriesfilename)

        publisher = re.sub('!', '', publisher)

        file_values = {
            '$Series': seriesfilename,
            '$Issue': prettycomiss,
            '$Year': issueyear,
            '$series': series.lower(),
            '$Publisher': publisher,
            '$publisher': publisher.lower(),
            '$VolumeY': 'V' + str(seriesyear),
            '$VolumeN': comversion,
            '$monthname': month_name,
            '$month': month,
            '$Annual': 'Annual',
            '$Type': booktype
        }

        extensions = ('.cbr', '.cbz', '.cb7')

        if ofilename.lower().endswith(extensions):
            path, ext = os.path.splitext(ofilename)

        if file_format == '':
            logger.fdebug(
                'Rename Files is not enabled - keeping original filename.')
            #check if extension is in nzb_name - will screw up otherwise
            if ofilename.lower().endswith(extensions):
                nfilename = ofilename[:-4]
            else:
                nfilename = ofilename
        else:
            chunk_file_format = re.sub('[()|[]]', '',
                                       chunk_file_format).strip()
            nfilename = helpers.replace_all(chunk_file_format, file_values)
            if mylar.CONFIG.REPLACE_SPACES:
                #mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                nfilename = nfilename.replace(' ', mylar.CONFIG.REPLACE_CHAR)

        nfilename = re.sub('[\,\:]', '', nfilename) + ext.lower()
        logger.fdebug('New Filename: ' + nfilename)

        if mylar.CONFIG.LOWERCASE_FILENAMES:
            nfilename = nfilename.lower()
            dst = os.path.join(comlocation, nfilename)
        else:
            dst = os.path.join(comlocation, nfilename)

        logger.fdebug('Source: ' + ofilename)
        logger.fdebug('Destination: ' + dst)

        rename_this = {
            "destination_dir": dst,
            "nfilename": nfilename,
            "issueid": issueid,
            "comicid": comicid
        }

        return rename_this
Example #46
0
    def Process(self):
        """Handle a failed download by marking it as Failed in the database.

        Locates the originating nzblog record (by nzbname when invoked from a
        download folder, otherwise by self.issueid), marks the matching issue
        as Failed in the 'issues' table, records the failure in the 'failed'
        table, and - when FAILED_AUTO is enabled - queues a 'retry' result so
        the search module can look for a replacement. All results are handed
        back to the caller via self.queue / self.valreturn.
        """
        module = '[FAILED-DOWNLOAD]'

        myDB = db.DBConnection()

        if self.nzb_name and self.nzb_folder:
            self._log('Failed download has been detected: ' + self.nzb_name + ' in ' + self.nzb_folder)

            #since this has already been passed through the search module, which holds the IssueID in the nzblog,
            #let's find the matching nzbname and pass it the IssueID in order to mark it as Failed and then return
            #to the search module and continue trucking along.

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext)
                #use the splitext stem directly - the previous re.sub(ext, ...)
                #treated the '.' in the extension as a regex wildcard and removed
                #every match anywhere in the name, not just the trailing extension.
                nzbname = fd

            #normalize the name to the form stored in the nzblog table:
            #spaces/underscores -> dots, troublesome punctuation stripped.
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub(r'[\,\:\?\'\(\)]', '', str(nzbname))
            nzbname = re.sub(r'[\&]', 'and', str(nzbname))
            nzbname = re.sub('_', '.', str(nzbname))

            logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname))
            self._log("nzbname: " + str(nzbname))

            nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.")
                logger.fdebug(module + ' Failure - could not locate nzbfile initially')
                # if failed on spaces, change it all to decimals and try again.
                # NOTE(review): this underscore->dot substitution was already applied
                # above, so the retry currently queries an identical nzbname - confirm
                # what conversion was actually intended here.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname))
                logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
                nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
                    self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
                    self.valreturn.append({"self.log": self.log,
                                           "mode": 'stop'})

                    return self.queue.put(self.valreturn)
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug(module + ' Auto-corrected and found the nzb as : ' + str(nzbname))
                    issueid = nzbiss['IssueID']
            else:
                issueid = nzbiss['IssueID']
                logger.fdebug(module + ' Issueid: ' + str(issueid))
                sarc = nzbiss['SARC']
                #use issueid to get publisher, series, year, issue number

        else:
            #no nzb name/folder supplied - fall back to the IssueID handed in directly.
            issueid = self.issueid
            nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
            if nzbiss is None:
                logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
                self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})
                return self.queue.put(self.valreturn)

            nzbname = nzbiss['NZBName']

        # find the provider.
        self.prov = nzbiss['PROVIDER']
        logger.info(module + ' Provider: ' + self.prov)

        # grab the id.
        self.id = nzbiss['ID']
        logger.info(module + ' ID: ' + self.id)
        annchk = "no"

        if 'annual' in nzbname.lower():
            logger.info(module + ' Annual detected.')
            annchk = "yes"
            issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
        else:
            issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()

        if issuenzb is not None:
            logger.info(module + ' issuenzb found.')
            if helpers.is_number(issueid):
                sandwich = int(issuenzb['IssueID'])
        else:
            logger.info(module + ' issuenzb not found.')
            #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
            #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
            if 'S' in issueid:
                sandwich = issueid
            elif 'G' in issueid or '-' in issueid:
                sandwich = 1
        try:
            if helpers.is_number(sandwich):
                #numeric sandwich means the issue was located in issues/annuals,
                #i.e. a normal watchlist download - bypass.
                #NOTE(review): the original code also mentioned a 'sandwich < 900000'
                #threshold in a mis-indented (dead) branch; only non-numeric values
                #ever aborted, so that behavior is kept as-is here - confirm intent.
                pass
            else:
                #non-numeric sandwich ('S...' story-arc id) is unsupported.
                logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
                self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
                self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})
                return self.queue.put(self.valreturn)
        except NameError:
            #sandwich was never assigned - issueid matched none of the cases above.
            logger.info('sandwich was not defined. Post-processing aborted...')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})

            return self.queue.put(self.valreturn)

        comicid = issuenzb['ComicID']
        issuenumOG = issuenzb['Issue_Number']
        logger.info(module + ' Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' that was downloaded using ' + self.prov)
        self._log('Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' downloaded using ' + self.prov)

        logger.info(module + ' Marking as a Failed Download.')
        self._log('Marking as a Failed Download.')

        #flag the issue itself as Failed ...
        ctrlVal = {"IssueID": issueid}
        Vals = {"Status":    'Failed'}
        myDB.upsert("issues", Vals, ctrlVal)

        #... and record the failed download so it won't be re-snatched.
        ctrlVal = {"ID":       self.id,
                   "Provider": self.prov,
                   "NZBName":  nzbname}
        Vals = {"Status":       'Failed',
                "ComicName":    issuenzb['ComicName'],
                "Issue_Number": issuenzb['Issue_Number'],
                "IssueID":      issueid,
                "ComicID":      comicid,
                "DateFailed":   helpers.now()}
        myDB.upsert("failed", Vals, ctrlVal)

        logger.info(module + ' Successfully marked as Failed.')
        self._log('Successfully marked as Failed.')

        if mylar.CONFIG.FAILED_AUTO:
            logger.info(module + ' Sending back to search to see if we can find something that will not fail.')
            self._log('Sending back to search to see if we can find something better that will not fail.')
            self.valreturn.append({"self.log":    self.log,
                                   "mode":        'retry',
                                   "issueid":     issueid,
                                   "comicid":     comicid,
                                   "comicname":   issuenzb['ComicName'],
                                   "issuenumber": issuenzb['Issue_Number'],
                                   "annchk":      annchk})

            return self.queue.put(self.valreturn)
        else:
            logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
            self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})
            return self.queue.put(self.valreturn)
Example #47
0
def run(dirName,
        nzbName=None,
        issueid=None,
        comversion=None,
        manual=None,
        filename=None,
        module=None,
        manualmeta=False):
    if module is None:
        module = ''
    module += '[META-TAGGER]'

    logger.fdebug(module + ' dirName:' + dirName)

    # 2015-11-23: Recent CV API changes restrict the rate-limit to 1 api request / second.
    # ComicTagger has to be included now with the install as a timer had to be added to allow for the 1/second rule.
    comictagger_cmd = os.path.join(mylar.CMTAGGER_PATH, 'comictagger.py')
    logger.fdebug(
        'ComicTagger Path location for internal comictagger.py set to : ' +
        comictagger_cmd)

    # Force mylar to use cmtagger_path = mylar.PROG_DIR to force the use of the included lib.

    logger.fdebug(module + ' Filename is : ' + filename)

    filepath = filename
    og_filepath = filepath
    try:
        filename = os.path.split(filename)[1]  # just the filename itself
    except:
        logger.warn(
            'Unable to detect filename within directory - I am aborting the tagging. You best check things out.'
        )
        return "fail"

    #make use of temporary file location in order to post-process this to ensure that things don't get hammered when converting
    new_filepath = None
    new_folder = None
    try:
        import tempfile
        logger.fdebug('Filepath: %s' % filepath)
        logger.fdebug('Filename: %s' % filename)
        new_folder = tempfile.mkdtemp(
            prefix='mylar_', dir=mylar.CONFIG.CACHE_DIR)  #prefix, suffix, dir
        logger.fdebug('New_Folder: %s' % new_folder)
        new_filepath = os.path.join(new_folder, filename)
        logger.fdebug('New_Filepath: %s' % new_filepath)
        if mylar.CONFIG.FILE_OPTS == 'copy' and manualmeta == False:
            shutil.copy(filepath, new_filepath)
        else:
            shutil.copy(filepath, new_filepath)
        filepath = new_filepath
    except Exception as e:
        logger.warn('%s Unexpected Error: %s [%s]' %
                    (module, sys.exc_info()[0], e))
        logger.warn(
            module +
            ' Unable to create temporary directory to perform meta-tagging. Processing without metatagging.'
        )
        tidyup(og_filepath, new_filepath, new_folder, manualmeta)
        return "fail"

    ## Sets up other directories ##
    scriptname = os.path.basename(sys.argv[0])
    downloadpath = os.path.abspath(dirName)
    sabnzbdscriptpath = os.path.dirname(sys.argv[0])
    comicpath = new_folder

    logger.fdebug(module + ' Paths / Locations:')
    logger.fdebug(module + ' scriptname : ' + scriptname)
    logger.fdebug(module + ' downloadpath : ' + downloadpath)
    logger.fdebug(module + ' sabnzbdscriptpath : ' + sabnzbdscriptpath)
    logger.fdebug(module + ' comicpath : ' + comicpath)
    logger.fdebug(module + ' Running the ComicTagger Add-on for Mylar')

    ##set up default comictagger options here.
    #used for cbr - to - cbz conversion
    #depending on copy/move - eitehr we retain the rar or we don't.
    if mylar.CONFIG.FILE_OPTS == 'move':
        cbr2cbzoptions = ["-e", "--delete-rar"]
    else:
        cbr2cbzoptions = ["-e"]

    tagoptions = ["-s"]
    if mylar.CONFIG.CMTAG_VOLUME:
        if mylar.CONFIG.CMTAG_START_YEAR_AS_VOLUME:
            comversion = str(comversion)
        else:
            if any(
                [comversion is None, comversion == '', comversion == 'None']):
                comversion = '1'
            comversion = re.sub('[^0-9]', '', comversion).strip()
        cvers = 'volume=' + str(comversion)
    else:
        cvers = "volume="

    tagoptions.extend(["-m", cvers])

    try:
        #from comictaggerlib import ctversion
        ct_check = subprocess.check_output(
            [sys.executable, comictagger_cmd, "--version"],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        #logger.warn(module + "[WARNING] "command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
        logger.warn(
            module +
            '[WARNING] Make sure that you are using the comictagger included with Mylar.'
        )
        tidyup(filepath, new_filepath, new_folder, manualmeta)
        return "fail"

    logger.info('ct_check: %s' % ct_check)
    ctend = str(ct_check).find(':')
    ct_version = re.sub("[^0-9]", "", str(ct_check)[:ctend])
    from pkg_resources import parse_version
    if parse_version(ct_version) >= parse_version('1.3.1'):
        if any([
                mylar.CONFIG.COMICVINE_API == 'None',
                mylar.CONFIG.COMICVINE_API is None
        ]):
            logger.fdebug(
                '%s ComicTagger v.%s being used - no personal ComicVine API Key supplied. Take your chances.'
                % (module, ct_version))
            use_cvapi = "False"
        else:
            logger.fdebug(
                '%s ComicTagger v.%s being used - using personal ComicVine API key supplied via mylar.'
                % (module, ct_version))
            use_cvapi = "True"
            tagoptions.extend(["--cv-api-key", mylar.CONFIG.COMICVINE_API])
    else:
        logger.fdebug(
            '%s ComicTagger v.ct_version being used - personal ComicVine API key not supported in this version. Good luck.'
            % (module, ct_version))
        use_cvapi = "False"

    i = 1
    tagcnt = 0

    if mylar.CONFIG.CT_TAG_CR:
        tagcnt = 1
        logger.fdebug(module + ' CR Tagging enabled.')

    if mylar.CONFIG.CT_TAG_CBL:
        if not mylar.CONFIG.CT_TAG_CR:
            i = 2  #set the tag to start at cbl and end without doing another tagging.
        tagcnt = 2
        logger.fdebug(module + ' CBL Tagging enabled.')

    if tagcnt == 0:
        logger.warn(
            module +
            ' You have metatagging enabled, but you have not selected the type(s) of metadata to write. Please fix and re-run manually'
        )
        tidyup(filepath, new_filepath, new_folder, manualmeta)
        return "fail"

    #if it's a cbz file - check if no-overwrite existing tags is enabled / disabled in config.
    if filename.endswith('.cbz'):
        if mylar.CONFIG.CT_CBZ_OVERWRITE:
            logger.fdebug(
                module + ' Will modify existing tag blocks even if it exists.')
        else:
            logger.fdebug(
                module +
                ' Will NOT modify existing tag blocks even if they exist already.'
            )
            tagoptions.extend(["--nooverwrite"])

    if issueid is None:
        tagoptions.extend(["-f", "-o"])
    else:
        tagoptions.extend(["-o", "--id", issueid])

    original_tagoptions = tagoptions
    og_tagtype = None
    initial_ctrun = True
    error_remove = False

    while (i <= tagcnt):
        if initial_ctrun:
            f_tagoptions = cbr2cbzoptions
            f_tagoptions.extend([filepath])
        else:
            if i == 1:
                tagtype = 'cr'  # CR meta-tagging cycle.
                tagdisp = 'ComicRack tagging'
            elif i == 2:
                tagtype = 'cbl'  # Cbl meta-tagging cycle
                tagdisp = 'Comicbooklover tagging'

            f_tagoptions = original_tagoptions

            if og_tagtype is not None:
                for index, item in enumerate(f_tagoptions):
                    if item == og_tagtype:
                        f_tagoptions[index] = tagtype
            else:
                f_tagoptions.extend(["--type", tagtype, filepath])

            og_tagtype = tagtype

            logger.info(module + ' ' + tagdisp +
                        ' meta-tagging processing started.')

        currentScriptName = [sys.executable, comictagger_cmd]
        script_cmd = currentScriptName + f_tagoptions

        if initial_ctrun:
            logger.fdebug('%s Enabling ComicTagger script with options: %s' %
                          (module, f_tagoptions))
            script_cmdlog = script_cmd

        else:
            logger.fdebug('%s Enabling ComicTagger script with options: %s' %
                          (module,
                           re.sub(
                               f_tagoptions[f_tagoptions.index(
                                   mylar.CONFIG.COMICVINE_API)], 'REDACTED',
                               str(f_tagoptions))))
            # generate a safe command line string to execute the script and provide all the parameters
            script_cmdlog = re.sub(
                f_tagoptions[f_tagoptions.index(mylar.CONFIG.COMICVINE_API)],
                'REDACTED', str(script_cmd))

        logger.fdebug(module + ' Executing command: ' + str(script_cmdlog))
        logger.fdebug(module + ' Absolute path to script: ' + script_cmd[0])
        try:
            # use subprocess to run the command and capture output
            p = subprocess.Popen(script_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            out, err = p.communicate()
            logger.info(out)
            logger.info(err)
            if out is not None:
                out = out.decode('utf-8')
            if err is not None:
                err = err.decode('utf-8')
            if initial_ctrun and 'exported successfully' in out:
                logger.fdebug(module + '[COMIC-TAGGER] : ' + str(out))
                #Archive exported successfully to: X-Men v4 008 (2014) (Digital) (Nahga-Empire).cbz (Original deleted)
                if 'Error deleting' in filepath:
                    tf1 = out.find('exported successfully to: ')
                    tmpfilename = out[tf1 + len('exported successfully to: '
                                                ):].strip()
                    error_remove = True
                else:
                    tmpfilename = re.sub('Archive exported successfully to: ',
                                         '', out.rstrip())
                if mylar.CONFIG.FILE_OPTS == 'move':
                    tmpfilename = re.sub('\(Original deleted\)', '',
                                         tmpfilename).strip()
                tmpf = tmpfilename  #.decode('utf-8')
                filepath = os.path.join(comicpath, tmpf)
                if filename.lower() != tmpf.lower() and tmpf.endswith(
                        '(1).cbz'):
                    logger.fdebug(
                        'New filename [%s] is named incorrectly due to duplication during metatagging - Making sure it\'s named correctly [%s].'
                        % (tmpf, filename))
                    tmpfilename = filename
                    filepath_new = os.path.join(comicpath, tmpfilename)
                    try:
                        os.rename(filepath, filepath_new)
                        filepath = filepath_new
                    except:
                        logger.warn(
                            '%s unable to rename file to accomodate metatagging cbz to the same filename'
                            % module)
                if not os.path.isfile(filepath):
                    logger.fdebug(module + 'Trying utf-8 conversion.')
                    tmpf = tmpfilename.encode('utf-8')
                    filepath = os.path.join(comicpath, tmpf)
                    if not os.path.isfile(filepath):
                        logger.fdebug(module + 'Trying latin-1 conversion.')
                        tmpf = tmpfilename.encode('Latin-1')
                        filepath = os.path.join(comicpath, tmpf)

                logger.fdebug(module +
                              '[COMIC-TAGGER][CBR-TO-CBZ] New filename: ' +
                              filepath)
                initial_ctrun = False
            elif initial_ctrun and 'Archive is not a RAR' in out:
                logger.fdebug('%s Output: %s' % (module, out))
                logger.warn(module +
                            '[COMIC-TAGGER] file is not in a RAR format: ' +
                            filename)
                initial_ctrun = False
            elif initial_ctrun:
                initial_ctrun = False
                if 'file is not expected size' in out:
                    logger.fdebug('%s Output: %s' % (module, out))
                    tidyup(og_filepath, new_filepath, new_folder, manualmeta)
                    return 'corrupt'
                else:
                    logger.warn(
                        module +
                        '[COMIC-TAGGER][CBR-TO-CBZ] Failed to convert cbr to cbz - check permissions on folder : '
                        + mylar.CONFIG.CACHE_DIR +
                        ' and/or the location where Mylar is trying to tag the files from.'
                    )
                    tidyup(og_filepath, new_filepath, new_folder, manualmeta)
                    return 'fail'
            elif 'Cannot find' in out:
                logger.fdebug('%s Output: %s' % (module, out))
                logger.warn(module + '[COMIC-TAGGER] Unable to locate file: ' +
                            filename)
                file_error = 'file not found||' + filename
                return file_error
            elif 'not a comic archive!' in out:
                logger.fdebug('%s Output: %s' % (module, out))
                logger.warn(module + '[COMIC-TAGGER] Unable to locate file: ' +
                            filename)
                file_error = 'file not found||' + filename
                return file_error
            else:
                logger.info(module + '[COMIC-TAGGER] Successfully wrote ' +
                            tagdisp + ' [' + filepath + ']')
                i += 1
        except OSError as e:
            logger.warn(
                module +
                '[COMIC-TAGGER] Unable to run comictagger with the options provided: '
                + re.sub(
                    f_tagoptions[f_tagoptions.index(
                        mylar.CONFIG.COMICVINE_API)], 'REDACTED',
                    str(script_cmd)))
            tidyup(filepath, new_filepath, new_folder, manualmeta)
            return "fail"

        if mylar.CONFIG.CBR2CBZ_ONLY and initial_ctrun == False:
            break

    return filepath
Example #48
0
    def failed_check(self):
        """Look up this download result in the failed-download table.

        Returns 'Good' when the result has never failed (or is explicitly
        marked Good), the stored status for Failed/Retry/Retrysame entries,
        or "nope" for an unrecognized status.
        """
        module = '[FAILED_DOWNLOAD_CHECKER]'

        myDB = db.DBConnection()
        # Querying on NZBName alone would return downloads from every provider;
        # matching on the provider-specific ID keeps results distinct per provider.
        logger.info('prov  : ' + str(self.prov) + '[' + str(self.id) + ']')

        # nzbhydra IDs embed a per-search 'searchid' token that is unique to each
        # search - strip it out and fall back to a LIKE match on the remainder.
        if 'indexerguid' in self.id:
            st = self.id.find('searchid:')
            end = self.id.find(',', st)
            self.id = '%' + self.id[:st] + '%' + self.id[end + 1:len(self.id) - 1] + '%'
            chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID LIKE ?', [self.id]).fetchone()
        else:
            chk_fail = myDB.selectone('SELECT * FROM failed WHERE ID=?', [self.id]).fetchone()

        # No row at all means this result has never failed before.
        if chk_fail is None:
            logger.info(module + ' Successfully marked this download as Good for downloadable content')
            return 'Good'

        status = chk_fail['status']
        if status == 'Good':
            logger.info(module + ' result has a status of GOOD - which means it does not currently exist in the failed download list.')
            return status
        if status == 'Failed':
            logger.info(module + ' result has a status of FAIL which indicates it is not a good choice to download.')
            logger.info(module + ' continuing search for another download.')
            return status
        if status == 'Retry':
            logger.info(module + ' result has a status of RETRY which indicates it was a failed download that retried .')
            return status
        if status == 'Retrysame':
            logger.info(module + ' result has a status of RETRYSAME which indicates it was a failed download that retried the initial download.')
            return status

        # Unknown status value - surface it and bail out.
        logger.info(module + ' result has a status of ' + status + '. I am not sure what to do now.')
        return "nope"
Example #49
0
 def on_login(self, username):
     """Hook invoked after a user authenticates successfully.

     Records the username in the log; performs no other action.
     """
     message = '%s successfully logged on.' % username
     logger.info(message)
Example #50
0
def initialize(options):
    """Configure and start the CherryPy web server hosting the Mylar UI.

    options: dict of webserver settings (http_host/http_port/http_root,
    http_username/http_password, authentication mode, login_timeout,
    enable_https + cert/key/chain paths, OPDS credentials).

    Calls sys.exit(0) if the configured port cannot be bound.
    """

    # HTTPS stuff stolen from sickbeard
    enable_https = options['enable_https']
    https_cert = options['https_cert']
    https_key = options['https_key']
    https_chain = options['https_chain']

    if enable_https:
        # If either the HTTPS certificate or key do not exist, try to make
        # self-signed ones.
        if not (https_cert and os.path.exists(https_cert)) or not (
                https_key and os.path.exists(https_key)):
            if not create_https_certificates(https_cert, https_key):
                logger.warn("Unable to create certificate and key. Disabling " \
                    "HTTPS")
                enable_https = False

        # Final sanity check: fall back to plain HTTP if the files still
        # aren't there after the self-signing attempt.
        if not (os.path.exists(https_cert) and os.path.exists(https_key)):
            logger.warn("Disabled HTTPS because of missing certificate and " \
                "key.")
            enable_https = False

    # Global CherryPy engine/server settings.
    options_dict = {
        'server.socket_port': options['http_port'],
        'server.socket_host': options['http_host'],
        'server.thread_pool': 10,
        'tools.encode.on': True,
        'tools.encode.encoding': 'utf-8',
        'tools.encode.text_only': False,
        'tools.decode.on': True,
        'log.screen': True,
        'engine.autoreload.on': False,
    }

    if enable_https:
        options_dict['server.ssl_certificate'] = https_cert
        options_dict['server.ssl_private_key'] = https_key
        if https_chain:
            options_dict['server.ssl_certificate_chain'] = https_chain
        protocol = "https"
    else:
        protocol = "http"

    logger.info("Starting Mylar on %s://%s:%d%s" %
                (protocol, options['http_host'], options['http_port'],
                 options['http_root']))
    cherrypy.config.update(options_dict)

    # Per-path application config: static assets are served from the data/
    # directory; the cache dir is exposed under /cache.
    conf = {
        '/': {
            'tools.staticdir.root': os.path.join(mylar.PROG_DIR, 'data'),
            'tools.proxy.on': True  # pay attention to X-Forwarded-Proto header
        },
        '/interfaces': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "interfaces"
        },
        '/images': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "images"
        },
        '/css': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "css"
        },
        '/js': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': "js"
        },
        '/favicon.ico': {
            'tools.staticfile.on':
            True,
            'tools.staticfile.filename':
            os.path.join(os.path.abspath(os.curdir),
                         'images' + os.sep + 'favicon.ico')
        },
        '/cache': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': mylar.CONFIG.CACHE_DIR
        }
    }

    # Optional authentication: mode 2 = forms/session login, mode 1 = HTTP
    # basic auth. No http_password means no auth at all.
    if options['http_password'] is not None:
        #userpassdict = dict(zip((options['http_username'].encode('utf-8'),), (options['http_password'].encode('utf-8'),)))
        #get_ha1= cherrypy.lib.auth_digest.get_ha1_dict_plain(userpassdict)
        if options['authentication'] == 2:
            # Set up a sessions based login page instead of using basic auth,
            # using the credentials set for basic auth. Attempting to browse to
            # a restricted page without a session token will result in a
            # redirect to the login page. A sucessful login should then redirect
            # to the originally requested page.
            #
            # Login sessions timeout after 43800 minutes (1 month) unless
            # changed in the config.
            # Note - the following command doesn't actually work, see update statement 2 lines down
            # cherrypy.tools.sessions.timeout = options['login_timeout']
            conf['/'].update({
                'tools.sessions.on': True,
                'tools.auth.on': True,
                'tools.sessions.timeout': options['login_timeout'],
                'auth.forms_username': options['http_username'],
                'auth.forms_password': options['http_password'],
                # Set all pages to require authentication.
                # You can also set auth requirements on a per-method basis by
                # using the @require() decorator on the methods in webserve.py
                'auth.require': []
            })
            # exempt api, login page and static elements from authentication requirements
            # NOTE(review): 'favicon.ico' here lacks the leading '/' used by the
            # conf key '/favicon.ico' above - verify the exemption actually matches.
            for i in ('/api', '/auth/login', '/css', '/images', '/js',
                      'favicon.ico'):
                if i in conf:
                    conf[i].update({'tools.auth.on': False})
                else:
                    conf[i] = {'tools.auth.on': False}
        elif options['authentication'] == 1:
            conf['/'].update({
                'tools.auth_basic.on':
                True,
                'tools.auth_basic.realm':
                'Mylar',
                'tools.auth_basic.checkpassword':
                cherrypy.lib.auth_basic.checkpassword_dict(
                    {options['http_username']: options['http_password']})
            })
            conf['/api'] = {'tools.auth_basic.on': False}

    # Config for the REST API mount; auth is handled by the API key instead.
    rest_api = {
        '/': {
            # the api uses restful method dispatching
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),

            # all api calls require that the client passes HTTP basic authentication
            'tools.auth_basic.on': False,
        }
    }

    # OPDS feed gets its own basic-auth realm; the web-UI credentials are
    # accepted there too when they differ from the OPDS username.
    if options['opds_authentication']:
        user_list = {}
        if len(options['opds_username']) > 0:
            user_list[options['opds_username']] = options['opds_password']
        if options['http_password'] is not None and options[
                'http_username'] != options['opds_username']:
            user_list[options['http_username']] = options['http_password']
        conf['/opds'] = {
            'tools.auth.on':
            False,
            'tools.auth_basic.on':
            True,
            'tools.auth_basic.realm':
            'Mylar OPDS',
            'tools.auth_basic.checkpassword':
            cherrypy.lib.auth_basic.checkpassword_dict(user_list)
        }
    else:
        conf['/opds'] = {'tools.auth_basic.on': False, 'tools.auth.on': False}

    # Prevent time-outs
    #cherrypy.engine.timeout_monitor.unsubscribe()

    cherrypy.tree.mount(WebInterface(), str(options['http_root']), config=conf)

    # Assemble and mount the REST resource tree under /rest.
    restroot = REST()
    restroot.comics = restroot.Comics()
    restroot.comic = restroot.Comic()
    restroot.watchlist = restroot.Watchlist()
    #restroot.issues = restroot.comic.Issues()
    #restroot.issue = restroot.comic.Issue()
    cherrypy.tree.mount(restroot, '/rest', config=rest_api)

    # Verify the port is free before starting; bail out of the process if not.
    try:
        portend.Checker().assert_free(options['http_host'],
                                      options['http_port'])
        cherrypy.server.start()
    except Exception as e:
        logger.error('[ERROR] %s' % e)
        print('Failed to start on port: %i. Is something else running?' %
              (options['http_port']))
        sys.exit(0)

    cherrypy.server.wait()
Example #51
0
def solicit(month, year):
    """Scrape comicbookresources.com for upcoming-issue solicitations.

    Builds a window of months starting at `month` (month..month+5), collects
    solicitation pages matching those months, writes the results to
    CACHE_DIR/future-releases.txt and reloads them into the `future` table of
    mylar.db.  Python 2 code (uses urllib2 and a bare print statement).
    """
    #convert to numerics just to ensure this...
    month = int(month)
    year = int(year)

    #print ( "month: " + str(month) )
    #print ( "year: " + str(year) )

    # in order to gather ALL upcoming - let's start to loop through months going ahead one at a time
    # until we get a null then break. (Usually not more than 3 months in advance is available)
    mnloop = 0
    upcoming = []

    # name-fragment found in a page heading -> canonical publisher name
    publishers = {
        'DC Comics': 'DC Comics',
        'DC\'s': 'DC Comics',
        'Marvel': 'Marvel Comics',
        'Image': 'Image Comics',
        'IDW': 'IDW Publishing',
        'Dark Horse': 'Dark Horse'
    }

    # -- this is no longer needed (testing)
    #    while (mnloop < 5):
    #        if year == 2014:
    #            if len(str(month)) == 1:
    #                month_string = '0' + str(month)
    #            else:
    #                month_string = str(month)
    #            datestring = str(year) + str(month_string)
    #        else:
    #            datestring = str(month) + str(year)

    #        pagelinks = "http://www.comicbookresources.com/tag/solicits" + str(datestring)

    #using the solicits+datestring leaves out some entries occasionally
    #should use http://www.comicbookresources.com/tag/solicitations
    #then just use the logic below but instead of datestring, find the month term and
    #go ahead up to +5 months.

    if month > 0:
        month_start = month
        month_end = month + 5
        #if month_end > 12:
        # ms = 8, me=13  [(12-8)+(13-12)] = [4 + 1] = 5
        # [(12 - ms) + (me - 12)] = number of months (5)

        monthlist = []
        mongr = month_start

        #we need to build the months we can grab, but the non-numeric way.
        while (mongr <= month_end):
            mon = mongr
            # wrap past December into January of the following year
            if mon == 13:
                mon = 1
                year += 1

            # zero-pad single-digit months for consistent matching
            if len(str(mon)) == 1:
                mon = '0' + str(mon)

            monthlist.append({
                "month": helpers.fullmonth(str(mon)).lower(),
                "num_month": mon,
                "year": str(year)
            })
            mongr += 1

        logger.info('months: ' + str(monthlist))

        pagelinks = "http://www.comicbookresources.com/tag/solicitations"

        #logger.info('datestring:' + datestring)
        #logger.info('checking:' + pagelinks)
        pageresponse = urllib2.urlopen(pagelinks)
        soup = BeautifulSoup(pageresponse)
        cntlinks = soup.findAll('h3')
        lenlinks = len(cntlinks)
        #logger.info( str(lenlinks) + ' results' )

        publish = []
        resultURL = []
        resultmonth = []
        resultyear = []

        x = 0
        cnt = 0

        while (x < lenlinks):
            headt = cntlinks[
                x]  #iterate through the hrefs pulling out only results.
            if "/?page=article&amp;id=" in str(headt):
                #print ("titlet: " + str(headt))
                headName = headt.findNext(text=True)
                #print ('headName: ' + headName)
                # NOTE(review): leftover py2 debug print - consider logging instead.
                if 'Image' in headName: print 'IMAGE FOUND'
                # NOTE(review): this excludes a heading only when ALL of
                # Marvel/DC/Image appear in it at once - confirm intended filter.
                if not all([
                        'Marvel' in headName, 'DC' in headName, 'Image'
                        in headName
                ]) and ('Solicitations' in headName or 'Solicits' in headName):
                    # test for month here (int(month) + 5)
                    if not any(
                            d.get('month', None) == str(headName).lower()
                            for d in monthlist):
                        for mt in monthlist:
                            if mt['month'] in headName.lower():
                                logger.info('matched on month: ' +
                                            str(mt['month']))
                                logger.info('matched on year: ' +
                                            str(mt['year']))
                                resultmonth.append(mt['num_month'])
                                resultyear.append(mt['year'])

                                pubstart = headName.find('Solicitations')
                                publishchk = False
                                for pub in publishers:
                                    if pub in headName[:pubstart]:
                                        #print 'publisher:' + str(publishers[pub])
                                        publish.append(publishers[pub])
                                        publishchk = True
                                        break
                                # unknown publisher: skip this heading entirely
                                if publishchk == False:
                                    break
                                    #publish.append( headName[:pubstart].strip() )
                                abc = headt.findAll('a', href=True)[0]
                                ID_som = abc[
                                    'href']  #first instance will have the right link...
                                resultURL.append(ID_som)
                                #print '(' + str(cnt) + ') [ ' + publish[cnt] + '] Link URL: ' + resultURL[cnt]
                                cnt += 1

                    else:
                        logger.info('incorrect month - not using.')

            x += 1

        if cnt == 0:
            return  #break  # no results means, end it

        loopthis = (cnt - 1)
        #this loops through each 'found' solicit page
        #shipdate = str(month_string) + '-' + str(year)  - not needed.
        while (loopthis >= 0):
            #print 'loopthis is : ' + str(loopthis)
            #print 'resultURL is : ' + str(resultURL[loopthis])
            shipdate = str(resultmonth[loopthis]) + '-' + str(
                resultyear[loopthis])
            upcoming += populate(resultURL[loopthis], publish[loopthis],
                                 shipdate)
            loopthis -= 1

    logger.info(str(len(upcoming)) + ' upcoming issues discovered.')

    # dump the scraped results to a tab-separated cache file
    newfl = mylar.CACHE_DIR + "/future-releases.txt"
    newtxtfile = open(newfl, 'wb')

    cntr = 1
    for row in upcoming:
        if row['Extra'] is None or row['Extra'] == '':
            extrarow = 'N/A'
        else:
            extrarow = row['Extra']
        newtxtfile.write(
            str(row['Shipdate']) + '\t' + str(row['Publisher']) + '\t' +
            str(row['Issue']) + '\t' + str(row['Comic']) + '\t' +
            str(extrarow) + '\tSkipped' + '\t' + str(cntr) + '\n')
        cntr += 1

    newtxtfile.close()

    logger.fdebug('attempting to populate future upcoming...')

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")

    connection = sqlite3.connect(str(mylardb))
    cursor = connection.cursor()

    # we should extract the issues that are being watched, but no data is available yet ('Watch For' status)
    # once we get the data, store it, wipe the existing table, retrieve the new data, populate the data into
    # the table, recheck the series against the current watchlist and then restore the Watch For data.

    cursor.executescript('drop table if exists future;')

    cursor.execute(
        "CREATE TABLE IF NOT EXISTS future (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, FutureID text, ComicID text);"
    )
    connection.commit()

    # re-read the cache file and load it into the freshly created table
    csvfile = open(newfl, "rb")
    creader = csv.reader(csvfile, delimiter='\t')

    t = 1

    for row in creader:
        try:
            #print ("Row: %s" % row)
            cursor.execute("INSERT INTO future VALUES (?,?,?,?,?,?,?,null);",
                           row)
        except Exception, e:
            logger.fdebug("Error - invald arguments...-skipping")
            pass
        t += 1
    # NOTE(review): no commit()/close() after this INSERT loop - the inserted
    # rows may be rolled back when the connection is garbage-collected; also
    # csvfile is never closed. Confirm whether this is handled by a caller.
Example #52
0
    def load_torrent(self, filepath):
        """Add a torrent (.torrent file path or magnet URI) to Deluge.

        Applies the configured Deluge label (creating it if necessary),
        then verifies the torrent is actually present in the client.

        Returns a dict of torrent details (hash/label/folder/size/name/files/
        time_started/completed) on success, False on failure.
        """
        logger.info('filepath to torrent file set to : ' + filepath)
        torrent_id = False

        if self.client.connected is True:
            logger.info('Checking if Torrent Exists!')

            if not filepath.startswith('magnet'):
                # read the .torrent payload once; 'with' guarantees the file
                # handle is closed (the old open(...).read() leaked it)
                with open(filepath, 'rb') as tfile:
                    torrentcontent = tfile.read()
                torrent_hash = str.lower(self.get_the_hash(filepath))  # Deluge expects a lower case hash

                logger.debug('Torrent Hash (load_torrent): "' + torrent_hash + '"')
                logger.debug('FileName (load_torrent): ' + str(os.path.basename(filepath)))

                # Check if torrent already added
                if self.find_torrent(str.lower(torrent_hash)):
                    logger.info('load_torrent: Torrent already exists!')
                    # TODO: flag as already-loaded so the failed-download checker
                    # doesn't re-grab multiple copies of an issue that is already
                    # downloaded (torrent_id stays False here, so the verification
                    # below will report failure - preserved from original logic)
                else:
                    logger.info('Torrent not added yet, trying to add it now!')
                    try:
                        # NOTE: base64.encodestring is deprecated (removed in py3.9);
                        # kept for compatibility with this py2 codebase
                        torrent_id = self.client.call('core.add_torrent_file', str(os.path.basename(filepath)), base64.encodestring(torrentcontent), '')
                    except Exception as e:
                        logger.debug('Torrent not added')
                        return False
            else:
                # magnet link: no local file to read, hand the URI to Deluge
                try:
                    torrent_id = self.client.call('core.add_torrent_magnet', str(filepath), {})
                except Exception as e:
                    logger.debug('Torrent not added')
                    return False

            # If label enabled put label on torrent in Deluge
            if torrent_id and mylar.DELUGE_LABEL:
                logger.info ('Setting label to ' + mylar.DELUGE_LABEL)
                try:
                    self.client.call('label.set_torrent', torrent_id, mylar.DELUGE_LABEL)
                except Exception:
                    # if label isn't set, let's try and create one.
                    try:
                        self.client.call('label.add', mylar.DELUGE_LABEL)
                        self.client.call('label.set_torrent', torrent_id, mylar.DELUGE_LABEL)
                    except Exception:
                        logger.warn('Unable to set label - Either try to create it manually within Deluge, and/or ensure there are no spaces, capitalization or special characters in label')
                    else:
                        logger.info('Succesfully set label to ' + mylar.DELUGE_LABEL)

        # Double-check with the client that the torrent actually landed.
        try:
            torrent_info = self.get_torrent(torrent_id)
            logger.info('Double checking that the torrent was added.')
        except Exception as e:
            logger.warn('Torrent was not added! Please check logs')
            return False
        else:
            logger.info('Torrent successfully added!')
            return {'hash':             torrent_info['hash'],
                    'label':            mylar.DELUGE_LABEL,
                    'folder':           torrent_info['save_path'],
                    'total_filesize':   torrent_info['total_size'],
                    'name':             torrent_info['name'],
                    'files':            torrent_info['files'],
                    'time_started':     torrent_info['active_time'],
                    'completed':        torrent_info['is_finished']}
Example #53
0
 def validate(self):
     """Validate the API key supplied in the request's Api-Key header.

     Returns True when the header is present and matches the configured
     mylar.CONFIG.API_KEY, otherwise an error-message string.
     """
     logger.info('attempting to validate...')
     req = cherrypy.request.headers
     logger.info('thekey: %s' % req)
     logger.info('url: %s' % cherrypy.url())
     logger.info('mylar.apikey: %s [%s]' % (mylar.CONFIG.API_KEY, type(mylar.CONFIG.API_KEY)))
     # BUGFIX: the submitted-key debug line used to run unconditionally,
     # raising KeyError before the 'not in req' guard whenever the Api-Key
     # header was missing - only log it when the header exists.
     if 'Api-Key' in req:
         logger.info('submitted.apikey: %s [%s]' % (req['Api-Key'], type(req['Api-Key'])))
     if 'Api-Key' not in req or req['Api-Key'] != str(mylar.CONFIG.API_KEY): #str(mylar.API_KEY) or mylar.API_KEY not in cherrypy.url():
         logger.info('wrong APIKEY')
         return 'api-key provided was either not present in auth header, or was incorrect.'
     else:
         return True
Example #54
0
def solicit(month, year):
    """Scrape comicbookresources.com solicitation pages month by month.

    Starting at `month`/`year`, walks up to 5 consecutive months of solicit
    pages, writes the collected issues to CACHE_DIR/future-releases.txt and
    reloads them into the `future` table of mylar.db.
    """
    #convert to numerics just to ensure this...
    month = int(month)
    year = int(year)

    # in order to gather ALL upcoming - let's start to loop through months going ahead one at a time
    # until we get a null then break. (Usually not more than 3 months in advance is available)
    mnloop = 0
    upcoming = []

    # name-fragment found in a page heading -> canonical publisher name
    publishers = {
        'DC Comics': 'DC Comics',
        'Marvel': 'Marvel Comics',
        'Image': 'Image Comics',
        'IDW': 'IDW Publishing',
        'Dark Horse': 'Dark Horse Comics'
    }

    while (mnloop < 5):
        # 2014 pages use a YYYYMM tag; earlier ones use MYYYY
        if year == 2014:
            if len(str(month)) == 1:
                month_string = '0' + str(month)
            else:
                month_string = str(month)
            datestring = str(year) + str(month_string)
        else:
            datestring = str(month) + str(year)
        pagelinks = "http://www.comicbookresources.com/tag/solicits" + str(
            datestring)
        pageresponse = urllib2.urlopen(pagelinks)
        soup = BeautifulSoup(pageresponse)
        cntlinks = soup.findAll('h3')
        lenlinks = len(cntlinks)
        logger.info(str(lenlinks) + ' results')

        publish = []
        resultURL = []

        x = 0
        cnt = 0

        while (x < lenlinks):
            headt = cntlinks[
                x]  #iterate through the hrefs pulling out only results.
            if "/?page=article&amp;id=" in str(headt):
                headName = headt.findNext(text=True)
                # BUGFIX: the old test `('Marvel' and 'DC' and 'Image' not in headName)`
                # short-circuited to just `'Image' not in headName`; test all three
                # names, matching the behaviour of the newer solicit() variant.
                if not all(['Marvel' in headName, 'DC' in headName,
                            'Image' in headName]) and (
                        'Solicitations' in headName or 'Solicits' in headName):
                    pubstart = headName.find('Solicitations')
                    for pub in publishers:
                        if pub in headName[:pubstart]:
                            publish.append(publishers[pub])
                            #publish.append( headName[:pubstart].strip() )
                    abc = headt.findAll('a', href=True)[0]
                    ID_som = abc[
                        'href']  #first instance will have the right link...
                    resultURL.append(ID_som)
                    cnt += 1
            x += 1

        if cnt == 0:
            break  # no results means, end it

        loopthis = (cnt - 1)
        #this loops through each 'found' solicit page
        shipdate = str(month) + '-' + str(year)
        while (loopthis >= 0):
            upcoming += populate(resultURL[loopthis], publish[loopthis],
                                 shipdate)
            loopthis -= 1

        month += 1  #increment month by 1
        mnloop += 1  #increment loop by 1

        if month > 12:  #failsafe failover for months
            month = 1
            year += 1

    logger.info(str(len(upcoming)) + ' upcoming issues discovered.')

    # dump the scraped results to a tab-separated cache file
    newfl = mylar.CACHE_DIR + "/future-releases.txt"
    newtxtfile = open(newfl, 'wb')

    cntr = 1
    for row in upcoming:
        if row['Extra'] is None or row['Extra'] == '':
            extrarow = 'N/A'
        else:
            extrarow = row['Extra']
        newtxtfile.write(
            str(row['Shipdate']) + '\t' + str(row['Publisher']) + '\t' +
            str(row['Issue']) + '\t' + str(row['Comic']) + '\t' +
            str(extrarow) + '\tSkipped' + '\t' + str(cntr) + '\n')
        cntr += 1

    newtxtfile.close()

    logger.fdebug('attempting to populate future upcoming...')

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")

    connection = sqlite3.connect(str(mylardb))
    cursor = connection.cursor()

    # rebuild the future table from scratch on every run
    cursor.executescript('drop table if exists future;')

    cursor.execute(
        "CREATE TABLE IF NOT EXISTS future (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, FutureID text, ComicID text);"
    )
    connection.commit()

    # re-read the cache file and load it into the freshly created table
    csvfile = open(newfl, "rb")
    creader = csv.reader(csvfile, delimiter='\t')

    t = 1

    for row in creader:
        try:
            cursor.execute("INSERT INTO future VALUES (?,?,?,?,?,?,?,null);",
                           row)
        except Exception as e:
            logger.fdebug("Error - invald arguments...-skipping")
            pass
        t += 1

    # BUGFIX: without an explicit commit, the INSERTs above were rolled back
    # when the connection was garbage-collected, leaving `future` empty.
    connection.commit()
    csvfile.close()
    connection.close()
Example #55
0
def getVersion():
    """Determine the currently-installed Mylar version and branch.

    Detects the install type (win / git / source), records it in
    mylar.INSTALL_TYPE as a side effect, and returns a
    (current_version, branch) tuple.  Either element may be None when it
    cannot be determined.
    """

    if mylar.CONFIG.GIT_BRANCH is not None and mylar.CONFIG.GIT_BRANCH.startswith(
            'win32build'):

        mylar.INSTALL_TYPE = 'win'

        # Don't have a way to update exe yet, but don't want to set VERSION to None
        return 'Windows Install', 'None'

    elif os.path.isdir(os.path.join(mylar.PROG_DIR, '.git')):

        mylar.INSTALL_TYPE = 'git'
        output, err = runGit('rev-parse HEAD')

        cur_commit_hash = None
        if not output:
            logger.error('Couldn\'t find latest installed version.')
        else:
            cur_commit_hash = str(output).strip()
            # sanity check: a git commit hash is strictly lowercase hex
            if not re.match('^[a-z0-9]+$', cur_commit_hash):
                logger.error('Output does not look like a hash, not using it')
                cur_commit_hash = None

        if mylar.CONFIG.GIT_BRANCH:
            branch = mylar.CONFIG.GIT_BRANCH
        else:
            branch = None

            branch_name, err = runGit('branch --contains %s' % cur_commit_hash)
            if not branch_name:
                logger.warn(
                    'Could not retrieve branch name [%s] from git. Defaulting to Master.'
                    % branch)
                branch = 'master'
            else:
                # git marks the currently checked-out branch with '*'
                for line in branch_name.split('\n'):
                    if '*' in line:
                        branch = re.sub('[\*\n]', '', line).strip()
                        break

                if not branch and mylar.CONFIG.GIT_BRANCH:
                    logger.warn(
                        'Unable to retrieve branch name [%s] from git. Setting branch to configuration value of : %s'
                        % (branch, mylar.CONFIG.GIT_BRANCH))
                    branch = mylar.CONFIG.GIT_BRANCH
                if not branch:
                    logger.warn(
                        'Could not retrieve branch name [%s] from git. Defaulting to Master.'
                        % branch)
                    branch = 'master'
                else:
                    logger.info('Branch detected & set to : %s' % branch)

        return cur_commit_hash, branch

    else:

        mylar.INSTALL_TYPE = 'source'

        version_file = os.path.join(mylar.PROG_DIR, 'version.txt')

        if not os.path.isfile(version_file):
            current_version = None
        else:
            with open(version_file, 'r') as f:
                current_version = f.read().strip(' \n\r')

        # NOTE: the original duplicated this identical branch-resolution
        # logic for both the current_version and no-current_version cases
        # (and ended with an unreachable warn after both returns); the
        # behaviour is the same either way, so it is consolidated here.
        if mylar.CONFIG.GIT_BRANCH:
            logger.info('Branch detected & set to : ' +
                        mylar.CONFIG.GIT_BRANCH)
            return current_version, mylar.CONFIG.GIT_BRANCH
        else:
            logger.warn(
                'No branch specified within config - will attempt to poll version from mylar'
            )
            try:
                branch = version.MYLAR_VERSION
                logger.info('Branch detected & set to : ' + branch)
            except:
                branch = 'master'
                logger.info(
                    'Unable to detect branch properly - set branch in config.ini, currently defaulting to : '
                    + branch)
            return current_version, branch
Example #56
0
    def load_torrent(self, filepath):
        """Hand a .torrent file off to rTorrent and optionally start it.

        Applies the configured label and download directory to the loaded
        torrent, then starts it if RTORRENT_STARTONLOAD is enabled.
        Returns True on success, False if rTorrent rejected the load.
        """
        logger.info('filepath to torrent file set to : ' + filepath)

        loaded = self.conn.load_torrent(filepath, verify_load=True)
        if not loaded:
            return False

        if mylar.RTORRENT_LABEL:
            loaded.set_custom(1, mylar.RTORRENT_LABEL)
            logger.info('Setting label for torrent to : ' +
                        mylar.RTORRENT_LABEL)

        if mylar.RTORRENT_DIRECTORY:
            loaded.set_directory(mylar.RTORRENT_DIRECTORY)
            logger.info('Setting directory for torrent to : ' +
                        mylar.RTORRENT_DIRECTORY)

        logger.info('Successfully loaded torrent.')

        #note that if set_directory is enabled, the torrent has to be started AFTER it's loaded or else it will give chunk errors and not seed
        should_start = bool(mylar.RTORRENT_STARTONLOAD)
        if should_start:
            logger.info('[' + str(should_start) + '] Now starting torrent.')
            loaded.start()
        else:
            logger.info('[' + str(should_start) +
                        '] Not starting torrent due to configuration setting.')
        return True
Example #57
0
    def sender(self, filename, test=False):
        """Send an nzb file to NZBGet via its XML-RPC `append` call.

        filename -- path to the .nzb file to submit.
        test     -- unused here; kept for interface compatibility.

        Returns a dict: {'status': False} on any failure, or
        {'status': True, 'NZBID': <id>} where NZBID is used to track
        download progress.
        """
        # Map the configured priority label onto NZBGet's numeric scale.
        # There's no priority for "paused", so set "Very Low" (-100) and
        # deal with that later.  Bugfix: the original if/elif chain left
        # nzbgetpriority undefined for an unrecognized label, raising a
        # NameError at append time - unknown/unset values now default to
        # Normal (0).
        priority_map = {
            'Default': 0,
            'Normal': 0,
            'Low': -50,
            'High': 50,
            'Very High': 100,
            'Force': 900,
            'Paused': -100,
        }
        if mylar.CONFIG.NZBGET_PRIORITY:
            nzbgetpriority = priority_map.get(mylar.CONFIG.NZBGET_PRIORITY, 0)
        else:
            #if nzbget priority isn't selected, default to Normal (0)
            nzbgetpriority = 0

        # NZBGet expects the nzb payload base64-encoded.
        with open(filename, 'rb') as in_file:
            nzbcontent = in_file.read()
            nzbcontent64 = standard_b64encode(nzbcontent).decode('utf-8')

        try:
            logger.fdebug('sending now to %s' % self.display_url)
            if mylar.CONFIG.NZBGET_CATEGORY is None:
                nzb_category = ''
            else:
                nzb_category = mylar.CONFIG.NZBGET_CATEGORY
            sendresponse = self.server.append(filename, nzbcontent64,
                                              nzb_category, nzbgetpriority,
                                              False, False, '', 0, 'SCORE')
        except http.client.socket.error as e:
            # connection-level failure; scrub the password out of the error
            nzb_url = re.sub(mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED', str(e))
            logger.error(
                'Please check your NZBget host and port (if it is running). Tested against: %s'
                % nzb_url)
            return {'status': False}
        except xmlrpc.client.ProtocolError as e:
            logger.info(e, )
            if e.errmsg == "Unauthorized":
                err = re.sub(mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED',
                             e.errmsg)
                logger.error('Unauthorized username / password provided: %s' %
                             err)
                return {'status': False}
            else:
                err = "Protocol Error: %s" % re.sub(
                    mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED', e.errmsg)
                logger.error('Protocol error returned: %s' % err)
                return {'status': False}
        except Exception as e:
            logger.warn(
                'uh-oh: %s' %
                re.sub(mylar.CONFIG.NZBGET_PASSWORD, 'REDACTED', str(e)))
            return {'status': False}
        else:
            if sendresponse <= 0:
                logger.warn(
                    'Invalid response received after sending to NZBGet: %s' %
                    sendresponse)
                return {'status': False}
            else:
                #sendresponse is the NZBID that we use to track the progress....
                return {'status': True, 'NZBID': sendresponse}
Example #58
0
def extract_image(location, single=False, imquality=None):
    """Extract the cover image (and ComicInfo.xml, if present) from a cbz/cbr.

    location  -- full path to the cbr/cbz (filename included in path)
    single    -- should be set to True so that a single file can have the
                 coverfile extracted and have the cover location returned
                 to the calling function
    imquality -- the calling function ('notif' for notifications will
                 initiate a resize image before saving the cover)

    Returns {'ComicImage': <base64 jpeg or path or None>, 'metadata': ...},
    or None when PIL is unavailable or extraction fails outright.
    """
    if PIL_Found is False:
        return
    cover = "notfound"
    pic_extensions = ('.jpg', '.png', '.webp')
    issue_ends = ('1', '0')
    modtime = os.path.getmtime(location)
    low_infile = 9999999999999
    low_num = 1000
    local_filename = os.path.join(mylar.CONFIG.CACHE_DIR, 'temp_notif')
    cb_filename = None
    cb_filenames = []
    metadata = None
    # bugfix: initialize up-front so the final return can't raise
    # UnboundLocalError when single is False or the archive has no entries.
    ComicImage = None
    extension = None
    if single is True:
        if location.endswith(".cbz"):
            location_in = zipfile.ZipFile(location)
            dir_opt = 'is_dir'
            actual_ext = '.cbz'
        else:
            try:
                location_in = rarfile.RarFile(location)
                dir_opt = 'isdir'
                actual_ext = '.cbr'
            except rarfile.BadRarFile as e:
                logger.warn('[WARNING] %s: %s' % (location, e))
                try:
                    logger.info(
                        'Trying to see if this is a zip renamed as a rar: %s' %
                        (location))
                    location_in = zipfile.ZipFile(location)
                    dir_opt = 'is_dir'
                    actual_ext = '.cbz'
                except Exception as e:
                    logger.warn('[EXCEPTION] %s' % e)
                    return
            except:
                # bugfix: sys.exec_info() does not exist (typo for exc_info)
                logger.warn('[EXCEPTION]: %s' % sys.exc_info()[0])
                return
        try:
            for infile in location_in.infolist():
                basename = os.path.basename(infile.filename)
                if infile.filename == 'ComicInfo.xml':
                    logger.fdebug('Extracting ComicInfo.xml to display.')
                    metadata = location_in.read(infile.filename)
                    if cover == 'found':
                        break
                filename, extension = os.path.splitext(basename)
                tmp_infile = re.sub("[^0-9]", "", filename).strip()
                # skip directories, files with no digits, and 'zzz' filler pages
                if any([
                        tmp_infile == '', not getattr(infile, dir_opt), 'zzz'
                        in filename
                ]):
                    continue
                # track the lowest-numbered image as a fallback cover
                if all([
                        infile.filename.lower().endswith(pic_extensions),
                        int(tmp_infile) < int(low_infile)
                ]):
                    low_infile = tmp_infile
                    low_infile_name = infile.filename
                elif any([
                        '00a' in infile.filename, '00b' in infile.filename,
                        '00c' in infile.filename, '00d' in infile.filename,
                        '00e' in infile.filename, '00fc'
                        in infile.filename.lower()
                ]) and infile.filename.endswith(
                        pic_extensions) and cover == "notfound":
                    # alternate-cover naming convention (00a..00e, 00fc)
                    altlist = ('00a', '00b', '00c', '00d', '00e', '00fc')
                    for alt in altlist:
                        if alt in infile.filename.lower():
                            cb_filename = infile.filename
                            cover = "found"
                            break
                elif all([
                        tmp_infile.endswith(issue_ends),
                        infile.filename.lower().endswith(pic_extensions),
                        int(tmp_infile) < int(low_infile), cover == 'notfound'
                ]):
                    cb_filenames.append(infile.filename)
            if cover != "found" and any(
                [len(cb_filenames) > 0, low_infile != 9999999999999]):
                logger.fdebug(
                    'Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : %s'
                    % (low_infile_name))
                cb_filename = low_infile_name
                cover = "found"

        except Exception as e:
            logger.error(
                '[ERROR] Unable to properly retrieve the cover. It\'s probably best to re-tag this file : %s'
                % e)
            return

        logger.fdebug('cb_filename set to : %s' % cb_filename)

        if extension is not None:
            ComicImage = local_filename + extension
            try:
                insidefile = location_in.getinfo(cb_filename)
                img = Image.open(BytesIO(location_in.read(insidefile)))
                # resize to 600px wide, preserving aspect ratio
                wpercent = (600 / float(img.size[0]))
                hsize = int((float(img.size[1]) * float(wpercent)))
                img = img.resize((600, hsize), Image.ANTIALIAS)
                output = BytesIO()
                img.save(output, format="JPEG")
                try:
                    ComicImage = str(base64.b64encode(output.getvalue()),
                                     'utf-8')
                except Exception as e:
                    # bugfix: padding appended to bytes must itself be bytes
                    # (was "===", a str - TypeError on this fallback path)
                    ComicImage = str(
                        base64.b64encode(output.getvalue() + b"==="), 'utf-8')
                output.close()

            except Exception as e:
                logger.warn('[WARNING] Unable to resize existing image: %s' %
                            e)
        else:
            ComicImage = local_filename
    return {'ComicImage': ComicImage, 'metadata': metadata}
Example #59
0
    def notify(self,
               snline=None,
               prline=None,
               prline2=None,
               snatched=None,
               sent_to=None,
               prov=None,
               module=None,
               method=None):
        """Send a PushBullet notification (or, for method == 'GET', query
        the API and return the decoded JSON response).

        snatched/snline/prov/sent_to build a 'snatched' message;
        prline/prline2 build a 'complete' message otherwise.
        Returns the JSON dict for GET, else a {'status', 'message'} dict.
        """
        if module is None:
            module = ''
        module += '[NOTIFIER]'

        # bugfix: data was previously left undefined on the 'GET' path,
        # which raised a NameError at json.dumps(data) below.
        data = None

        if method == 'GET':
            # the device-listing request was never implemented here
            # (handled by the response check below); nothing to build.
            pass
        else:
            if snatched:
                # strip a trailing period so the message reads cleanly
                if snatched[-1] == '.': snatched = snatched[:-1]
                event = snline
                message = "Mylar has snatched: " + snatched + " from " + prov + " and has sent it to " + sent_to
            else:
                event = prline + ' complete!'
                message = prline2

            data = {
                'type': "note",  #'device_iden': self.deviceid,
                'title': event.encode('utf-8'),  #"mylar",
                'body': message.encode('utf-8')
            }

        r = self._session.post(self.PUSH_URL, data=json.dumps(data))
        dt = r.json()
        if r.status_code == 200:
            if method == 'GET':
                return dt
            else:
                logger.info(module + ' PushBullet notifications sent.')
                return {
                    'status': True,
                    'message': 'APIKEY verified OK / notification sent'
                }
        elif r.status_code >= 400 and r.status_code < 500:
            # client-side error (bad API key, malformed request, ...)
            logger.error(module + ' PushBullet request failed: %s' % r.content)
            return {
                'status': False,
                'message':
                '[' + str(r.status_code) + '] ' + dt['error']['message']
            }
        else:
            logger.error(module +
                         ' PushBullet notification failed serverside: %s' %
                         r.content)
            return {
                'status': False,
                'message':
                '[' + str(r.status_code) + '] ' + dt['error']['message']
            }
Example #60
0
def main():
    """Mylar entry point.

    Parses command-line arguments, initializes configuration and logging,
    optionally runs maintenance-mode subcommands (no GUI), then starts the
    web server and background threads, looping until a shutdown / restart
    signal is received.
    """

    # Fixed paths to mylar
    if hasattr(sys, 'frozen'):
        mylar.FULL_PATH = os.path.abspath(sys.executable)
    else:
        mylar.FULL_PATH = os.path.abspath(__file__)

    mylar.PROG_DIR = os.path.dirname(mylar.FULL_PATH)
    mylar.ARGS = sys.argv[1:]

    # From sickbeard
    mylar.SYS_ENCODING = None

    try:
        locale.setlocale(locale.LC_ALL, "")
        mylar.SYS_ENCODING = locale.getpreferredencoding()
    except (locale.Error, IOError):
        pass

    # for OSes that are poorly configured I'll just force UTF-8
    if not mylar.SYS_ENCODING or mylar.SYS_ENCODING in ('ANSI_X3.4-1968',
                                                        'US-ASCII', 'ASCII'):
        mylar.SYS_ENCODING = 'UTF-8'

    if not logger.LOG_LANG.startswith('en'):
        print(
            'language detected as non-English (%s). Forcing specific logging module - errors WILL NOT be captured in the logs'
            % logger.LOG_LANG)
    else:
        print('log language set to %s' % logger.LOG_LANG)

    # Set up and gather command line arguments
    parser = argparse.ArgumentParser(
        description='Automated Comic Book Downloader')
    subparsers = parser.add_subparsers(title='Subcommands', dest='maintenance')
    parser_maintenance = subparsers.add_parser(
        'maintenance',
        help=
        'Enter maintenance mode (no GUI). Additional commands are available (maintenance --help)'
    )

    #main parser
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Increase console logging verbosity')
    parser.add_argument('-q',
                        '--quiet',
                        action='store_true',
                        help='Turn off console logging')
    parser.add_argument('-d',
                        '--daemon',
                        action='store_true',
                        help='Run as a daemon')
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        help='Force mylar to run on a specified port')
    parser.add_argument(
        '-b',
        '--backup',
        action='store_true',
        help=
        'Will automatically backup & keep the last 2 copies of the .db & ini files prior to startup'
    )
    parser.add_argument(
        '-w',
        '--noweekly',
        action='store_true',
        help=
        'Turn off weekly pull list check on startup (quicker boot sequence)')
    parser.add_argument(
        '--datadir', help='Specify a directory where to store your data files')
    parser.add_argument('--config', help='Specify a config file to use')
    parser.add_argument('--nolaunch',
                        action='store_true',
                        help='Prevent browser from launching on startup')
    parser.add_argument(
        '--pidfile',
        help='Create a pid file (only relevant when running as a daemon)')
    parser.add_argument(
        '--safe',
        action='store_true',
        help=
        'redirect the startup page to point to the Manage Comics screen on startup'
    )
    parser_maintenance.add_argument(
        '-xj',
        '--exportjson',
        action='store',
        help='Export existing mylar.db to json file')
    parser_maintenance.add_argument('-id',
                                    '--importdatabase',
                                    action='store',
                                    help='Import a mylar.db into current db')
    parser_maintenance.add_argument(
        '-ij',
        '--importjson',
        action='store',
        help=
        'Import a specified json file containing just {"ComicID": "XXXXX"} into current db'
    )
    parser_maintenance.add_argument('-st',
                                    '--importstatus',
                                    action='store_true',
                                    help='Provide current maintenance status')
    parser_maintenance.add_argument(
        '-u',
        '--update',
        action='store_true',
        help='force mylar to perform an update as if in GUI')
    parser_maintenance.add_argument(
        '-fs',
        '--fixslashes',
        action='store_true',
        help='remove double-slashes from within paths in db')
    #parser_maintenance.add_argument('-it', '--importtext', action='store', help='Import a specified text file into current db')

    args = parser.parse_args()

    if args.maintenance:
        if all([
                args.exportjson is None, args.importdatabase is None,
                args.importjson is None, args.importstatus is False,
                args.update is False, args.fixslashes is False
        ]):
            # typo fixed: 'argumeent' -> 'argument'
            print(
                'Expecting subcommand with the maintenance positional argument'
            )
            sys.exit()
        mylar.MAINTENANCE = True
    else:
        mylar.MAINTENANCE = False

    if args.verbose:
        print('Verbose/Debugging mode enabled...')
        mylar.LOG_LEVEL = 2
    elif args.quiet:
        mylar.QUIET = True
        print('Quiet logging mode enabled...')
        mylar.LOG_LEVEL = 0
    else:
        mylar.LOG_LEVEL = 1

    if args.daemon:
        if sys.platform == 'win32':
            print("Daemonize not supported under Windows, starting normally")
        else:
            mylar.DAEMON = True

    if args.pidfile:
        mylar.PIDFILE = str(args.pidfile)

        # If the pidfile already exists, mylar may still be running, so exit
        if os.path.exists(mylar.PIDFILE):
            sys.exit("PID file '" + mylar.PIDFILE +
                     "' already exists. Exiting.")

        # The pidfile is only useful in daemon mode, make sure we can write the file properly
        if mylar.DAEMON:
            mylar.CREATEPID = True
            try:
                open(mylar.PIDFILE, 'w').write("pid\n")
            except IOError as e:
                raise SystemExit("Unable to write PID file: %s [%d]" %
                                 (e.strerror, e.errno))
        else:
            print("Not running in daemon mode. PID file creation disabled.")

    if args.datadir:
        mylar.DATA_DIR = args.datadir
    else:
        mylar.DATA_DIR = mylar.PROG_DIR

    if args.config:
        mylar.CONFIG_FILE = args.config
    else:
        mylar.CONFIG_FILE = os.path.join(mylar.DATA_DIR, 'config.ini')

    if args.safe:
        mylar.SAFESTART = True
    else:
        mylar.SAFESTART = False

    if args.noweekly:
        mylar.NOWEEKLY = True
    else:
        mylar.NOWEEKLY = False

    # Put the database in the DATA_DIR
    mylar.DB_FILE = os.path.join(mylar.DATA_DIR, 'mylar.db')

    # Read config and start logging
    if mylar.MAINTENANCE is False:
        print('Initializing startup sequence....')

    #try:
    mylar.initialize(mylar.CONFIG_FILE)
    #except Exception as e:
    #    print e
    #    raise SystemExit('FATAL ERROR')

    if mylar.MAINTENANCE is False:
        filechecker.validateAndCreateDirectory(mylar.DATA_DIR, True)

        # Make sure the DATA_DIR is writeable
        if not os.access(mylar.DATA_DIR, os.W_OK):
            raise SystemExit('Cannot write to the data directory: ' +
                             mylar.DATA_DIR + '. Exiting...')

    # backup the db and configs before they load.
    if args.backup:
        print('[AUTO-BACKUP] Backing up .db and config.ini files for safety.')
        backupdir = os.path.join(mylar.DATA_DIR, 'backup')

        try:
            os.makedirs(backupdir)
            print(
                '[AUTO-BACKUP] Directory does not exist for backup - creating : '
                + backupdir)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                print('[AUTO-BACKUP] Directory already exists.')
                raise

        # iteration 0 backs up the db, iteration 1 the config file;
        # each keeps one previous copy (.1 suffix)
        i = 0
        while (i < 2):
            if i == 0:
                ogfile = mylar.DB_FILE
                back = os.path.join(backupdir, 'mylar.db')
                back_1 = os.path.join(backupdir, 'mylar.db.1')
            else:
                ogfile = mylar.CONFIG_FILE
                back = os.path.join(backupdir, 'config.ini')
                back_1 = os.path.join(backupdir, 'config.ini.1')

            try:
                # bugfix: message previously always said 'mylar.db' even
                # when backing up config.ini
                print('[AUTO-BACKUP] Now Backing up %s file' %
                      os.path.basename(ogfile))
                if os.path.isfile(back_1):
                    print('[AUTO-BACKUP] ' + back_1 +
                          ' exists. Deleting and keeping new.')
                    os.remove(back_1)
                if os.path.isfile(back):
                    print('[AUTO-BACKUP] Now renaming ' + back + ' to ' +
                          back_1)
                    shutil.move(back, back_1)
                print('[AUTO-BACKUP] Now copying db file to ' + back)
                shutil.copy(ogfile, back)

            except OSError as exception:
                # bugfix: errno has no EXIST attribute (was errno.EXIST),
                # which raised AttributeError instead of re-raising
                if exception.errno != errno.EEXIST:
                    raise

            i += 1

    # Rename the main thread
    threading.currentThread().name = "MAIN"

    if mylar.DAEMON:
        mylar.daemonize()

    if mylar.MAINTENANCE is True and any([
            args.exportjson, args.importjson, args.update is True,
            args.importstatus is True, args.fixslashes is True
    ]):
        loggermode = '[MAINTENANCE-MODE]'
        if args.importstatus:  #mylar.MAINTENANCE is True:
            cs = maintenance.Maintenance('status')
            cstat = cs.check_status()
        else:
            logger.info('%s Initializing maintenance mode' % loggermode)

            if args.update is True:
                logger.info(
                    '%s Attempting to update Mylar so things can work again...'
                    % loggermode)
                try:
                    mylar.shutdown(restart=True, update=True, maintenance=True)
                except Exception as e:
                    sys.exit('%s Mylar failed to update: %s' % (loggermode, e))

            elif args.importdatabase:
                #for attempted db import.
                maintenance_path = args.importdatabase
                logger.info('%s db path accepted as %s' %
                            (loggermode, maintenance_path))
                di = maintenance.Maintenance('database-import',
                                             file=maintenance_path)
                d = di.database_import()
            elif args.importjson:
                #for attempted file re-import (json format)
                maintenance_path = args.importjson
                logger.info(
                    '%s file indicated as being in json format - path accepted as %s'
                    % (loggermode, maintenance_path))
                ij = maintenance.Maintenance('json-import',
                                             file=maintenance_path)
                j = ij.json_import()
            #elif args.importtext:
            #    #for attempted file re-import (list format)
            #    maintenance_path = args.importtext
            #    logger.info('%s file indicated as being in list format - path accepted as %s' % (loggermode, maintenance_path))
            #    it = maintenance.Maintenance('list-import', file=maintenance_path)
            #    t = it.list_import()
            elif args.exportjson:
                #for export of db comicid's in json format
                maintenance_path = args.exportjson
                logger.info(
                    '%s file indicated as being written to json format - destination accepted as %s'
                    % (loggermode, maintenance_path))
                ej = maintenance.Maintenance('json-export',
                                             output=maintenance_path)
                j = ej.json_export()
            elif args.fixslashes:
                #for running the fix slashes on the db manually
                logger.info('%s method indicated as fix slashes' % loggermode)
                fs = maintenance.Maintenance('fixslashes')
                j = fs.fix_slashes()
            else:
                # bugfix: previously referenced the undefined name
                # 'maintenance_info' (NameError); report the subcommand instead
                logger.info('%s Not a valid command: %s' %
                            (loggermode, args.maintenance))
                sys.exit()
            logger.info('%s Exiting Maintenance mode' % (loggermode))

        #possible option to restart automatically after maintenance has completed...
        sys.exit()

    # Force the http port if neccessary
    if args.port:
        http_port = args.port
        logger.info('Starting Mylar on forced port: %i' % http_port)
    else:
        http_port = int(mylar.CONFIG.HTTP_PORT)

    # Check if pyOpenSSL is installed. It is required for certificate generation
    # and for cherrypy.
    if mylar.CONFIG.ENABLE_HTTPS:
        try:
            import OpenSSL
        except ImportError:
            logger.warn("The pyOpenSSL module is missing. Install this " \
                "module to enable HTTPS. HTTPS will be disabled.")
            mylar.CONFIG.ENABLE_HTTPS = False

    # Try to start the server. Will exit here is address is already in use.
    web_config = {
        'http_port': http_port,
        'http_host': mylar.CONFIG.HTTP_HOST,
        'http_root': mylar.CONFIG.HTTP_ROOT,
        'enable_https': mylar.CONFIG.ENABLE_HTTPS,
        'https_cert': mylar.CONFIG.HTTPS_CERT,
        'https_key': mylar.CONFIG.HTTPS_KEY,
        'https_chain': mylar.CONFIG.HTTPS_CHAIN,
        'http_username': mylar.CONFIG.HTTP_USERNAME,
        'http_password': mylar.CONFIG.HTTP_PASSWORD,
        'authentication': mylar.CONFIG.AUTHENTICATION,
        'login_timeout': mylar.CONFIG.LOGIN_TIMEOUT,
        'opds_enable': mylar.CONFIG.OPDS_ENABLE,
        'opds_authentication': mylar.CONFIG.OPDS_AUTHENTICATION,
        'opds_username': mylar.CONFIG.OPDS_USERNAME,
        'opds_password': mylar.CONFIG.OPDS_PASSWORD,
        'opds_pagesize': mylar.CONFIG.OPDS_PAGESIZE,
    }

    # Try to start the server.
    webstart.initialize(web_config)

    #check for version here after web server initialized so it doesn't try to repeatedly hit github
    #for version info if it's already running
    versioncheck.versionload()

    if mylar.CONFIG.LAUNCH_BROWSER and not args.nolaunch:
        mylar.launch_browser(mylar.CONFIG.HTTP_HOST, http_port,
                             mylar.CONFIG.HTTP_ROOT)

    # Start the background threads
    mylar.start()

    signal.signal(signal.SIGTERM, handler_sigterm)

    # Main wait loop: sleep until a signal (shutdown/restart/update) is set,
    # either by the web UI or a KeyboardInterrupt.
    while True:
        if not mylar.SIGNAL:
            try:
                time.sleep(1)
            except KeyboardInterrupt:
                mylar.SIGNAL = 'shutdown'
        else:
            logger.info('Received signal: ' + mylar.SIGNAL)
            if mylar.SIGNAL == 'shutdown':
                mylar.shutdown()
            elif mylar.SIGNAL == 'restart':
                mylar.shutdown(restart=True)
            else:
                mylar.shutdown(restart=True, update=True)

            mylar.SIGNAL = None

    return