Example #1
    def markissues(self, action=None, **args):
        myDB = db.DBConnection()
        issuesToAdd = []
        issuestoArchive = []
        if action == 'WantedNew':
            newaction = 'Wanted'
        else:
            newaction = action
        for IssueID in args:
            if IssueID is None: continue
            else:
                mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone()
                miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
                if action == 'Downloaded':
                    if mi['Status'] == "Skipped" or mi['Status'] == "Wanted":
                        logger.info(u"Cannot change status to %s as comic is not Snatched or Downloaded" % (newaction))
                        continue
                elif action == 'Archived':
                    logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
                    #updater.forceRescan(mi['ComicID'])
                    issuestoArchive.append(IssueID)
                elif action == 'Wanted':
                    logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
                    issuesToAdd.append(IssueID)

                controlValueDict = {"IssueID": IssueID}
                newValueDict = {"Status": newaction}
                myDB.upsert("issues", newValueDict, controlValueDict)
        if len(issuestoArchive) > 0:
            updater.forceRescan(mi['ComicID'])
        if len(issuesToAdd) > 0:
            logger.debug("Marking issues: %s as Wanted" % issuesToAdd)
            threading.Thread(target=search.searchIssueIDList, args=[issuesToAdd]).start()
        #if IssueID:
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % mi['ComicID'])
Example #2
 def find_torrent(self, hash):
     logger.debug('Finding Torrent hash: ' + hash)
     torrent_info = self.get_torrent(hash)
     if torrent_info:
         return True
     else:
         return False
Example #3
    def notify(self, message, subject, module=None):
        if module is None:
            module = ''
        module += '[NOTIFIER]'
        sent_successfully = False

        try:
            logger.debug(module + ' Sending email notification. From: [%s] - To: [%s] - Server: [%s] - Port: [%s] - Username: [%s] - Password: [********] - Encryption: [%s] - Message: [%s]' % (self.emailfrom, self.emailto, self.emailsvr, self.emailport, self.emailuser, self.emailenc, message))
            msg = MIMEMultipart()
            msg['From'] = str(self.emailfrom)
            msg['To'] = str(self.emailto)
            msg['Subject'] = subject
            msg['Date'] = formatdate()
            msg['Message-ID'] = make_msgid('mylar')
            msg.attach(MIMEText(message, 'plain'))

            if self.emailenc == 1:
                sock = smtplib.SMTP_SSL(self.emailsvr, str(self.emailport))
            else:
                sock = smtplib.SMTP(self.emailsvr, str(self.emailport))

            if self.emailenc == 2:
                sock.starttls()

            if self.emailuser or self.emailpass:
                sock.login(str(self.emailuser), str(self.emailpass))

            sock.sendmail(str(self.emailfrom), str(self.emailto), msg.as_string())
            sock.quit()
            sent_successfully = True

        except Exception as e:
            logger.warn(module + ' Oh no!! Email notification failed: ' + str(e))

        return sent_successfully
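For illustration, the same SMTP flow reduced to a self-contained sketch; the server address, port and mail addresses below are placeholder assumptions, not values from the project:

    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.utils import formatdate, make_msgid

    def send_plain_mail(subject, message, emailfrom, emailto,
                        server='localhost', port=25):
        # Build the message the same way the notifier above does
        msg = MIMEMultipart()
        msg['From'] = emailfrom
        msg['To'] = emailto
        msg['Subject'] = subject
        msg['Date'] = formatdate()
        msg['Message-ID'] = make_msgid('mylar')
        msg.attach(MIMEText(message, 'plain'))

        # assumes an unauthenticated, unencrypted server; see the notifier
        # above for the SMTP_SSL / starttls / login variants
        sock = smtplib.SMTP(server, port)
        try:
            sock.sendmail(emailfrom, emailto, msg.as_string())
        finally:
            sock.quit()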
Example #4
    def get_torrent(self, torrent):
        torrent = self.conn.get_torrent(torrent.hashString)
        torrent_files = []
        torrent_directory = os.path.normpath(torrent.downloadDir)

        for f in torrent.files().itervalues():
            if not os.path.normpath(f['name']).startswith(torrent_directory):
                file_path = os.path.join(torrent_directory,
                                         f['name'].lstrip('/'))
            else:
                file_path = f['name']

            torrent_files.append(file_path)

        torrent_info = {
            'hash': torrent.hashString,
            'name': torrent.name,
            'folder': torrent.downloadDir,
            'completed': torrent.progress == 100,
            'label': 'None',  # labels not supported in transmission - for when it's in transmission
            'files': torrent_files,
            'upload_total': torrent.uploadedEver,
            'download_total': torrent.downloadedEver,
            'ratio': torrent.ratio,
            'total_filesize': torrent.sizeWhenDone,
            'time_started': torrent.date_started
        }
        logger.debug(torrent_info)
        return torrent_info if torrent_info else False
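The self.conn object used above behaves like a transmissionrpc client; a minimal sketch of the connection this method presumes, with host, port and credentials as placeholder assumptions:

    import transmissionrpc  # assumption: the transmissionrpc package is installed

    # placeholder connection details - adjust to your Transmission daemon
    conn = transmissionrpc.Client('localhost', port=9091,
                                  user='admin', password='password')
    torrent = conn.get_torrent('c9e15763f722f23e98a29decdfae341b98d53056')  # hypothetical hash
    print(torrent.name, torrent.progress)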
Example #5
    def addurl(self, url):
        params = {'action': 'add-url', 'token': self.token, 's': url}
        try:
            r = requests.post(url=self.utorrent_url,
                              auth=self.auth,
                              cookies=self.cookies,
                              params=params)
        except requests.exceptions.RequestException as err:
            logger.debug('URL: ' + str(self.utorrent_url))
            logger.debug(
                'Error sending to uTorrent Client. uTorrent responded with error: '
                + str(err))
            return 'fail'

        # (to-do) verify the hash in order to ensure it's loaded here
        if r.status_code == 200:
            logger.info('Successfully added torrent to uTorrent client.')
            hash = self.calculate_torrent_hash(link=url)
            if mylar.UTORRENT_LABEL:
                try:
                    self.setlabel(hash)
                except:
                    logger.warn('Unable to set label for torrent.')
            return hash
        else:
            return 'fail'
Example #6
    def notify(self, message, subject, module=None):
        if module is None:
            module = ''
        module += '[NOTIFIER]'
        sent_successfully = False

        try:
            logger.debug(module + u' Sending email notification. From: [%s] - To: [%s] - Server: [%s] - Port: [%s] - Username: [%s] - Password: [********] - Encryption: [%s] - Message: [%s]' % (self.emailfrom, self.emailto, self.emailsvr, self.emailport, self.emailuser, self.emailenc, message))
            msg = MIMEMultipart()
            msg['From'] = str(self.emailfrom)
            msg['To'] = str(self.emailto)
            msg['Subject'] = subject
            msg.attach(MIMEText(message, 'plain'))

            if self.emailenc == 1:
                sock = smtplib.SMTP_SSL(self.emailsvr, str(self.emailport))
            else:
                sock = smtplib.SMTP(self.emailsvr, str(self.emailport))

            if self.emailenc == 2:
                sock.starttls()

            if self.emailuser or self.emailpass:
                sock.login(str(self.emailuser), str(self.emailpass))

            sock.sendmail(str(self.emailfrom), str(self.emailto), msg.as_string())
            sock.quit()
            sent_successfully = True

        except Exception as e:
            logger.warn(module + u' Oh no!! Email notification failed: ' + str(e))

        return sent_successfully
Example #7
    def addfile(self, filepath=None, filename=None, bytes=None):
        params = {'action': 'add-file', 'token': self.token}
        try:
            d = open(filepath, 'rb')
            tordata = d.read()
            d.close()
        except:
            logger.warn('Unable to load torrent file. Aborting at this time.')
            return 'fail'

        files = {'torrent_file': tordata}
        try:
            r = requests.post(url=self.utorrent_url, auth=self.auth, cookies=self.cookies, params=params, files=files)
        except requests.exceptions.RequestException as err:
            logger.debug('URL: ' + str(self.utorrent_url))
            logger.debug('Error sending to uTorrent Client. uTorrent responded with error: ' + str(err))
            return 'fail'


        # (to-do) verify the hash in order to ensure it's loaded here
        if r.status_code == 200:
            logger.info('Successfully added torrent to uTorrent client.')
            hash = self.calculate_torrent_hash(data=tordata)
            if mylar.UTORRENT_LABEL:
                try:
                    self.setlabel(hash)
                except:
                    logger.warn('Unable to set label for torrent.')
            return hash
        else:
            return 'fail'
Example #8
def runGit(args):

    git_locations = []
    if mylar.CONFIG.GIT_PATH is not None:
        git_locations.append(mylar.CONFIG.GIT_PATH)

    git_locations.append('git')

    if platform.system().lower() == 'darwin':
        git_locations.append('/usr/local/git/bin/git')

    output = err = None

    for cur_git in git_locations:
        gitworked = False

        cmd = '%s %s' % (cur_git, args)

        try:
            logger.debug('Trying to execute: %s with shell in %s' %
                         (cmd, mylar.PROG_DIR))
            output = subprocess.run(cmd,
                                    text=True,
                                    capture_output=True,
                                    shell=True,
                                    cwd=mylar.PROG_DIR)
            logger.debug('Git output: %s' % output)
            gitworked = True
        except Exception as e:
            logger.error('Command %s didn\'t work [%s]' % (cmd, e))
            gitworked = False
            continue
        else:
            if all([
                    output.stderr is not None, output.stderr != '',
                    output.returncode > 0
            ]):
                logger.error('Encountered error: %s' % output.stderr)
                gitworked = False

        if "not found" in output.stdout or "not recognized as an internal or external command" in output.stdout:
            logger.error('[%s] Unable to find git with command: %s' %
                         (output.stdout, cmd))
            output = None
            gitworked = False
        elif ('fatal:' in output.stdout) or ('fatal:' in output.stderr):
            logger.error('Error: %s' % output.stderr)
            logger.error(
                'Git returned bad info. Are you sure this is a git installation? [%s]'
                % output.stdout)
            output = None
            gitworked = False
        elif gitworked:
            break

    if output is None:
        return (None, None)

    return (output.stdout, output.stderr)
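A hypothetical call site for the function above; with the None guard in place, callers can tell a missing git binary apart from real output:

    # illustrative only - not code from the project
    stdout, stderr = runGit('rev-parse HEAD')
    if stdout is None:
        logger.error('Unable to determine the current git revision')
    else:
        current_commit = stdout.strip()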
Example #9
    def dir_scan(self):
        logger.debug("Dir Scan Requested")
        full_paths = []
        full_paths.append(mylar.CONFIG.DESTINATION_DIR)
        for root, dirs, files in os.walk(mylar.CONFIG.DESTINATION_DIR):
            full_paths.extend(os.path.join(root, d) for d in dirs)

        logger.info("Dir Scan Completed")
        logger.info("%i Dirs Found" % (len(full_paths)))
        return full_paths
Example #10
 def get_torrent(self, hash):
     logger.debug('Getting Torrent info hash: ' + hash)
     try:
         torrent_info = self.client.get_torrent(hash)
     except Exception as e:
         logger.error('Could not get torrent info for ' + hash)
         return False
     else:
         logger.info('Successfully located information for torrent')
         return torrent_info
Example #11
 def _get_token(self):
     url = urlparse.urljoin(self.base_url, "gui/token.html")
     try:
         response = self.opener.open(url)
     except urllib2.HTTPError as err:
         logger.debug("URL: " + str(url))
         logger.debug("Error getting Token. uTorrent responded with error: " + str(err))
         return
     match = re.search(utorrentclient.TOKEN_REGEX, response.read())
     return match.group(1)
Example #12
 def get_torrent(self, hash):
     logger.debug('Getting Torrent info hash: ' + hash)
     try:
         torrent_info = self.client.call('core.get_torrent_status', hash, '')
     except Exception as e:
         logger.error('Could not get torrent info for ' + hash)
         return False
     else:
         logger.info('Getting Torrent Info!')
         return torrent_info
Example #13
File: deluge.py Project: 2mny/mylar
    def get_the_hash(self, filepath):
        import hashlib
        import bencode

        # Read the torrent file and hash the bencoded 'info' dict
        with open(filepath, "rb") as torrent_file:
            metainfo = bencode.decode(torrent_file.read())
        info = metainfo['info']
        thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
        logger.debug('Hash: ' + thehash)
        return thehash
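The same info-hash computation as a standalone sketch. The bencode.py package (imported as bencodepy) is an assumption here - any bencode codec with encode/decode works - and note that on Python 3 the metainfo keys are bytes:

    import hashlib
    import bencodepy  # assumption: pip install bencode.py

    def torrent_info_hash(filepath):
        # the SHA1 of the bencoded 'info' dict is the torrent's info-hash
        with open(filepath, 'rb') as fh:
            metainfo = bencodepy.decode(fh.read())
        return hashlib.sha1(bencodepy.encode(metainfo[b'info'])).hexdigest().upper()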
Example #14
    def get_artwork_from_cache(self, ComicID=None, IssueID=None, imageURL=None):
        """
        Pass a comicvine id to this function (either ComicID or IssueID)
        """

        self.query_type = "artwork"

        if ComicID:
            self.id = ComicID
            self.id_type = "comic"
        else:
            self.id = IssueID
            self.id_type = "issue"

        if self._exists("artwork") and self._is_current(filename=self.artwork_files[0]):
            return self.artwork_files[0]
        else:
            # we already have the image for the comic in the sql db. Simply retrieve it, and save it.
            image_url = imageURL
            logger.debug("Retrieving comic image from: " + image_url)
            try:
                artwork = urllib2.urlopen(image_url, timeout=20).read()
            except Exception as e:
                logger.error('Unable to open url "' + image_url + '". Error: ' + str(e))
                artwork = None

            if artwork:

                # Make sure the artwork dir exists:
                if not os.path.isdir(self.path_to_art_cache):
                    try:
                        os.makedirs(self.path_to_art_cache)
                    except Exception as e:
                        logger.error("Unable to create artwork cache dir. Error: " + str(e))
                        self.artwork_errors = True
                        self.artwork_url = image_url
                # Delete the old stuff
                for artwork_file in self.artwork_files:
                    try:
                        os.remove(artwork_file)
                    except:
                        logger.error("Error deleting file from the cache: " + artwork_file)

                ext = os.path.splitext(image_url)[1]

                artwork_path = os.path.join(self.path_to_art_cache, self.id + "." + helpers.today() + ext)
                try:
                    f = open(artwork_path, "wb")
                    f.write(artwork)
                    f.close()
                except Exception as e:
                    logger.error("Unable to write to the cache dir: " + str(e))
                    self.artwork_errors = True
                    self.artwork_url = image_url
Example #15
 def reading_images(self, ish_id):
     logger.debug("Image List Requested")
     image_list = []
     image_src = os.path.join(mylar.CONFIG.CACHE_DIR, "webviewer", ish_id)
     image_loc = os.path.join(mylar.CONFIG.HTTP_ROOT, 'cache', "webviewer", ish_id)
     for root, dirs, files in os.walk(image_src):
         for f in files:
             if f.endswith((".png", ".gif", ".bmp", ".dib", ".jpg", ".jpeg", ".jpe", ".jif", ".jfif", ".jfi", ".tiff", ".tif")):
                 image_list.append(os.path.join(image_loc, f))
     image_list.sort()
     logger.debug("Image List Created")
     return image_list
Example #16
def check_setting_int(config, cfg_name, item_name, def_val):
    try:
        my_val = int(config[cfg_name][item_name])
    except:
        my_val = def_val
        try:
            config[cfg_name][item_name] = my_val
        except:
            config[cfg_name] = {}
            config[cfg_name][item_name] = my_val
    logger.debug(item_name + " -> " + str(my_val))
    return my_val
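A quick illustration of the fallback behaviour, using a plain dict in place of the ConfigObj-style mapping (values are hypothetical, and the module's logger is assumed to be configured):

    config = {'General': {'http_port': '8090'}}
    port = check_setting_int(config, 'General', 'http_port', 8080)       # -> 8090
    size = check_setting_int(config, 'General', 'max_logsize', 1000000)  # missing -> default
    assert config['General']['max_logsize'] == 1000000                   # and written back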
Example #17
    def _get_token(self):
        TOKEN_REGEX = r'<div[^>]*id=[\"\']token[\"\'][^>]*>([^<]*)</div>'
        utorrent_url_token = '%stoken.html' % self.utorrent_url
        try:
            r = requests.get(utorrent_url_token, auth=self.auth)
        except requests.exceptions.RequestException as err:
            logger.debug('URL: ' + str(utorrent_url_token))
            logger.debug('Error getting Token. uTorrent responded with error: ' + str(err))
            return 'fail'

        token = re.search(TOKEN_REGEX, r.text).group(1)
        guid = r.cookies['GUID']
        cookies = dict(GUID = guid)
        return token, cookies
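The token/cookie pair returned here is what the addurl/addfile methods above attach to every request. A self-contained sketch of the handshake against a uTorrent WebUI; the URL and credentials are placeholder assumptions:

    import re
    import requests

    TOKEN_REGEX = r'<div[^>]*id=["\']token["\'][^>]*>([^<]*)</div>'
    base = 'http://localhost:8080/gui/'  # assumption: default WebUI address
    auth = ('admin', 'password')         # assumption: placeholder credentials

    r = requests.get(base + 'token.html', auth=auth)
    token = re.search(TOKEN_REGEX, r.text).group(1)
    cookies = {'GUID': r.cookies['GUID']}

    # every subsequent call carries the token parameter and the GUID cookie
    requests.post(base, auth=auth, cookies=cookies,
                  params={'action': 'getsettings', 'token': token})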
Example #18
        def valid_login_attempt(self, un, pw):
            '''
                Does the actual POST to the login.php method (using the ajax parameter, which is far more reliable
                than HTML parsing).

                Input: un: The username (usually self.un, but that's not a requirement)
                       pw: The password (usually self.pw, but not a requirement)

                Note: The underlying self.ses object will handle setting the session cookie from a valid login,
                but you'll need to call the save method if your cookies are being persisted.

                Returns: True (success) False (failure)

            '''

            postdata = {'username': un, 'password': pw, 'keeplogged': 1}
            u = 'https://32pag.es/login.php?ajax=1'

            try:
                r = self.ses.post(u, data=postdata, timeout=60, allow_redirects=True)
                logger.debug('%s Status Code: %s' % (self.module, r.status_code))
            except Exception as e:
                logger.error('%s Got an exception when trying to login: %s' % (self.module, e))
                self.error = {'status':'exception', 'message':'Exception when trying to login'}
                return False

            if r.status_code != 200:
                logger.warn('%s Got bad status code from login POST: %d\n%s\n%s' % (self.module, r.status_code, r.text, r.headers))
                logger.debug('%s Request URL: %s \n Content: %s \n History: %s' % (self.module, r.url, r.text, r.history))
                self.error = {'status':'Bad Status code', 'message':(r.status_code, r.text, r.headers)}
                return False

            try:
                logger.debug('%s Trying to analyze login JSON reply from 32P: %s' % (self.module, r.text))
                d = r.json()
            except:
                logger.debug('%s Request URL: %s \n Content: %s \n History: %s' % (self.module, r.url, r.text, r.history))
                logger.error('%s The data returned by the login page was not JSON: %s' % (self.module, r.text))
                self.error = {'status':'JSON not returned', 'message':r.text}
                return False

            if d['status'] == 'success':
                return True

            logger.error('%s Got unexpected status result: %s' % (self.module, d))
            logger.debug('%s Request URL: %s \n Content: %s \n History: %s \n Json: %s' % (self.module, r.url, r.text, r.history, d))
            self.error = d
            return False
Example #19
        def valid_login_attempt(self, un, pw):
            '''
                Does the actual POST to the login.php method (using the ajax parameter, which is far more reliable
                than HTML parsing).

                Input: un: The username (usually self.un, but that's not a requirement)
                       pw: The password (usually self.pw, but not a requirement)

                Note: The underlying self.ses object will handle setting the session cookie from a valid login,
                but you'll need to call the save method if your cookies are being persisted.

                Returns: True (success) False (failure)

            '''

            postdata = {'username': un, 'password': pw, 'keeplogged': 1}
            u = 'https://32pag.es/login.php?ajax=1'

            try:
                r = self.ses.post(u, data=postdata, timeout=60, allow_redirects=True)
                logger.debug(self.module + ' Status Code: ' + str(r.status_code))
            except Exception as e:
                logger.error(self.module + " Got an exception when trying to login to %s POST: %s", u, e)
                self.error = {'status':'exception', 'message':'Exception when trying to login'}
                return False

            if r.status_code != 200:
                logger.warn(self.module + " Got bad status code from login POST: %d\n%s\n%s", r.status_code, r.text, r.headers)
                logger.debug(self.module + " Request URL: %s \n Content: %s \n History: %s", r.url, r.text, r.history)
                self.error = {'status':'Bad Status code', 'message':(r.status_code, r.text, r.headers)}
                return False

            try:
                logger.debug(self.module + ' Trying to analyze login JSON reply from 32P: %s', r.text)
                d = r.json()
            except:
                logger.debug(self.module + " Request URL: %s \n Content: %s \n History: %s", r.url, r.text, r.history)
                logger.error(self.module + " The data returned by the login page was not JSON: %s", r.text)
                self.error = {'status':'JSON not returned', 'message':r.text}
                return False

            if d['status'] == 'success':
                return True

            logger.error(self.module + " Got unexpected status result: %s", d)
            logger.debug(self.module + " Request URL: %s \n Content: %s \n History: %s \n Json: %s", r.url, r.text, r.history, d)
            self.error = d
            return False
Example #20
def check_setting_str(config, cfg_name, item_name, def_val, log=True):
    try:
        my_val = config[cfg_name][item_name]
    except:
        my_val = def_val
        try:
            config[cfg_name][item_name] = my_val
        except:
            config[cfg_name] = {}
            config[cfg_name][item_name] = my_val

    if log:
        logger.debug(item_name + " -> " + str(my_val))
    else:
        logger.debug(item_name + " -> ******")
    return my_val
Example #21
def calculate_torrent_hash(link=None, data=None):
    """
    Calculate the torrent hash from a magnet link or data. Raises a ValueError
    when it cannot create a torrent hash given the input data.
    """

    if link and link.startswith("magnet:"):
        torrent_hash = re.findall(r"urn:btih:([\w]{32,40})", link)[0]
        if len(torrent_hash) == 32:
            torrent_hash = b16encode(b32decode(torrent_hash)).lower()
    elif data:
        info = bdecode(data)["info"]
        torrent_hash = sha1(bencode(info)).hexdigest()
    else:
        raise ValueError("Cannot calculate torrent hash without magnet link or data")
    logger.debug("Torrent hash: " + torrent_hash)
    return torrent_hash.upper()
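For instance, a (hypothetical) 40-character hex info-hash takes the straight extraction path, while a 32-character base32 hash would be base32-decoded and re-encoded as hex first (the module's logger is assumed to be configured):

    h = calculate_torrent_hash(link='magnet:?xt=urn:btih:' + 'a' * 40)
    # h == 'A' * 40; passing neither a magnet link nor data raises ValueError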
Example #22
 def reading_images(self, ish_id):
     logger.debug("Image List Requested")
     image_list = []
     image_src = os.path.join(mylar.CONFIG.CACHE_DIR, "webviewer", ish_id)
     image_loc = mylar.CONFIG.HTTP_ROOT + '/'.join(
         ['cache', "webviewer", ish_id])
     for root, dirs, files in os.walk(image_src):
         for f in files:
             if f.endswith(
                 (".png", ".gif", ".bmp", ".dib", ".jpg", ".jpeg", ".jpe",
                  ".jif", ".jfif", ".jfi", ".tiff", ".tif")):
                 rel_dir = os.path.relpath(root, image_src)
                 rel_file = os.path.join(rel_dir, f)
                 image_list.append(
                     urllib.parse.quote(os.path.join(image_loc, rel_file)))
     image_list.sort()
     logger.debug("Image List Created")
     return image_list
Example #23
    def _action(self, params, body=None, content_type=None):

        if not self.token:
            return

        url = self.base_url + "/gui/" + "?token=" + self.token + "&" + urllib.urlencode(params)
        request = urllib2.Request(url)

        if body:
            request.add_data(body)
            request.add_header("Content-length", len(body))
        if content_type:
            request.add_header("Content-type", content_type)

        try:
            response = self.opener.open(request)
            return response.code, json.loads(response.read())
        except urllib2.HTTPError as err:
            logger.debug("URL: " + str(url))
            logger.debug("uTorrent webUI raised the following error: " + str(err))
Example #24
def daemonize():

    if threading.activeCount() != 1:
        logger.warn('There are %r active threads. Daemonizing may cause '
                    'strange behavior.' % threading.enumerate())
    
    sys.stdout.flush()
    sys.stderr.flush()
    
    # Do first fork
    try:
        pid = os.fork()
        if pid == 0:
            pass
        else:
            # Exit the parent process
            logger.debug('Forking once...')
            os._exit(0)
    except OSError as e:
        sys.exit("1st fork failed: %s [%d]" % (e.strerror, e.errno))
Example #25
    def load_torrent(self, filepath):
        
        logger.info('filepath to torrent file set to : ' + filepath)
        torrent_id = False
                
        if self.client.connected is True:
            logger.info('Checking if Torrent Exists!')
            
            torrentcontent = open(filepath, 'rb').read()
            hash = str.lower(self.get_the_hash(filepath)) # Deluge expects a lower case hash

            logger.debug('Torrent Hash (load_torrent): "' + hash + '"')
            logger.debug('FileName (load_torrent): ' + str(os.path.basename(filepath)))


            #Check if torrent already added
            if self.find_torrent(str.lower(hash)):
                logger.info('load_torrent: Torrent already exists!')
            else:
                logger.info('Torrent not added yet, trying to add it now!')
                try:
                    torrent_id = self.client.call('core.add_torrent_file', str(os.path.basename(filepath)), base64.encodestring(torrentcontent), '')
                except Exception as e:
                    logger.debug('Torrent not added')
                    return False
                else:
                    logger.debug('TorrentID: ' + torrent_id)

                # If label enabled put label on torrent in Deluge
                if torrent_id and mylar.DELUGE_LABEL:
                    logger.info('Setting label to ' + mylar.DELUGE_LABEL)
                    try:
                        self.client.call('label.set_torrent', torrent_id, mylar.DELUGE_LABEL)
                    except:
                        #if label isn't set, let's try and create one.
                        try:
                            self.client.call('label.add', mylar.DELUGE_LABEL)
                            self.client.call('label.set_torrent', torrent_id, mylar.DELUGE_LABEL)
                        except:
                            logger.warn('Unable to set label - Either try to create it manually within Deluge, and/or ensure there are no spaces, capitalization or special characters in label')
                            return False
                    logger.info('Successfully set label to ' + mylar.DELUGE_LABEL)
        try:
            logger.info('Double checking that the torrent was added.')
            self.find_torrent(torrent_id)
        except Exception as e:
            logger.warn('Torrent was not added! Please check logs')
            return False
        else:
            logger.info('Torrent successfully added!')
            return True
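The self.client used above behaves like a DelugeRPCClient; a minimal sketch of the daemon connection this method presumes, with all values as placeholder assumptions:

    from deluge_client import DelugeRPCClient  # assumption: deluge-client package

    client = DelugeRPCClient('127.0.0.1', 58846, 'localclient', 'password')
    client.connect()
    if client.connected:
        # the same RPC surface the loader above relies on
        session_state = client.call('core.get_session_state')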
Example #26
 def markComics(self, action=None, **args):
     myDB = db.DBConnection()
     comicsToAdd = []
     for ComicID in args:
         if action == 'delete':
             myDB.action('DELETE from comics WHERE ComicID=?', [ComicID])
             myDB.action('DELETE from issues WHERE ComicID=?', [ComicID])
         elif action == 'pause':
             controlValueDict = {'ComicID': ComicID}
             newValueDict = {'Status': 'Paused'}
             myDB.upsert("comics", newValueDict, controlValueDict)
         elif action == 'resume':
             controlValueDict = {'ComicID': ComicID}
             newValueDict = {'Status': 'Active'}
             myDB.upsert("comics", newValueDict, controlValueDict)              
         else:
             comicsToAdd.append(ComicID)
     if len(comicsToAdd) > 0:
         logger.debug("Refreshing comics: %s" % comicsToAdd)
         threading.Thread(target=importer.addComicIDListToDB, args=[comicsToAdd]).start()
     raise cherrypy.HTTPRedirect("home")
Example #27
    def notify(self, text, attachment_text, module=None):
        if module is None:
            module = ''
        module += '[NOTIFIER]'
        sent_successfully = False

        try:
            logger.debug(module +
                         ' Sending sms notification from [%s] to [%s] ' %
                         (self.phone_from, self.phone_to))
            if self.signal is None:
                sent_successfully = False
            else:
                self.signal.send_message(self.phone_to, text)
                self.signal.send_message(self.phone_to, attachment_text)
                sent_successfully = True
        except Exception as e:
            logger.info(module + ' Signal notify failed: ' + str(e))

        if sent_successfully:
            logger.info(module + ' Signal notifications sent.')

        return sent_successfully
Example #28
 def markissues(self, action=None, **args):
     myDB = db.DBConnection()
     issuesToAdd = []
     if action == 'WantedNew':
         newaction = 'Wanted'
     else:
         newaction = action
     for IssueID in args:
         if IssueID is None: continue
         else:
             mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone()
             miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
             logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
             controlValueDict = {"IssueID": IssueID}
             newValueDict = {"Status": newaction}
             myDB.upsert("issues", newValueDict, controlValueDict)
             if action == 'Wanted':
                 issuesToAdd.append(IssueID)
     if len(issuesToAdd) > 0:
         logger.debug("Marking issues: %s" % issuesToAdd)
         threading.Thread(target=search.searchIssueIDList, args=[issuesToAdd]).start()
     #if IssueID:
     raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % mi['ComicID'])
Example #29
 def markArtists(self, action=None, **args):
     myDB = db.DBConnection()
     artistsToAdd = []
     for ArtistID in args:
         if action == 'delete':
             myDB.action('DELETE from artists WHERE ArtistID=?', [ArtistID])
             myDB.action('DELETE from albums WHERE ArtistID=?', [ArtistID])
             myDB.action('DELETE from tracks WHERE ArtistID=?', [ArtistID])
             myDB.action('INSERT OR REPLACE into blacklist VALUES (?)', [ArtistID])
         elif action == 'pause':
             controlValueDict = {'ArtistID': ArtistID}
             newValueDict = {'Status': 'Paused'}
             myDB.upsert("artists", newValueDict, controlValueDict)
         elif action == 'resume':
             controlValueDict = {'ArtistID': ArtistID}
             newValueDict = {'Status': 'Active'}
             myDB.upsert("artists", newValueDict, controlValueDict)              
         else:
             artistsToAdd.append(ArtistID)
     if len(artistsToAdd) > 0:
         logger.debug("Refreshing artists: %s" % artistsToAdd)
         threading.Thread(target=importer.addArtistIDListToDB, args=[artistsToAdd]).start()
     raise cherrypy.HTTPRedirect("home")
Example #30
    def addfile(self, filepath=None, filename=None, bytes=None):
        params = {'action': 'add-file', 'token': self.token}
        try:
            d = open(filepath, 'rb')
            tordata = d.read()
            d.close()
        except:
            logger.warn('Unable to load torrent file. Aborting at this time.')
            return 'fail'

        files = {'torrent_file': tordata}
        try:
            r = requests.post(url=self.utorrent_url,
                              auth=self.auth,
                              cookies=self.cookies,
                              params=params,
                              files=files)
        except requests.exceptions.RequestException as err:
            logger.debug('URL: ' + str(self.utorrent_url))
            logger.debug(
                'Error sending to uTorrent Client. uTorrent responded with error: '
                + str(err))
            return 'fail'

        # (to-do) verify the hash in order to ensure it's loaded here
        if r.status_code == 200:
            logger.info('Successfully added torrent to uTorrent client.')
            if mylar.UTORRENT_LABEL:
                try:
                    hash = self.calculate_torrent_hash(data=tordata)
                    self.setlabel(hash)
                except:
                    logger.warn('Unable to set label for torrent.')

            return 'pass'
        else:
            return 'fail'
Example #31
def runGit(args):

    if mylar.GIT_PATH:
        git_locations = ['"' + mylar.GIT_PATH + '"']
    else:
        git_locations = ['git']

    if platform.system().lower() == 'darwin':
        git_locations.append('/usr/local/git/bin/git')

    output = err = None

    for cur_git in git_locations:

        cmd = cur_git + ' ' + args

        try:
            logger.debug('Trying to execute: "' + cmd + '" with shell in ' +
                         mylar.PROG_DIR)
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 shell=True,
                                 cwd=mylar.PROG_DIR)
            output, err = p.communicate()
            logger.debug('Git output: ' + output)
        except OSError:
            logger.debug('Command ' + cmd +
                         ' didn\'t work, couldn\'t find git')
            continue

        if 'not found' in output or "not recognized as an internal or external command" in output:
            logger.debug('Unable to find git with command ' + cmd)
            output = None
        elif 'fatal:' in output or err:
            logger.error(
                'Git returned bad info. Are you sure this is a git installation?'
            )
            output = None
        elif output:
            break

    return (output, err)
Example #32
            logger.info(u"Error Retrieving weekly pull list - attempting to adjust")
            c.execute('DROP TABLE weekly')    
            c.execute('CREATE TABLE IF NOT EXISTS weekly (SHIPDATE text, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text)')
            pulldate = '00000000'
            logger.fdebug(u"Table re-created, trying to populate")
    else:
        logger.info(u"No pullist found...I'm going to try and get a new list now.")
        pulldate = '00000000'
    if pulldate is None: pulldate = '00000000'
    PULLURL = 'http://www.previewsworld.com/shipping/newreleases.txt'
    #PULLURL = 'http://www.previewsworld.com/Archive/GetFile/1/1/71/994/081512.txt'

    #Prepare the Substitute name switch for pulllist to comic vine conversion
    substitutes = os.path.join(mylar.DATA_DIR,"substitutes.csv")
    if not os.path.exists(substitutes):
        logger.debug('no substitutes.csv file located - not performing substitutions on weekly pull list')
        substitute_check = False
    else:
        substitute_check = True
        #shortrep is the name to be replaced, longrep the replacement
        shortrep = []
        longrep = []
        #open the file data
        with open(substitutes) as f:
            reader = csv.reader(f, delimiter='|')
            for row in reader:
                if not row[0].startswith('#'): 
                    logger.fdebug("Substitutes file read : "+str(row))
                    shortrep.append(row[0])
                    longrep.append(row[1])
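For reference, the pipe-delimited layout the loop above expects; a hypothetical substitutes.csv (lines starting with '#' are treated as comments):

    #shortname|longname
    Spidey|The Amazing Spider-Man
    TWD|The Walking Dead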
Example #33
    except Exception as e:
        logger.warn('Error fetching data from %s (%s): %s' % (site, url, e))
        if site == '32P':
            logger.info('[TOR2CLIENT-32P] Retrying with 32P')
            if mylar.MODE_32P == 1:
                
                logger.info('[TOR2CLIENT-32P] Attempting to re-authenticate against 32P and poll new keys as required.')
                feed32p = auth32p.info32p(reauthenticate=True)
                feedinfo = feed32p.authenticate()

                if feedinfo == "disable":
                    mylar.ENABLE_32P = 0
                    mylar.config_write()
                    return "fail"
                
                logger.debug('[TOR2CLIENT-32P] Creating CF Scraper')
                scraper = cfscrape.create_scraper()

                logger.debug('[TOR2CLIENT-32P] payload: %s \n verify %s \n headers %s \n', payload, verify, headers)
                
                try:
                    r = scraper.get(url, params=payload, verify=verify, allow_redirects=True)
                except Exception as e:
                    logger.warn('[TOR2CLIENT-32P] Unable to GET %s (%s): %s' % (site, url, e))
                    return "fail"
            else:
                logger.warn('[TOR2CLIENT-32P] Unable to authenticate using existing RSS Feed given. Make sure that you have provided a CURRENT feed from 32P')
                return "fail"
        else:
            logger.info('Status code: ' + str(r.status_code))
            return "fail"
Example #34
 def setprops(self, hash, s, val):
     params = [("action", "setprops"), ("hash", hash), ("s", s), ("v", val)]
     logger.debug("Params: " + str(params))
     return self._action(params)
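The setlabel helper referenced by the addurl/addfile examples above plausibly wraps this call; a hedged sketch, not the project's actual code:

    def setlabel(self, hash):
        # 'label' is the uTorrent property name; the value comes from config
        return self.setprops(hash, 'label', mylar.UTORRENT_LABEL)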
Example #35
        if pid == 0:
            pass
        else:
            # Exit the parent process
            logger.debug('Forking once...')
            os._exit(0)
    except OSError as e:
        sys.exit("1st fork failed: %s [%d]" % (e.strerror, e.errno))
        
    os.setsid()

    # Do second fork
    try:
        pid = os.fork()
        if pid > 0:
            logger.debug('Forking twice...')
            os._exit(0) # Exit second parent process
    except OSError as e:
        sys.exit("2nd fork failed: %s [%d]" % (e.strerror, e.errno))

    os.chdir("/")
    os.umask(0)
    
    si = open('/dev/null', "r")
    so = open('/dev/null', "a+")
    se = open('/dev/null', "a+")
    
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())
Example #36
    def _root(self, **kwargs):
        myDB = db.DBConnection()
        feed = {}
        feed['title'] = 'Mylar OPDS'
        feed['id'] = 'OPDSRoot'
        feed['updated'] = mylar.helpers.now()
        links = []
        entries = []
        links.append(
            getLink(
                href=self.opdsroot,
                type='application/atom+xml; profile=opds-catalog; kind=navigation',
                rel='start',
                title='Home'))
        links.append(
            getLink(
                href=self.opdsroot,
                type='application/atom+xml; profile=opds-catalog; kind=navigation',
                rel='self'))
        links.append(
            getLink(href='%s?cmd=search' % self.opdsroot,
                    type='application/opensearchdescription+xml',
                    rel='search',
                    title='Search'))
        publishers = myDB.select(
            "SELECT ComicPublisher from comics GROUP BY ComicPublisher")
        entries.append({
            'title': 'Recent Additions',
            'id': 'Recent',
            'updated': mylar.helpers.now(),
            'content': 'Recently Added Issues',
            'href': '%s?cmd=Recent' % self.opdsroot,
            'kind': 'acquisition',
            'rel': 'subsection',
        })
        if len(publishers) > 0:
            count = len(publishers)
            entries.append({
                'title': 'Publishers (%s)' % count,
                'id': 'Publishers',
                'updated': mylar.helpers.now(),
                'content': 'List of Comic Publishers',
                'href': '%s?cmd=Publishers' % self.opdsroot,
                'kind': 'navigation',
                'rel': 'subsection',
            })
        comics = mylar.helpers.havetotals()
        count = 0
        for comic in comics:
            if comic['haveissues'] > 0:
                count += 1
        if count > -1:
            entries.append({
                'title': 'All Titles (%s)' % count,
                'id': 'AllTitles',
                'updated': mylar.helpers.now(),
                'content': 'List of All Comics',
                'href': '%s?cmd=AllTitles' % self.opdsroot,
                'kind': 'navigation',
                'rel': 'subsection',
            })
        storyArcs = mylar.helpers.listStoryArcs()
        logger.debug(storyArcs)
        if len(storyArcs) > 0:
            entries.append({
                'title': 'Story Arcs (%s)' % len(storyArcs),
                'id': 'StoryArcs',
                'updated': mylar.helpers.now(),
                'content': 'List of Story Arcs',
                'href': '%s?cmd=StoryArcs' % self.opdsroot,
                'kind': 'navigation',
                'rel': 'subsection',
            })
        readList = myDB.select("SELECT * from readlist")
        if len(readList) > 0:
            entries.append({
                'title': 'Read List (%s)' % len(readList),
                'id': 'ReadList',
                'updated': mylar.helpers.now(),
                'content': 'Current Read List',
                'href': '%s?cmd=ReadList' % self.opdsroot,
                'kind': 'navigation',
                'rel': 'subsection',
            })

        feed['links'] = links
        feed['entries'] = entries
        self.data = feed
        return
Example #37
    cursor.execute("CREATE TABLE IF NOT EXISTS weekly (SHIPDATE, PUBLISHER text, ISSUE text, COMIC VARCHAR(150), EXTRA text, STATUS text, ComicID text);")
    connection.commit()


    csvfile = open(newfl, "rb")
    creader = csv.reader(csvfile, delimiter='\t')
    t = 1

    for row in creader:
        if "MERCHANDISE" in row: break
        if "MAGAZINES" in row: break
        if "BOOK" in row: break
        #print (row)
        try:
            logger.debug("Row: %s" % row)
            cursor.execute("INSERT INTO weekly VALUES (?,?,?,?,?,?,null);", row)
        except Exception:
            # invalid arguments - skip this row
            pass
        t += 1
    csvfile.close()
    connection.commit()
    connection.close()
    logger.info(u"Weekly Pull List successfully loaded.")
    #let's delete the files
    pullpath = str(mylar.CACHE_DIR) + "/"
    os.remove( str(pullpath) + "Clean-newreleases.txt" )
    os.remove( str(pullpath) + "newreleases.txt" )
    pullitcheck(forcecheck=forcecheck)
Example #38
    def searchit(self):
        #self.searchterm is a tuple containing series name, issue number, volume and publisher.
        series_search = self.searchterm['series']
        comic_id = self.searchterm['id']

        annualize = False
        if 'Annual' in series_search:
            series_search = re.sub(' Annual', '', series_search).strip()
            annualize = True
        issue_search = self.searchterm['issue']
        volume_search = self.searchterm['volume']
        publisher_search = self.searchterm['publisher']
        spl = [x for x in self.publisher_list if x in publisher_search]
        for x in spl:
            publisher_search = re.sub(x, '', publisher_search).strip()
        logger.info('publisher search set to : ' + publisher_search)

        chk_id = None
        # lookup the ComicID in the 32p sqlite3 table to pull the series_id to use.
        if comic_id:
            chk_id = helpers.checkthe_id(comic_id)

        if any([not chk_id, mylar.DEEP_SEARCH_32P is True]):
            #generate the dynamic name of the series here so we can match it up
            as_d = filechecker.FileChecker()
            as_dinfo = as_d.dynamic_replace(series_search)
            mod_series = re.sub(r'\|', '', as_dinfo['mod_seriesname']).strip()
            as_puinfo = as_d.dynamic_replace(publisher_search)
            pub_series = as_puinfo['mod_seriesname']

            logger.info('series_search: ' + series_search)

            if '/' in series_search:
                series_search = series_search[:series_search.find('/')]
            if ':' in series_search:
                series_search = series_search[:series_search.find(':')]
            if ',' in series_search:
                series_search = series_search[:series_search.find(',')]

            if not mylar.SEARCH_32P:
                url = 'https://walksoftly.itsaninja.party/serieslist.php'
                params = {'series': re.sub(r'\|', '', mod_series.lower()).strip()}  # series_search
                try:
                    t = requests.get(url, params=params, verify=True, headers={'USER-AGENT': mylar.USER_AGENT[:mylar.USER_AGENT.find('/')+7] + mylar.USER_AGENT[mylar.USER_AGENT.find('(')+1]})
                except requests.exceptions.RequestException as e:
                    logger.warn(e)
                    return "no results"

                if t.status_code == 619:
                    logger.warn('[' + str(t.status_code) + '] Unable to retrieve data from site.')
                    return "no results"
                elif t.status_code == 999:
                    logger.warn('[' + str(t.status_code) + '] No series title was provided to the search query.')
                    return "no results"

                try:
                    results = t.json()
                except Exception:
                    results = t.text

                if len(results) == 0:
                    logger.warn('No results found for search on 32P.')
                    return "no results"

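        # cfscrape wraps a requests.Session and transparently solves Cloudflare's
        # anti-bot JS challenge before the real requests go out.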
        with cfscrape.create_scraper() as s:
            s.headers = self.headers
            cj = LWPCookieJar(os.path.join(mylar.CACHE_DIR, ".32p_cookies.dat"))
            cj.load()
            s.cookies = cj
            data = []
            pdata = []
            pubmatch = False

            if any([not chk_id, mylar.DEEP_SEARCH_32P is True]):
                if mylar.SEARCH_32P:
                    url = 'https://32pag.es/torrents.php' #?action=serieslist&filter=' + series_search #&filter=F
                    params = {'action': 'serieslist', 'filter': series_search}
                    time.sleep(1)  #just to make sure we don't hammer, 1s pause.
                    t = s.get(url, params=params, verify=True, allow_redirects=True)
                    soup = BeautifulSoup(t.content, "html.parser")
                    results = soup.find_all("a", {"class": "object-qtip",
                                                  "data-type": "torrentgroup"})

                for r in results:
                    if mylar.SEARCH_32P:
                        torrentid = r['data-id']
                        torrentname = r.findNext(text=True)
                        torrentname = torrentname.strip()
                    else:
                        torrentid = r['id']
                        torrentname = r['series']

                    as_d = filechecker.FileChecker()
                    as_dinfo = as_d.dynamic_replace(torrentname)
                    seriesresult = re.sub('\|','', as_dinfo['mod_seriesname']).strip()
                    #seriesresult = as_dinfo['mod_seriesname']
                    logger.info('searchresult: ' + seriesresult + ' --- ' + mod_series + '[' + publisher_search + ']')
                    if seriesresult == mod_series:
                        logger.info('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
                        data.append({"id":      torrentid,
                                     "series":  torrentname})
                    elif publisher_search in seriesresult:
                        logger.info('publisher match.')
                        tmp_torrentname = re.sub(publisher_search, '', seriesresult).strip()
                        as_t = filechecker.FileChecker()
                        as_tinfo = as_t.dynamic_replace(tmp_torrentname)
                        logger.info('tmp_torrentname:' + tmp_torrentname)
                        logger.info('as_tinfo:' + as_tinfo['mod_seriesname'])
                        if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series:
                            logger.info('[MATCH] ' + torrentname + ' [' + str(torrentid) + ']')
                            pdata.append({"id":      torrentid,
                                          "series":  torrentname})
                            pubmatch = True

                logger.info(str(len(data)) + ' series listed for searching that match.')
            else:
                logger.info('Exact series ID already discovered previously. Setting to :' + chk_id['series'] + '[' + str(chk_id['id']) + ']')
                pdata.append({"id":     chk_id['id'],
                              "series": chk_id['series']})
                pubmatch = True

            if all([len(data) == 0, len(pdata) == 0]):
                return "no results"
            else:
                dataset = []
                if len(data) > 0:
                    dataset += data
                if len(pdata) > 0:
                    dataset += pdata
                logger.info('dataset: %s' % dataset)
                logger.info(str(len(dataset)) + ' series match the title being searched for on 32P...')

            if chk_id is None and any([len(data) == 1, len(pdata) == 1]):
                #update the 32p_reference so we avoid doing a url lookup next time
                helpers.checkthe_id(comic_id, dataset)
            else:
                logger.debug('Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.')

            results32p = []
            resultlist = {}

            for x in dataset:
                #for 0-day packs, issue=week#, volume=month, id=0-day year pack
                payload = {'action': 'groupsearch',
                           'id':     x['id'], #searchid,
                           'issue':  issue_search}
                #in order to match up against 0-day stuff, volume has to be none at this point
                #when doing other searches tho, this should be allowed to go through
                #if all([volume_search != 'None', volume_search is not None]):
                #    payload.update({'volume': re.sub('v', '', volume_search).strip()})

                logger.info('payload: ' + str(payload))
                url = 'https://32pag.es/ajax.php'
                time.sleep(1)  #just to make sure we don't hammer, 1s pause.
                try:
                    d = s.post(url, params=payload, verify=True, allow_redirects=True)
                    #logger.debug(self.module + ' Reply from AJAX: \n %s', d.text)
                except Exception as e:
                    logger.info(self.module + ' Could not POST URL %s', url)

                try:
                    searchResults = d.json()
                except Exception:
                    searchResults = d.text
                    logger.debug(self.module + ' Search Result did not return valid JSON, falling back on text: %s', searchResults)
                    return False

                #logger.debug(self.module + " Search Result: %s", searchResults)
                if searchResults['status'] == 'success' and searchResults['count'] > 0:
                    logger.info('successfully retrieved ' + str(searchResults['count']) + ' search results.')
                    for a in searchResults['details']:
                        results32p.append({'link':      a['id'],
                                           'title':     self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues'],
                                           'filesize':  a['size'],
                                           'issues':     a['issues'],
                                           'pack':      a['pack'],
                                           'format':    a['format'],
                                           'language':  a['language'],
                                           'seeders':   a['seeders'],
                                           'leechers':  a['leechers'],
                                           'scanner':   a['scanner'],
                                           'chkit':     {'id': x['id'], 'series': x['series']},
                                           'pubdate':   datetime.datetime.fromtimestamp(float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'),
                                           'int_pubdate': float(a['upload_time'])})


            if len(results32p) > 0:
                resultlist['entries'] = sorted(results32p, key=itemgetter('pack','title'), reverse=False)
            else:
                resultlist = 'no results'

        return resultlist
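
# Hedged sketch of the searchterm mapping this searchit() expects; the keys
# mirror the lookups above, the values are illustrative only.
searchterm = {
    'series': 'Invincible',       # series name; ' Annual' is stripped if present
    'issue': '133',               # issue number to search for
    'volume': 'v2',               # read but not sent (see the commented-out payload block)
    'publisher': 'Image Comics',  # publisher; known publisher tokens are stripped
    'id': '99999',                # ComicID used for the 32p reference lookup
}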
Example #54
    def _Comic(self, **kwargs):
        index = 0
        if 'index' in kwargs:
            index = int(kwargs['index'])
        myDB = db.DBConnection()
        if 'comicid' not in kwargs:
            self.data = self._error_with_message('No ComicID Provided')
            return
        links = []
        entries = []
        comic = myDB.selectone('SELECT * from comics where ComicID=?',
                               (kwargs['comicid'], )).fetchone()
        if not comic:
            self.data = self._error_with_message('Comic Not Found')
            return
        issues = self._dic_from_query('SELECT * from issues WHERE ComicID="' +
                                      kwargs['comicid'] +
                                      '" order by Int_IssueNumber DESC')
        if mylar.CONFIG.ANNUALS_ON:
            annuals = self._dic_from_query(
                'SELECT * FROM annuals WHERE ComicID="' + kwargs['comicid'] +
                '"')
        else:
            annuals = []
        for annual in annuals:
            issues.append(annual)
        issues = [x for x in issues if x['Location']]
        if index <= len(issues):
            subset = issues[index:(index + self.PAGE_SIZE)]
            for issue in subset:
                if 'DateAdded' in issue and issue['DateAdded']:
                    updated = issue['DateAdded']
                else:
                    updated = issue['ReleaseDate']
                image = None
                thumbnail = None
                if 'DateAdded' in issue:
                    title = escape('%s - %s' %
                                   (issue['Issue_Number'], issue['IssueName']))
                    image = issue['ImageURL_ALT']
                    thumbnail = issue['ImageURL']
                else:
                    title = escape('Annual %s - %s' %
                                   (issue['Issue_Number'], issue['IssueName']))

                fileloc = os.path.join(comic['ComicLocation'],
                                       issue['Location'])
                if not os.path.isfile(fileloc):
                    logger.debug("Missing File: %s" % (fileloc))
                    continue
                metainfo = None
                if mylar.CONFIG.OPDS_METAINFO:
                    metainfo = mylar.helpers.IssueDetails(fileloc)
                if not metainfo:
                    metainfo = [{'writer': None, 'summary': ''}]
                entries.append({
                    'title': title,
                    'id': escape('comic:%s - %s' %
                                 (issue['ComicName'], issue['Issue_Number'])),
                    'updated': updated,
                    'content': escape('%s' % (metainfo[0]['summary'])),
                    'href': '%s?cmd=Issue&amp;issueid=%s&amp;file=%s' %
                            (self.opdsroot, quote_plus(issue['IssueID']),
                             quote_plus(issue['Location'].encode('utf-8'))),
                    'kind': 'acquisition',
                    'rel': 'file',
                    'author': metainfo[0]['writer'],
                    'image': image,
                    'thumbnail': thumbnail,
                })

        feed = {}
        comicname = '%s' % (escape(comic['ComicName']))
        feed['title'] = 'Mylar OPDS - %s' % (comicname)
        feed['id'] = escape('comic:%s (%s)' %
                            (comic['ComicName'], comic['ComicYear']))
        feed['updated'] = comic['DateAdded']
        links.append(
            getLink(href=self.opdsroot,
                    type='application/atom+xml; profile=opds-catalog; kind=navigation',
                    rel='start',
                    title='Home'))
        links.append(
            getLink(href='%s?cmd=Comic&amp;comicid=%s' %
                         (self.opdsroot, quote_plus(kwargs['comicid'])),
                    type='application/atom+xml; profile=opds-catalog; kind=navigation',
                    rel='self'))
        if len(issues) > (index + self.PAGE_SIZE):
            links.append(
                getLink(href='%s?cmd=Comic&amp;comicid=%s&amp;index=%s' %
                             (self.opdsroot, quote_plus(kwargs['comicid']),
                              index + self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation',
                        rel='next'))
        if index >= self.PAGE_SIZE:
            links.append(
                getLink(href='%s?cmd=Comic&amp;comicid=%s&amp;index=%s' %
                             (self.opdsroot, quote_plus(kwargs['comicid']),
                              index - self.PAGE_SIZE),
                        type='application/atom+xml; profile=opds-catalog; kind=navigation',
                        rel='previous'))

        feed['links'] = links
        feed['entries'] = entries
        self.data = feed
        return
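
# Hedged usage note: this handler is driven by OPDS query arguments, e.g.
#   ?cmd=Comic&comicid=12345&index=25
# The route shape is inferred from the hrefs built above ('&amp;' is the
# XML-escaped form of '&'); the values are illustrative only.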
Example #55
    def searchit(self):
        chk_id = None
        #logger.info('searchterm: %s' % self.searchterm)
        #self.searchterm is a dict containing series name, issue number, volume and publisher.
        series_search = self.searchterm['series']
        issue_search = self.searchterm['issue']
        volume_search = self.searchterm['volume']

        if series_search.startswith('0-Day Comics Pack'):
            #issue = '21' = WED, #volume='2' = 2nd month
            torrentid = 22247  #2018
            publisher_search = None  #'2'  #2nd month
            comic_id = None
        elif all([self.searchterm['torrentid_32p'] is not None,
                  self.searchterm['torrentid_32p'] != 'None']):
            torrentid = self.searchterm['torrentid_32p']
            comic_id = self.searchterm['id']
            publisher_search = self.searchterm['publisher']
        else:
            torrentid = None
            comic_id = self.searchterm['id']

            annualize = False
            if 'annual' in series_search.lower():
                series_search = re.sub(' annual', '',
                                       series_search.lower()).strip()
                annualize = True
            publisher_search = self.searchterm['publisher']
            spl = [x for x in self.publisher_list if x in publisher_search]
            for x in spl:
                publisher_search = re.sub(x, '', publisher_search).strip()
            #logger.info('publisher search set to : %s' % publisher_search)

            # lookup the ComicID in the 32p sqlite3 table to pull the series_id to use.
            if comic_id:
                chk_id = helpers.checkthe_id(comic_id)

            if any([chk_id is None, mylar.CONFIG.DEEP_SEARCH_32P is True]):
                #generate the dynamic name of the series here so we can match it up
                as_d = filechecker.FileChecker()
                as_dinfo = as_d.dynamic_replace(series_search)
                mod_series = re.sub('\|', '',
                                    as_dinfo['mod_seriesname']).strip()
                as_puinfo = as_d.dynamic_replace(publisher_search)
                pub_series = as_puinfo['mod_seriesname']

                logger.fdebug('series_search: %s' % series_search)

                if '/' in series_search:
                    series_search = series_search[:series_search.find('/')]
                if ':' in series_search:
                    series_search = series_search[:series_search.find(':')]
                if ',' in series_search:
                    series_search = series_search[:series_search.find(',')]

                logger.fdebug('config.search_32p: %s' %
                              mylar.CONFIG.SEARCH_32P)
                if mylar.CONFIG.SEARCH_32P is False:
                    url = 'https://walksoftly.itsaninja.party/serieslist.php'
                    params = {
                        'series': re.sub('\|', '', mod_series.lower()).strip()
                    }  #series_search}
                    logger.fdebug('search query: %s' %
                                  re.sub('\|', '', mod_series.lower()).strip())
                    try:
                        t = requests.get(url,
                                         params=params,
                                         verify=True,
                                         headers={'USER-AGENT':
                                                  mylar.USER_AGENT[:mylar.USER_AGENT.find('/') + 7] +
                                                  mylar.USER_AGENT[mylar.USER_AGENT.find('(') + 1]})
                    except requests.exceptions.RequestException as e:
                        logger.warn(e)
                        return "no results"

                    if t.status_code == 619:
                        logger.warn('[%s] Unable to retrieve data from site.' %
                                    t.status_code)
                        return "no results"
                    elif t.status_code == 999:
                        logger.warn('[%s] No series title was provided to the search query.'
                                    % t.status_code)
                        return "no results"

                    try:
                        results = t.json()
                    except Exception:
                        results = t.text

                    if len(results) == 0:
                        logger.warn('No results found for search on 32P.')
                        return "no results"

#        with cfscrape.create_scraper(delay=15) as s:
#            s.headers = self.headers
#            cj = LWPCookieJar(os.path.join(mylar.CONFIG.SECURE_DIR, ".32p_cookies.dat"))
#            cj.load()
#            s.cookies = cj
        data = []
        pdata = []
        pubmatch = False

        if any([series_search.startswith('0-Day Comics Pack'),
                torrentid is not None]):
            data.append({"id": torrentid, "series": series_search})
        else:
            if any([not chk_id, mylar.CONFIG.DEEP_SEARCH_32P is True]):
                if mylar.CONFIG.SEARCH_32P is True:
                    url = 'https://32pag.es/torrents.php'  #?action=serieslist&filter=' + series_search #&filter=F
                    params = {'action': 'serieslist', 'filter': series_search}
                    time.sleep(1)  #just to make sure we don't hammer, 1s pause.
                    t = self.session.get(url,
                                         params=params,
                                         verify=True,
                                         allow_redirects=True)
                    soup = BeautifulSoup(t.content, "html.parser")
                    results = soup.find_all("a", {"class": "object-qtip",
                                                  "data-type": "torrentgroup"})

                for r in results:
                    if mylar.CONFIG.SEARCH_32P is True:
                        torrentid = r['data-id']
                        torrentname = r.findNext(text=True)
                        torrentname = torrentname.strip()
                    else:
                        torrentid = r['id']
                        torrentname = r['series']

                    as_d = filechecker.FileChecker()
                    as_dinfo = as_d.dynamic_replace(torrentname)
                    seriesresult = re.sub('\|', '',
                                          as_dinfo['mod_seriesname']).strip()
                    logger.fdebug('searchresult: %s --- %s [%s]' %
                                  (seriesresult, mod_series, publisher_search))
                    if seriesresult.lower() == mod_series.lower():
                        logger.fdebug('[MATCH] %s [%s]' %
                                      (torrentname, torrentid))
                        data.append({"id": torrentid, "series": torrentname})
                    elif publisher_search.lower() in seriesresult.lower():
                        logger.fdebug('[MATCH] Publisher match.')
                        tmp_torrentname = re.sub(publisher_search.lower(), '',
                                                 seriesresult.lower()).strip()
                        as_t = filechecker.FileChecker()
                        as_tinfo = as_t.dynamic_replace(tmp_torrentname)
                        if re.sub('\|', '', as_tinfo['mod_seriesname']).strip() == mod_series.lower():
                            logger.fdebug('[MATCH] %s [%s]' %
                                          (torrentname, torrentid))
                            pdata.append({
                                "id": torrentid,
                                "series": torrentname
                            })
                            pubmatch = True

                logger.fdebug('%s series listed for searching that match.' %
                              len(data))
            else:
                logger.fdebug(
                    'Exact series ID already discovered previously. Setting to : %s [%s]'
                    % (chk_id['series'], chk_id['id']))
                pdata.append({"id": chk_id['id'], "series": chk_id['series']})
                pubmatch = True

        if all([len(data) == 0, len(pdata) == 0]):
            return "no results"
        else:
            dataset = []
            if len(data) > 0:
                dataset += data
            if len(pdata) > 0:
                dataset += pdata
            logger.fdebug('%s series match the title being searched for on 32P...'
                          % len(dataset))

        if all([
                chk_id is None,
                not series_search.startswith('0-Day Comics Pack'),
                self.searchterm['torrentid_32p'] is not None,
                self.searchterm['torrentid_32p'] != 'None'
        ]) and any([len(data) == 1, len(pdata) == 1]):
            #update the 32p_reference so we avoid doing a url lookup next time
            helpers.checkthe_id(comic_id, dataset)
        else:
            if all([
                    not series_search.startswith('0-Day Comics Pack'),
                    self.searchterm['torrentid_32p'] is not None,
                    self.searchterm['torrentid_32p'] != 'None'
            ]):
                pass
            else:
                logger.debug(
                    'Unable to properly verify reference on 32P - will update the 32P reference point once the issue has been successfully matched against.'
                )

        results32p = []
        resultlist = {}

        for x in dataset:
            #for 0-day packs, issue=week#, volume=month, id=0-day year pack (ie.issue=21&volume=2 for feb.21st)
            payload = {
                "action": "groupsearch",
                "id": x['id'],  #searchid,
                "issue": issue_search
            }
            #in order to match up against 0-day stuff, volume has to be none at this point
            #when doing other searches tho, this should be allowed to go through
            #if all([volume_search != 'None', volume_search is not None]):
            #    payload.update({'volume': re.sub('v', '', volume_search).strip()})
            if series_search.startswith('0-Day Comics Pack'):
                payload.update({"volume": volume_search})

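            # Round-trip through JSON; aside from coercing values to plain JSON
            # types this is effectively a no-op (behavior preserved from the source).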
            payload = json.dumps(payload)
            payload = json.loads(payload)

            logger.fdebug('payload: %s' % payload)
            url = 'https://32pag.es/ajax.php'
            time.sleep(1)  #just to make sure we don't hammer, 1s pause.
            try:
                d = self.session.get(url,
                                     params=payload,
                                     verify=True,
                                     allow_redirects=True)
            except Exception as e:
                logger.error('%s [%s] Could not GET URL %s' %
                             (self.module, e, url))

            try:
                searchResults = d.json()
            except Exception as e:
                searchResults = d.text
                logger.debug('[%s] %s Search Result did not return valid JSON, falling back on text: %s'
                             % (e, self.module, searchResults))
                return False

            if searchResults['status'] == 'success' and searchResults['count'] > 0:
                logger.fdebug('successfully retrieved %s search results' %
                              searchResults['count'])
                for a in searchResults['details']:
                    if series_search.startswith('0-Day Comics Pack'):
                        title = series_search
                    else:
                        title = self.searchterm['series'] + ' v' + a['volume'] + ' #' + a['issues']
                    results32p.append({
                        'link': a['id'],
                        'title': title,
                        'filesize': a['size'],
                        'issues': a['issues'],
                        'pack': a['pack'],
                        'format': a['format'],
                        'language': a['language'],
                        'seeders': a['seeders'],
                        'leechers': a['leechers'],
                        'scanner': a['scanner'],
                        'chkit': {'id': x['id'], 'series': x['series']},
                        'pubdate': datetime.datetime.fromtimestamp(
                            float(a['upload_time'])).strftime('%a, %d %b %Y %H:%M:%S'),
                        'int_pubdate': float(a['upload_time'])
                    })

            else:
                logger.fdebug('32P did not return any valid search results.')

        if len(results32p) > 0:
            resultlist['entries'] = sorted(results32p,
                                           key=itemgetter('pack', 'title'),
                                           reverse=False)
            logger.debug('%s Resultslist: %s' % (self.module, resultlist))
        else:
            resultlist = 'no results'

        return resultlist
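
# Hedged sketch of the two payload shapes this method sends to ajax.php,
# based on the comments above; the ids and values are illustrative only.
regular_payload = {'action': 'groupsearch', 'id': 99999, 'issue': '133'}
pack_payload = {'action': 'groupsearch', 'id': 22247, 'issue': '21', 'volume': '2'}  # 2018 pack: week 21, 2nd month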
Example #56
                        else:
                            grab_dst = os.path.join(grdst, ofilename)

                        self._log("Destination Path : " + grab_dst)
                        logger.info("Destination Path : " + grab_dst)
                        grab_src = os.path.join(self.nzb_folder, ofilename)
                        self._log("Source Path : " + grab_src)
                        logger.info("Source Path : " + grab_src)

                        logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))

                        try:
                            shutil.move(grab_src, grab_dst)
                        except (OSError, IOError):
                            self._log("Failed to move file - check directories and manually re-run.")
                            logger.debug("Failed to move file - check directories and manually re-run.")
                            return
                        #tidyup old path
                        try:
                            shutil.rmtree(self.nzb_folder)
                        except (OSError, IOError):
                            self._log("Failed to remove temporary directory.")
                            logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
                            return

                        logger.debug("Removed temporary directory : " + str(self.nzb_folder))
                        self._log("Removed temporary directory : " + self.nzb_folder)
                        #delete entry from nzblog table
                        myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])

                        if 'S' in issueid: