Example No. 1
def fetchURL(URL, headers=None, retry=True):
    """ Return the result of fetching a URL and True if success
        Otherwise return error message and False
        Allow one retry on timeout by default"""
    request = urllib2.Request(URL)
    if lazylibrarian.PROXY_HOST:
        request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
    if headers is None:
        # some sites insist on having a user-agent, default is to add one
        # if you don't want any headers, send headers=[]
        request.add_header('User-Agent', USER_AGENT)
    else:
        for item in headers:
            request.add_header(item, headers[item])
    try:
        resp = urllib2.urlopen(request, timeout=30)
        if str(resp.getcode()).startswith("2"):  # (200 OK etc)
            try:
                result = resp.read()
            except socket.error as e:
                return str(e), False
            return result, True
        return str(resp.getcode()), False
    except socket.timeout as e:
        if not retry:
            logger.error(u"fetchURL: Timeout getting response from %s" % URL)
            return str(e), False
        logger.warn(u"fetchURL: retrying - got timeout on %s" % URL)
        result, success = fetchURL(URL, headers=headers, retry=False)
        return result, success
    except (urllib2.HTTPError, urllib2.URLError, ssl.SSLError) as e:
        if hasattr(e, 'reason'):
            return e.reason, False
        return str(e), False
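A minimal usage sketch (not part of the extracted source), showing the (result, success) contract described in the docstring; the URL is purely illustrative:

result, success = fetchURL('https://www.example.com/rss')
if success:
    logger.debug("fetchURL returned %s bytes" % len(result))
else:
    logger.warn("fetchURL failed: %s" % result)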
Example No. 2
def fetchURL(URL):
    request = urllib2.Request(URL)
    if lazylibrarian.PROXY_HOST:
        request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
    # google insists on having a user-agent
    request.add_header('User-Agent', USER_AGENT)
    try:
        resp = urllib2.urlopen(request, timeout=30)
        if str(resp.getcode()).startswith("2"):
            # (200 OK etc)
            try:
                result = resp.read()
            except socket.error as e:
                return e, False
            return result, True
        else:
            return str(resp), False  
    except (socket.timeout) as e:
        logger.warn(u"fetchURL: retrying - got timeout on %s" % URL)
        try:
            resp = urllib2.urlopen(request, timeout=30)  # don't get stuck
            if str(resp.getcode()).startswith("2"):
                # (200 OK etc)
                try:
                    result = resp.read()
                except socket.error as e:
                    return e, False
                return result, True
            else:
                return str(resp), False  
        except (urllib2.URLError, socket.timeout) as e:
            logger.error(u"fetchURL: Error getting response for %s: %s" % (URL, e))
            return e, False                    
    except (urllib2.HTTPError, urllib2.URLError) as e:
        return e.reason, False
Example No. 3
def getLatestVersionaFromGit():
    latest_version = 'Unknown'

    # Can only work for non Windows driven installs, so check install type
    if lazylibrarian.INSTALL_TYPE == 'win':
        logger.debug('(getLatestVersionaFromGit) Code Error - Windows install - should not be called under a windows install')
        latest_version = 'WINDOWS INSTALL'
    else:
        # check current branch value of the local git repo as folks may pull from a branch not master
        branch = lazylibrarian.CURRENT_BRANCH

        if (branch == 'InvalidBranch'):
            logger.debug('(getLatestVersionaFromGit) - Failed to get a valid branch name from local repo')
        else:

            # Get the latest commit available from github
            url = 'https://api.github.com/repos/%s/%s/commits/%s' % (lazylibrarian.GIT_USER, lazylibrarian.GIT_REPO, lazylibrarian.GIT_BRANCH)
            logger.debug('(getLatestVersionaFromGit) Retrieving latest version information from github command=[%s]' % url)
            try:
                result = urllib2.urlopen(url).read()
                git = simplejson.JSONDecoder().decode(result)
                latest_version = git['sha']
                logger.debug('(getLatestVersionaFromGit) Branch [%s] Latest Version has been set to [%s]' % (branch, latest_version))
            except Exception:
                logger.warn('(getLatestVersionaFromGit) Could not get the latest commit from github')
                latest_version = 'Not_Available_From_GitHUB'

    return latest_version
Example No. 4
def search_tor_book(books=None, mags=None):
    if not(lazylibrarian.USE_TOR):
        logger.warn('Torrent search is disabled')
        return
    # rename this thread
    threading.currentThread().name = "SEARCHTORBOOKS"
    myDB = database.DBConnection()
    searchlist = []
    #searchlist1 = []

    if books is None:
        # We are performing a backlog search
        searchbooks = myDB.select('SELECT BookID, AuthorName, Bookname from books WHERE Status="Wanted"')

        # Clear cache
        providercache = os.path.join(lazylibrarian.DATADIR, ".ProviderCache")
        if os.path.exists(providercache):
            try:
                shutil.rmtree(providercache)
                os.mkdir(providercache)
            except OSError as e:
                logger.error('Failed to clear cache: ' + str(e))

        # Clearing throttling timeouts
        t = SimpleCache.ThrottlingProcessor()
        t.lastRequestTime.clear()
Example No. 5
def NewzNabPlus(book=None, host=None, api_key=None, searchType=None, searchMode=None):

    # logger.info('[NewzNabPlus] Searching term [%s] for author [%s] and title [%s] on host [%s] for a [%s] item' % (book['searchterm'], book['authorName'], book['bookName'], host, searchType))
    logger.debug('[NewzNabPlus] searchType [%s] with Host [%s] mode [%s] using api [%s] for item [%s]' % (searchType, host, searchMode, api_key, str(book)))

    results = []

    params = ReturnSearchTypeStructure(api_key, book, searchType, searchMode)

    if not str(host)[:4] == "http":
        host = 'http://' + host

    URL = host + '/api?' + urllib.urlencode(params)

    try:
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', common.USER_AGENT)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".ProviderCache"), SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)

        try:
            data = ElementTree.parse(resp)
        except (urllib2.URLError, IOError, EOFError) as e:
            logger.warn('Error fetching data from %s: %s' % (host, e))
            data = None

    except Exception as e:
        logger.error("Error opening url %s: %s" % (URL, e))
        data = None
Example No. 6
def DownloadMethod(bookid=None, nzbprov=None, nzbtitle=None, nzburl=None):

    myDB = database.DBConnection()

    if lazylibrarian.SAB_HOST and not lazylibrarian.BLACKHOLE:
        download = sabnzbd.SABnzbd(nzbtitle, nzburl)

    elif lazylibrarian.BLACKHOLE:

        try:
            nzbfile = urllib2.urlopen(nzburl, timeout=30).read()

        except urllib2.URLError as e:
            logger.warn('Error fetching nzb from url: ' + nzburl + ' %s' % e)

        nzbname = str.replace(nzbtitle, ' ', '_') + '.nzb'
        nzbpath = os.path.join(lazylibrarian.BLACKHOLEDIR, nzbname)

        try:
            f = open(nzbpath, 'w')
            f.write(nzbfile)
            f.close()
            logger.info('NZB file saved to: ' + nzbpath)
            download = True
        except Exception, e:
            logger.error('%s not writable, NZB not saved. Error: %s' % (nzbpath, e))
            download = False
Example No. 7
def removeTorrent(torrentid, remove_data=False):

    method = 'torrent-get'
    arguments = {'ids': [torrentid], 'fields': ['isFinished', 'name']}

    response, _ = torrentAction(method, arguments)  # type: dict
    if not response:
        return False

    try:
        finished = response['arguments']['torrents'][0]['isFinished']
        name = response['arguments']['torrents'][0]['name']

        if finished:
            logger.debug('%s has finished seeding, removing torrent and data' % name)
            method = 'torrent-remove'
            if remove_data:
                arguments = {'delete-local-data': True, 'ids': [torrentid]}
            else:
                arguments = {'ids': [torrentid]}
            _, _ = torrentAction(method, arguments)
            return True
        else:
            logger.debug('%s has not finished seeding yet, torrent will not be removed' % name)
    except IndexError:
        # no torrents, already removed?
        return True
    except Exception as e:
        logger.warn('Unable to remove torrent %s, %s %s' % (torrentid, type(e).__name__, str(e)))
        return False

    return False
Example No. 8
def getServer():
    host = lazylibrarian.CONFIG['RTORRENT_HOST']
    if not host:
        logger.error("rtorrent error: No host found, check your config")
        return False

    if not host.startswith("http://") and not host.startswith("https://"):
        host = 'http://' + host
    if host.endswith('/'):
        host = host[:-1]

    if lazylibrarian.CONFIG['RTORRENT_USER']:
        user = lazylibrarian.CONFIG['RTORRENT_USER']
        password = lazylibrarian.CONFIG['RTORRENT_PASS']
        parts = host.split('://')
        host = parts[0] + '://' + user + ':' + password + '@' + parts[1]

    try:
        socket.setdefaulttimeout(20)  # so we don't freeze if server is not there
        server = xmlrpc_client.ServerProxy(host)
        result = server.system.client_version()
        socket.setdefaulttimeout(None)  # reset timeout
        logger.debug("rTorrent client version = %s" % result)
    except Exception as e:
        socket.setdefaulttimeout(None)  # reset timeout if failed
        logger.error("xmlrpc_client error: %s" % repr(e))
        return False
    if result:
        return server
    else:
        logger.warn('No response from rTorrent server')
        return False
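An illustrative caller sketch, reusing only the system.client_version() call already shown above; the logger is assumed from the surrounding module:

server = getServer()
if server:
    logger.debug("Connected to rTorrent, client version %s" % server.system.client_version())
else:
    logger.error("Unable to connect to rTorrent, check host and credentials")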
Example No. 9
    def get_shelf_list(self):
        global consumer, client, token, user_id
        if not lazylibrarian.CONFIG['GR_API'] or not lazylibrarian.CONFIG['GR_SECRET'] or not \
                lazylibrarian.CONFIG['GR_OAUTH_TOKEN'] or not lazylibrarian.CONFIG['GR_OAUTH_SECRET']:
            logger.warn("Goodreads get shelf error: Please authorise first")
            return []
        else:
            #
            # loop over each page of shelves
            #     loop over each shelf
            #         add shelf to list
            #
            consumer = oauth.Consumer(key=str(lazylibrarian.CONFIG['GR_API']),
                                      secret=str(lazylibrarian.CONFIG['GR_SECRET']))
            token = oauth.Token(lazylibrarian.CONFIG['GR_OAUTH_TOKEN'], lazylibrarian.CONFIG['GR_OAUTH_SECRET'])
            client = oauth.Client(consumer, token)
            user_id = self.getUserId()

            current_page = 0
            shelves = []
            page_shelves = 1
            while page_shelves:
                current_page = current_page + 1
                page_shelves = 0
                shelf_template = Template('${base}/shelf/list.xml?user_id=${user_id}&key=${key}&page=${page}')
                body = urlencode({})
                headers = {'Content-Type': 'application/x-www-form-urlencoded'}
                request_url = shelf_template.substitute(base='https://www.goodreads.com', user_id=user_id,
                                                        page=current_page, key=lazylibrarian.CONFIG['GR_API'])
                gr_api_sleep()
                try:
                    response, content = client.request(request_url, 'GET', body, headers)
                except Exception as e:
                    logger.error("Exception in client.request: %s %s" % (type(e).__name__, traceback.format_exc()))
                    return shelves

                if not response['status'].startswith('2'):
                    logger.error('Failure status: %s for page %s' % (response['status'], current_page))
                    if lazylibrarian.LOGLEVEL & lazylibrarian.log_grsync:
                        logger.debug(request_url)
                else:
                    xmldoc = xml.dom.minidom.parseString(content)

                    shelf_list = xmldoc.getElementsByTagName('shelves')[0]
                    for item in shelf_list.getElementsByTagName('user_shelf'):
                        shelf_name = item.getElementsByTagName('name')[0].firstChild.nodeValue
                        shelf_count = item.getElementsByTagName('book_count')[0].firstChild.nodeValue
                        shelf_exclusive = item.getElementsByTagName('exclusive_flag')[0].firstChild.nodeValue
                        shelves.append({'name': shelf_name, 'books': shelf_count, 'exclusive': shelf_exclusive})
                        page_shelves += 1

                        if lazylibrarian.LOGLEVEL & lazylibrarian.log_grsync:
                            logger.debug('Shelf %s : %s: Exclusive %s' % (shelf_name, shelf_count, shelf_exclusive))

                    if lazylibrarian.LOGLEVEL & lazylibrarian.log_grsync:
                        logger.debug('Found %s shelves on page %s' % (page_shelves, current_page))

            logger.debug('Found %s shelves on %s page%s' % (len(shelves), current_page - 1, plural(current_page - 1)))
            # print shelves
            return shelves
Example No. 10
def NewzNab(searchterm=None, resultlist=None):

    HOST = lazylibrarian.NEWZNAB_HOST

    params = {
        "t": "search",
        "apikey": lazylibrarian.NEWZNAB_API,
        "cat": 7020,
        "q": searchterm
        }

    if not str(HOST)[:4] == "http":
        HOST = 'http://' + HOST

    URL = HOST + '/api?' + urllib.urlencode(params)

    # to debug because of api
    logger.debug(u'Parsing results from <a href="%s">%s</a>' % (URL, lazylibrarian.NEWZNAB_HOST))

    try:
        data = ElementTree.parse(urllib2.urlopen(URL, timeout=20))
        rootxml = data.getroot()
        resultxml = rootxml.getiterator('item')
    except urllib2.URLError as e:
        logger.warn('Error fetching data from %s: %s' % (lazylibrarian.NEWZNAB_HOST, e))
        data = None
Example No. 11
    def action(self, query, args=None):
        with db_lock:

            if not query:
                return

            sqlResult = None
            attempt = 0

            while attempt < 5:

                try:
                    if not args:
                        sqlResult = self.connection.execute(query)
                    else:
                        sqlResult = self.connection.execute(query, args)
                    self.connection.commit()
                    break

                except sqlite3.OperationalError as e:
                    if "unable to open database file" in e.message or "database is locked" in e.message:
                        logger.warn("Database Error: %s" % e)
                        attempt += 1
                        time.sleep(1)
                    else:
                        logger.error("Database error: %s" % e)
                        raise

                except sqlite3.DatabaseError as e:
                    logger.error("Fatal error executing %s :: %s" % (query, e))
                    raise

            return sqlResult
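A hedged usage sketch of this retry-wrapped action() method; the DBConnection instance and the BookID value are placeholders:

myDB = database.DBConnection()
myDB.action('UPDATE books SET Status=? WHERE BookID=?', ("Snatched", "12345"))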
Example No. 12
def export_CSV(search_dir=None, status="Wanted"):
    """ Write a csv file to the search_dir containing all books marked as "Wanted" """

    if not search_dir or os.path.isdir(search_dir) is False:
        logger.warn("Please check Alternate Directory setting")
        return False

    csvFile = os.path.join(search_dir, "%s - %s.csv" % (status, now().replace(':', '-')))

    myDB = database.DBConnection()

    find_status = myDB.select('SELECT * FROM books WHERE Status = "%s"' % status)

    if not find_status:
        logger.warn(u"No books marked as %s" % status)
    else:
        count = 0
        with open(csvFile, 'wb') as csvfile:
            csvwrite = csv.writer(csvfile, delimiter=',',
                                  quotechar='"', quoting=csv.QUOTE_MINIMAL)

            # write headers, change AuthorName BookName BookIsbn to match import csv names (Author, Title, ISBN10)
            csvwrite.writerow(['BookID', 'Author', 'Title', 'ISBN', 'AuthorID'])

            for resulted in find_status:
                logger.debug(u"Exported CSV for book %s" % resulted['BookName'])
                row = ([resulted['BookID'], resulted['AuthorName'], resulted['BookName'],
                        resulted['BookIsbn'], resulted['AuthorID']])
                csvwrite.writerow([("%s" % s).encode(lazylibrarian.SYS_ENCODING) for s in row])
                count = count + 1
        logger.info(u"CSV exported %s book%s to %s" % (count, plural(count), csvFile))
Example No. 13
    def action(self, query, args=None):
        with db_lock:

            if not query:
                return

            sqlResult = None
            attempt = 0

            while attempt < 5:

                try:
                    if not args:
                        # logger.debug(self.filename+": "+query)
                        sqlResult = self.connection.execute(query)
                    else:
                        # logger.debug(self.filename+": "+query+" with args "+str(args))
                        sqlResult = self.connection.execute(query, args)
                    self.connection.commit()
                    break

                except sqlite3.OperationalError as e:
                    if "unable to open database file" in e.message or "database is locked" in e.message:
                        logger.warn('Database Error: %s' % e)
                        attempt += 1
                        time.sleep(1)
                    else:
                        logger.error('Database error: %s' % e)
                        raise

                except sqlite3.DatabaseError as e:
                    logger.error('Fatal error executing %s :: %s' % (query, e))
                    raise
Example No. 14
def getSeriesMembers(seriesID=None):
    """ Ask librarything or goodreads for details on all books in a series
        order, bookname, authorname, workid, authorid
        (workid and authorid are goodreads only)
        Return as a list of lists """
    results = []
    if lazylibrarian.CONFIG['BOOK_API'] == 'GoodReads':
        params = {"format": "xml", "key": lazylibrarian.CONFIG['GR_API']}
        URL = 'https://www.goodreads.com/series/' + seriesID + '?' + urlencode(params)
        try:
            rootxml, in_cache = gr_xml_request(URL)
            if rootxml is None:
                logger.debug("Error requesting series %s" % seriesID)
                return []
        except Exception as e:
            logger.error("%s finding series %s: %s" % (type(e).__name__, seriesID, str(e)))
            return []

        works = rootxml.find('series/series_works')
        books = works.getiterator('series_work')
        if books is None:
            logger.warn('No books found for %s' % seriesID)
            return []
        for book in books:
            mydict = {}
            for mykey, location in [('order', 'user_position'),
                                    ('bookname', 'work/best_book/title'),
                                    ('authorname', 'work/best_book/author/name'),
                                    ('workid', 'work/id'),
                                    ('authorid', 'work/best_book/author/id')
                                    ]:
                if book.find(location) is not None:
                    mydict[mykey] = book.find(location).text
                else:
                    mydict[mykey] = ""
            results.append([mydict['order'], mydict['bookname'], mydict['authorname'],
                            mydict['workid'], mydict['authorid']])
    else:
        data = getBookWork(None, "SeriesPage", seriesID)
        if data:
            try:
                table = data.split('class="worksinseries"')[1].split('</table>')[0]
                rows = table.split('<tr')
                for row in rows:
                    if 'href=' in row:
                        booklink = row.split('href="')[1]
                        bookname = booklink.split('">')[1].split('<')[0]
                        # booklink = booklink.split('"')[0]
                        try:
                            authorlink = row.split('href="')[2]
                            authorname = authorlink.split('">')[1].split('<')[0]
                            # authorlink = authorlink.split('"')[0]
                            order = row.split('class="order">')[1].split('<')[0]
                            results.append([order, bookname, authorname, '', ''])
                        except IndexError:
                            logger.debug('Incomplete data in series table for series %s' % seriesID)
            except IndexError:
                if 'class="worksinseries"' in data:  # error parsing, or just no series data available?
                    logger.debug('Error in series table for series %s' % seriesID)
    return results
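A short usage sketch unpacking the list-of-lists return value documented above; the series id is illustrative:

for order, bookname, authorname, workid, authorid in getSeriesMembers('40321'):
    logger.debug("#%s %s by %s" % (order, bookname, authorname))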
Example No. 15
    def create_shelf(self, shelf='lazylibrarian'):
        global consumer, client, token, user_id
        if not lazylibrarian.CONFIG['GR_API'] or not lazylibrarian.CONFIG['GR_SECRET'] or not \
                lazylibrarian.CONFIG['GR_OAUTH_TOKEN'] or not lazylibrarian.CONFIG['GR_OAUTH_SECRET']:
            logger.warn("Goodreads create shelf error: Please authorise first")
            return False, 'Unauthorised'

        consumer = oauth.Consumer(key=str(lazylibrarian.CONFIG['GR_API']),
                                  secret=str(lazylibrarian.CONFIG['GR_SECRET']))
        token = oauth.Token(lazylibrarian.CONFIG['GR_OAUTH_TOKEN'], lazylibrarian.CONFIG['GR_OAUTH_SECRET'])
        client = oauth.Client(consumer, token)
        user_id = self.getUserId()

        # could also pass [featured] [exclusive_flag] [sortable_flag] all default to False
        body = urlencode({'user_shelf[name]': shelf.lower()})
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        gr_api_sleep()

        try:
            response, content = client.request('%s/user_shelves.xml' % 'https://www.goodreads.com', 'POST',
                                               body, headers)
        except Exception as e:
            logger.error("Exception in client.request: %s %s" % (type(e).__name__, traceback.format_exc()))
            return False, "Error in client.request: see error log"

        if not response['status'].startswith('2'):
            msg = 'Failure status: %s' % response['status']
            return False, msg
        return True, ''
Example No. 16
    def find_author_id(self, refresh=False):
        author = self.name
        # Goodreads doesn't like initials followed by spaces,
        # eg "M L Hamilton", needs "M. L. Hamilton" or "M.L.Hamilton"
        # but DOES need spaces if not initials eg "Tom.Holt" fails, but "Tom Holt" works
        if author[1] == ' ':
            author = author.replace(' ', '.')
            author = author.replace('..', '.')
        URL = 'http://www.goodreads.com/api/author_url/' + urllib.quote(author) + '?' + urllib.urlencode(self.params)
        logger.debug("Searching for author with name: %s" % author)

        authorlist = []
        try:
            rootxml, in_cache = self.get_request(URL)
        except Exception as e:
            logger.error("Error finding authorid: " + str(e) + str(URL))
            return authorlist

        resultxml = rootxml.getiterator('author')

        if not len(resultxml):
            logger.warn('No authors found with name: %s' % author)
        else:
            # In spite of how this looks, goodreads only returns one result, even if there are multiple matches
            # we just have to hope we get the right one. eg search for "James Lovelock" returns "James E. Lovelock"
            # who only has one book listed under googlebooks, the rest are under "James Lovelock"
            # goodreads has all his books under "James E. Lovelock". Can't come up with a good solution yet.
            # For now we'll have to let the user handle this by selecting/adding the author manually
            for author in resultxml:
                authorid = author.attrib.get("id")
                authorname = author[0].text
                authorlist = self.get_author_info(authorid, authorname, refresh)
        return authorlist
Example No. 17
def shutdown(restart=False, update=False):

    cherrypy.engine.exit()
    SCHED.shutdown(wait=False)
    config_write()

    if not restart and not update:
        logger.info("LazyLibrarian is shutting down...")
    if update:
        logger.info("LazyLibrarian is updating...")
        try:
            versioncheck.update()
        except Exception as e:
            logger.warn("LazyLibrarian failed to update: %s. Restarting." % e)

    if PIDFILE:
        logger.info("Removing pidfile %s" % PIDFILE)
        os.remove(PIDFILE)

    if restart:
        logger.info("LazyLibrarian is restarting ...")
        popen_list = [sys.executable, FULL_PATH]
        popen_list += ARGS
        if "--nolaunch" not in popen_list:
            popen_list += ["--nolaunch"]
            logger.info("Restarting LazyLibrarian with " + str(popen_list))
        subprocess.Popen(popen_list, cwd=os.getcwd())

    os._exit(0)
Example No. 18
def create_cover(issuefile=None):
    if not lazylibrarian.IMP_CONVERT == "None":  # special flag to say "no covers required"
        # create a thumbnail cover if there isn't one
        if "." in issuefile:
            words = issuefile.split(".")
            extn = "." + words[len(words) - 1]
            coverfile = issuefile.replace(extn, ".jpg")
        else:
            logger.debug("Unable to create cover for %s, no extension?" % issuefile)
            return
        if not os.path.isfile(coverfile):
            logger.debug("Creating cover for %s using %s" % (issuefile, lazylibrarian.MAGICK))
            try:
                # No PythonMagick in python3, hence allow wand, but more complicated
                # to install - try to use external imagemagick convert?
                # should work on win/mac/linux as long as imagemagick is installed
                # and config points to external "convert" program

                if len(lazylibrarian.IMP_CONVERT):  # allow external convert to override libraries
                    try:
                        params = [lazylibrarian.IMP_CONVERT, issuefile + "[0]", coverfile]
                        subprocess.check_output(params, stderr=subprocess.STDOUT)
                    except subprocess.CalledProcessError as e:
                        logger.warn('ImageMagick "convert" failed %s' % e.output)

                elif lazylibrarian.MAGICK == "wand":
                    with Image(filename=issuefile + "[0]") as img:
                        img.save(filename=coverfile)

                elif lazylibrarian.MAGICK == "pythonmagick":
                    img = PythonMagick.Image()
                    img.read(issuefile + "[0]")
                    img.write(coverfile)
            except:
Example No. 19
def exportCSV(search_dir=None, status="Wanted"):
    """ Write a csv file to the search_dir containing all books marked as "Wanted" """
     
    if not search_dir:
        logger.warn("Alternate Directory must not be empty")
        return False
    
    csvFile = os.path.join(search_dir, "%s - %s.csv" % (status, formatter.now()))  
    
    myDB = database.DBConnection() 
    
    find_status = myDB.select('SELECT * FROM books WHERE Status = "%s"' % status)
    
    if not find_status:
        logger.warn("No books marked as %s" % status)
    else:
        with open(csvFile, 'wb') as csvfile:
            csvwrite = csv.writer(csvfile, delimiter=',',
                quotechar='"', quoting=csv.QUOTE_MINIMAL)
                
            # write headers, change AuthorName BookName BookIsbn to match import csv names (Author, Title, ISBN10)
            csvwrite.writerow([
                'BookID', 'Author', 'Title', 
                'ISBN', 'AuthorID'
                ])
        
            for resulted in find_status:
                logger.debug("Exported CSV for book %s" % resulted['BookName'].encode('utf-8'))
                row = ([
                    resulted['BookID'], resulted['AuthorName'], resulted['BookName'], 
                    resulted['BookIsbn'], resulted['AuthorID']       
                    ])
                csvwrite.writerow([("%s" % s).encode('utf-8') for s in row])
        logger.info("CSV exported to %s" % csvFile)
Example No. 20
    def __init__(self, name=None, type=None):
        self.name = name
        self.type = type
        if not lazylibrarian.GB_API:
            logger.warn("No GoogleBooks API key, check config")
        self.url = "https://www.googleapis.com/books/v1/volumes?q="
        self.params = {"maxResults": 40, "printType": "books", "key": lazylibrarian.GB_API}
Example No. 21
def NZBMatrix(book=None):

    results = []

    if ((lazylibrarian.EBOOK_TYPE == None) or (lazylibrarian.EBOOK_TYPE == "")):
        params = {
            "page": "download",
            "username": lazylibrarian.NZBMATRIX_USER,
            "apikey": lazylibrarian.NZBMATRIX_API,
            "subcat": 36,
            "age": lazylibrarian.USENET_RETENTION,
            "term": book['searchterm']
        }
    else:
        params = {
            "page": "download",
            "username": lazylibrarian.NZBMATRIX_USER,
            "apikey": lazylibrarian.NZBMATRIX_API,
            "subcat": 36,
            "age": lazylibrarian.USENET_RETENTION,
            "term": book['searchterm']
        }
        logger.debug('Searching for: ' + book['searchterm'])
    URL = "http://rss.nzbmatrix.com/rss.php?" + urllib.urlencode(params)
    # to debug because of api
    logger.debug(u'Parsing results from <a href="%s">NZBMatrix</a>' % (URL))

    try:
        data = ElementTree.parse(urllib2.urlopen(URL, timeout=30))
    except (urllib2.URLError, IOError, EOFError) as e:
        logger.warn('Error fetching data from NZBMatrix: %s' % e)
        data = None
Example No. 22
    def get_author_info(self, authorid=None, authorname=None, refresh=False):

        URL = 'http://www.goodreads.com/author/show/' + authorid + '.xml?' + urllib.urlencode(self.params)
        author_dict = {}

        try:
            rootxml, in_cache = get_xml_request(URL)
        except Exception as e:
            logger.error("Error getting author info: %s" % e)
            return author_dict
        if rootxml is None:
            logger.debug("Error requesting author info")
            return author_dict

        resultxml = rootxml.find('author')

        if not len(resultxml):
            logger.warn('No author found with ID: ' + authorid)
        else:
            logger.debug("[%s] Processing info for authorID: %s" % (authorname, authorid))

            # PAB added authorname to author_dict - this holds the intact name preferred by GR
            author_dict = {
                'authorid': resultxml[0].text,
                'authorlink': resultxml.find('link').text,
                'authorimg': resultxml.find('image_url').text,
                'authorborn': resultxml.find('born_at').text,
                'authordeath': resultxml.find('died_at').text,
                'totalbooks': resultxml.find('works_count').text,
                'authorname': authorname
            }
        return author_dict
Example No. 23
def NZBDownloadMethod(bookid=None, nzbprov=None, nzbtitle=None, nzburl=None):

    myDB = database.DBConnection()
    if (lazylibrarian.NZB_DOWNLOADER_SABNZBD and lazylibrarian.SAB_HOST) and not lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:
        download = sabnzbd.SABnzbd(nzbtitle, nzburl)
    elif (
        lazylibrarian.NZB_DOWNLOADER_NZBGET and lazylibrarian.NZBGET_HOST
    ) and not lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:
        headers = {"User-Agent": USER_AGENT}
        data = request.request_content(url=nzburl, headers=headers)
        nzb = classes.NZBDataSearchResult()
        nzb.extraInfo.append(data)
        nzb.name = nzbtitle
        nzb.url = nzburl
        download = nzbget.sendNZB(nzb)

    elif lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:

        try:
            req = urllib2.Request(nzburl)
            if lazylibrarian.PROXY_HOST:
                req.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
            req.add_header("User-Agent", USER_AGENT)
            nzbfile = urllib2.urlopen(req, timeout=90).read()

        except (urllib2.URLError, socket.timeout) as e:
            logger.warn("Error fetching nzb from url: %s, %s" % (nzburl, e))
            nzbfile = False

        if nzbfile:

            nzbname = str(nzbtitle) + ".nzb"
            nzbpath = os.path.join(lazylibrarian.NZB_BLACKHOLEDIR, nzbname)

            try:
                with open(nzbpath, "w") as f:
                    f.write(nzbfile)
                logger.debug("NZB file saved to: " + nzbpath)
                download = True
                # try:
                #    os.chmod(nzbpath, 0777)
                # except Exception, e:
                #    logger.error("Could not chmod path: " + str(nzbpath))
            except Exception as e:
                logger.error("%s not writable, NZB not saved. Error: %s" % (nzbpath, e))
                download = False

    else:
        logger.warn("No NZB download method is enabled, check config.")
        return False

    if download:
        logger.debug("Nzbfile has been downloaded from " + str(nzburl))
        myDB.action('UPDATE books SET status = "Snatched" WHERE BookID="%s"' % bookid)
        myDB.action('UPDATE wanted SET status = "Snatched" WHERE NZBurl="%s"' % nzburl)
        return True
    else:
        logger.error(u'Failed to download nzb @ <a href="%s">%s</a>' % (nzburl, nzbprov))
        myDB.action('UPDATE wanted SET status = "Failed" WHERE NZBurl="%s"' % nzburl)
        return False
Example No. 24
def getWorkSeries(bookID=None):
    """ Return the series names and numbers in series for the given id as a list of tuples
        For goodreads the id is a WorkID, for librarything it's a BookID """
    myDB = database.DBConnection()
    serieslist = []
    if not bookID:
        logger.error("getWorkSeries - No bookID")
        return serieslist

    if lazylibrarian.CONFIG['BOOK_API'] == 'GoodReads':
        URL = "https://www.goodreads.com/work/"
        seriesurl = URL + bookID + "/series?format=xml&key=" + lazylibrarian.CONFIG['GR_API']

        rootxml, in_cache = gr_xml_request(seriesurl)
        if rootxml is None:
            logger.warn('Error getting XML for %s' % seriesurl)
        else:
            resultxml = rootxml.getiterator('series_work')
            for item in resultxml:
                try:
                    seriesname = item.find('./series/title').text
                    seriesname = seriesname.strip('\n').strip('\n').strip()
                    seriesid = item.find('./series/id').text
                    seriesnum = item.find('./user_position').text
                except (KeyError, AttributeError):
                    continue
                if seriesname and seriesid:
                    seriesname = cleanName(unaccented(seriesname), '&/')
                    seriesnum = cleanName(unaccented(seriesnum))
                    serieslist.append((seriesid, seriesnum, seriesname))
                    match = myDB.match('SELECT SeriesID from series WHERE SeriesName=?', (seriesname,))
                    if not match:
                        myDB.action('INSERT INTO series VALUES (?, ?, ?, ?, ?)',
                                    (seriesid, seriesname, "Active", 0, 0))
                    elif match['SeriesID'] != seriesid:
                        myDB.action('UPDATE series SET SeriesID=? WHERE SeriesName=?', (seriesid, seriesname))
    else:
        work = getBookWork(bookID, "Series")
        if work:
            try:
                slist = work.split('<h3><b>Series:')[1].split('</h3>')[0].split('<a href="/series/')
                for item in slist[1:]:
                    try:
                        series = item.split('">')[1].split('</a>')[0]
                        if series and '(' in series:
                            seriesnum = series.split('(')[1].split(')')[0].strip()
                            series = series.split(' (')[0].strip()
                        else:
                            seriesnum = ''
                            series = series.strip()
                        seriesname = cleanName(unaccented(series), '&/')
                        seriesnum = cleanName(unaccented(seriesnum))
                        serieslist.append(('', seriesnum, seriesname))
                    except IndexError:
                        pass
            except IndexError:
                pass

    return serieslist
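Illustrative caller, unpacking the (seriesid, seriesnum, seriesname) tuples built above; the work id is a placeholder:

for seriesid, seriesnum, seriesname in getWorkSeries('1044477'):
    logger.debug("Series %s #%s (id %s)" % (seriesname, seriesnum, seriesid))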
Example No. 25
    def _sendAndroidPN(self, title, msg, url, username, broadcast):

        # build up the URL and parameters
        msg = msg.strip()
        if PY2:
            msg = msg.encode(lazylibrarian.SYS_ENCODING)

        data = {
            'action': "send",
            'broadcast': broadcast,
            'uri': "",
            'title': title,
            'username': username,
            'message': msg,
        }
        proxies = proxyList()
        # send the request
        try:
            timeout = check_int(lazylibrarian.CONFIG['HTTP_TIMEOUT'], 30)
            r = requests.get(url, params=data, timeout=timeout, proxies=proxies)
            status = str(r.status_code)
            if status.startswith('2'):
                logger.debug("ANDROIDPN: Notification successful.")
                return True

            # HTTP status 404 if the provided email address isn't a AndroidPN user.
            if status == '404':
                logger.warn("ANDROIDPN: Username is wrong/not a AndroidPN email. AndroidPN will send an email to it")
            # For HTTP status code 401's, it is because you are passing in either an
            # invalid token, or the user has not added your service.
            elif status == '401':
                subscribeNote = self._sendAndroidPN(title, msg, url, username, broadcast)
                if subscribeNote:
                    logger.debug("ANDROIDPN: Subscription sent")
                    return True
                else:
                    logger.error("ANDROIDPN: Subscription could not be sent")

            # If you receive an HTTP status code of 400, it is because you failed to send the proper parameters
            elif status == '400':
                logger.error("ANDROIDPN: Wrong data sent to AndroidPN")
            else:
                logger.error("ANDROIDPN: Got error code %s" % status)
            return False

        except Exception as e:
            # URLError only returns a reason, not a code. HTTPError gives a code
            # FIXME: Python 2.5 hack, it wrongly reports 201 as an error
            if hasattr(e, 'code') and e.code == 201:
                logger.debug("ANDROIDPN: Notification successful.")
                return True

            # if we get an error back that doesn't have an error code then who knows what's really happening
            if not hasattr(e, 'code'):
                logger.error("ANDROIDPN: Notification failed.")
            else:
                # noinspection PyUnresolvedReferences
                logger.error("ANDROIDPN: Notification failed. Error code: " + str(e.code))
            return False
Example No. 26
    def _api_version(self):
        # noinspection PyBroadException
        try:
            version = int(self._command('version/api'))
        except Exception as err:
            logger.warn('Error getting api version. qBittorrent %s: %s' % (type(err).__name__, str(err)))
            version = 1
        return version
Example No. 27
def getBookAuthors(bookid):
    """ Get a list of authors contributing to a book from the goodreads bookpage or the librarything bookwork file """
    authorlist = []
    if lazylibrarian.CONFIG['BOOK_API'] == 'GoodReads':
        params = {"key": lazylibrarian.CONFIG['GR_API']}
        URL = 'https://www.goodreads.com/book/show/' + bookid + '?' + urlencode(params)
        try:
            rootxml, in_cache = gr_xml_request(URL)
            if rootxml is None:
                logger.debug("Error requesting book %s" % bookid)
                return []
        except Exception as e:
            logger.error("%s finding book %s: %s" % (type(e).__name__, bookid, str(e)))
            return []

        book = rootxml.find('book')
        authors = book.find('authors')
        anames = authors.getiterator('author')
        if anames is None:
            logger.warn('No authors found for %s' % bookid)
            return []
        for aname in anames:
            author = {}
            if aname.find('id') is not None:
                author['id'] = aname.find('id').text
            if aname.find('name') is not None:
                author['name'] = aname.find('name').text
            if aname.find('role') is not None:
                role = aname.find('role').text
                if not role:
                    role = ''
                author['role'] = role
            if author:
                authorlist.append(author)
    else:
        data = getBookWork(bookid, "Authors")
        if data:
            try:
                data = data.split('otherauthors_container')[1].split('</table>')[0].split('<table')[1].split('>', 1)[1]
            except IndexError:
                data = ''

        authorlist = []
        if data and 'Work?' in data:
            try:
                rows = data.split('<tr')
                for row in rows[2:]:
                    author = {}
                    col = row.split('<td>')
                    author['name'] = col[1].split('">')[1].split('<')[0]
                    author['role'] = col[2].split('<')[0]
                    author['type'] = col[3].split('<')[0]
                    author['work'] = col[4].split('<')[0]
                    author['status'] = col[5].split('<')[0]
                    authorlist.append(author)
            except IndexError:
                logger.debug('Error parsing authorlist for %s' % bookid)
    return authorlist
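A usage sketch; the dict keys follow the two branches above, so .get() is used defensively and the book id is illustrative:

for contributor in getBookAuthors('12345'):
    logger.debug("%s %s" % (contributor.get('name', ''), contributor.get('role', '')))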
Example No. 28
def getCommitDifferenceFromGit():
    commits = -1
    # Takes the current latest version value and tries to diff it with the latest
    # version in the current branch.
    commit_list = ''
    if lazylibrarian.LATEST_VERSION == 'Not_Available_From_GitHUB':
        commits = 0  # don't report a commit diff as we don't know anything
    if lazylibrarian.CURRENT_VERSION and commits != 0:
        logger.info('[VersionCheck] -  Comparing currently installed version with latest github version')
        url = 'https://api.github.com/repos/%s/LazyLibrarian/compare/%s...%s' % (
            lazylibrarian.GIT_USER, lazylibrarian.CURRENT_VERSION, lazylibrarian.LATEST_VERSION)
        logger.debug('(getCommitDifferenceFromGit) -  Check for differences between local & repo by [%s]' % url)

        try:
            result = urllib2.urlopen(url, timeout=30).read()

            try:
                logger.debug('JSONDecode url')
                git = simplejson.JSONDecoder().decode(result)
                logger.debug('pull total_commits from json object')
                commits = git['total_commits']

                logger.debug('(getCommitDifferenceFromGit) -  GitHub reports as follows Status [%s] - Ahead [%s] - Behind [%s] - Total Commits [%s]' % (
                             git['status'], git['ahead_by'], git['behind_by'], git['total_commits']))

                if git['total_commits'] > 0:
                    messages = []
                    for item in git['commits']:
                        messages.insert(0, item['commit']['message'])
                    for line in messages:
                        commit_list = "%s\n%s" % (commit_list, line)
            except Exception:
                logger.warn('(getCommitDifferenceFromGit) -  could not get difference status from GitHub')

        except Exception:
            logger.warn(
                '(getCommitDifferenceFromGit) -  Could not get commits behind from github. Can happen if you have a local commit not pushed to repo or no connection to github')

        if commits > 1:
            logger.info('[VersionCheck] -  New version is available. You are %s commits behind' % commits)
        elif commits == 1:
            logger.info('[VersionCheck] -  New version is available. You are one commit behind')
        elif commits == 0:
            logger.info('[VersionCheck] -  lazylibrarian is up to date ')
        elif commits == -1:
            logger.info(
                '[VersionCheck] -  You are running an unknown version of lazylibrarian. Run the updater to identify your version')

    elif lazylibrarian.LATEST_VERSION == 'Not_Available_From_GitHUB':
        commit_list = 'Unable to get latest version from GitHub'
        logger.info(commit_list)
    else:
        logger.info('You are running an unknown version of lazylibrarian. Run the updater to identify your version')

    logger.debug('(getCommitDifferenceFromGit) - exiting with commit value of [%s]' % commits)
    # lazylibrarian.COMMITS_BEHIND = commits
    return commits, commit_list
Example No. 29
    def __init__(self, name=None):
        if isinstance(name, str) and hasattr(name, "decode"):
            self.name = name.decode(lazylibrarian.SYS_ENCODING)
        else:
            self.name = name
        # self.type = type
        if not lazylibrarian.CONFIG['GR_API']:
            logger.warn('No Goodreads API key, check config')
        self.params = {"key": lazylibrarian.CONFIG['GR_API']}
Example No. 30
    def _sendProwl(prowl_api=None,
                   prowl_priority=None,
                   event=None,
                   message=None,
                   force=False):

        title = "LazyLibrarian"

        # suppress notifications if the notifier is disabled but the notify options are checked
        if not lazylibrarian.CONFIG['USE_PROWL'] and not force:
            return False

        if prowl_api is None:
            prowl_api = lazylibrarian.CONFIG['PROWL_APIKEY']

        if prowl_priority is None:
            prowl_priority = lazylibrarian.CONFIG['PROWL_PRIORITY']

        message = message.encode(lazylibrarian.SYS_ENCODING)

        logger.debug(u"Prowl: title: " + title)
        logger.debug(u"Prowl: event: " + event)
        logger.debug(u"Prowl: message: " + message)

        data = {
            'event': event,
            'description': message,
            'application': title,
            'apikey': prowl_api,
            'priority': prowl_priority
        }

        try:
            http_handler = HTTPSConnection("api.prowlapp.com")

            http_handler.request(
                "POST",
                "/publicapi/add",
                headers={'Content-type': "application/x-www-form-urlencoded"},
                body=urlencode(data))

            response = http_handler.getresponse()
            request_status = response.status

            if request_status == 200:
                logger.info('Prowl notifications sent.')
                return True
            elif request_status == 401:
                logger.info('Prowl auth failed: %s' % response.reason)
                return False
            else:
                logger.info('Prowl notification failed.')
                return False

        except Exception as e:
            logger.warn('Error sending to Prowl: %s' % e)
            return False
Example No. 31
    def _api_version(self):
        # noinspection PyBroadException
        try:
            version = int(self._command('version/api'))
        except Exception as err:
            logger.warn('Error getting api version. qBittorrent %s: %s' %
                        (type(err).__name__, str(err)))
            version = 1
        return version
Example No. 32
def notify_download(title, bookid=None):
    try:
        for n in notifiers:
            if 'EmailNotifier' in str(n):
                n.notify_download(title, bookid=bookid)
            else:
                n.notify_download(title)
    except Exception as e:
        logger.warn('Notify download failed: %s' % str(e))
Example No. 33
def LISTOPIA(host=None, feednr=None, priority=0):
    """
    Goodreads Listopia query function, return all the results in a list
    """
    results = []
    maxpage = priority
    basehost = host
    if not str(host)[:4] == "http":
        host = 'http://' + host

    page = 0
    next_page = True

    while next_page:
        URL = host
        if page:
            URL = "%s?page=%i" % (host, page)

        result, success = fetchURL(URL)
        next_page = False

        if not success:
            logger.error('Error fetching data from %s: %s' % (URL, result))
            BlockProvider(basehost, result)

        elif result:
            logger.debug('Parsing results from %s' % URL)
            data = result.split('<td valign="top" class="number">')
            for entry in data[1:]:
                try:
                    # index = entry.split('<')[0]
                    title = entry.split('<a title="')[1].split('"')[0]
                    book_id = entry.split('data-resource-id="')[1].split('"')[0]
                    author_name = entry.split('<a class="authorName"')[1].split('"name">')[1].split('<')[0]
                    results.append({
                        'rss_prov': host.split('/list/show/')[1],
                        'rss_feed': feednr,
                        'rss_title': title,
                        'rss_author': author_name,
                        'rss_bookid': book_id,
                        'rss_isbn': '',
                        'priority': priority
                    })
                    next_page = True
                except IndexError:
                    pass
        else:
            logger.debug('No data returned from %s' % URL)

        page += 1
        if maxpage:
            if page >= maxpage:
                logger.warn('Maximum results page reached, still more results available')
                next_page = False

    logger.debug("Found %i result%s from %s" % (len(results), plural(len(results)), host))
    return results
Example No. 34
    def get_gr_shelf_contents(self, shelf='to-read'):
        global consumer, client, token, user_id
        if not lazylibrarian.CONFIG['GR_API'] or not lazylibrarian.CONFIG['GR_SECRET'] or not \
                lazylibrarian.CONFIG['GR_OAUTH_TOKEN'] or not lazylibrarian.CONFIG['GR_OAUTH_SECRET']:
            logger.warn(
                "Goodreads shelf contents error: Please authorise first")
            return []
        else:
            #
            # loop over each page of owned books
            #     loop over each book
            #         add book to list
            #
            consumer = oauth.Consumer(key=str(lazylibrarian.CONFIG['GR_API']),
                                      secret=str(
                                          lazylibrarian.CONFIG['GR_SECRET']))
            token = oauth.Token(lazylibrarian.CONFIG['GR_OAUTH_TOKEN'],
                                lazylibrarian.CONFIG['GR_OAUTH_SECRET'])
            client = oauth.Client(consumer, token)
            user_id = self.getUserId()
            logger.debug('User id is: ' + user_id)

            current_page = 0
            total_books = 0
            gr_list = []

            while True:
                current_page = current_page + 1
                content = self.getShelfBooks(current_page, shelf)
                xmldoc = xml.dom.minidom.parseString(content)

                page_books = 0
                for book in xmldoc.getElementsByTagName('book'):
                    book_id, book_title = self.getBookInfo(book)

                    if lazylibrarian.LOGLEVEL & lazylibrarian.log_grsync:
                        try:
                            logger.debug('Book %10s : %s' %
                                         (str(book_id), book_title))
                        except UnicodeEncodeError:
                            logger.debug('Book %10s : %s' %
                                         (str(book_id),
                                          'Title Messed Up By Unicode Error'))

                    gr_list.append(book_id)

                    page_books += 1
                    total_books += 1

                if lazylibrarian.LOGLEVEL & lazylibrarian.log_grsync:
                    logger.debug('Found %s books on page %s (total = %s)' %
                                 (page_books, current_page, total_books))
                if page_books == 0:
                    break

            logger.debug('Found %s' % total_books)
            return gr_list
Example No. 35
def DirectDownloadMethod(bookid=None, tor_prov=None, tor_title=None, tor_url=None, bookname=None):
    myDB = database.DBConnection()
    downloadID = False
    Source = "DIRECT"
    full_url = tor_url  # keep the url as stored in "wanted" table

    request = urllib2.Request(ur'%s' % tor_url)
    if lazylibrarian.PROXY_HOST:
        request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
    request.add_header('Accept-encoding', 'gzip')
    request.add_header('User-Agent', USER_AGENT)

    try:
        response = urllib2.urlopen(request, timeout=90)
        if response.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(response.read())
            f = gzip.GzipFile(fileobj=buf)
            fdata = f.read()
        else:
            fdata = response.read()
        bookname = '.'.join(bookname.rsplit(' ', 1))  # last word is the extension
        logger.debug("File download got %s bytes for %s/%s" % (len(fdata), tor_title, bookname))
        destdir = os.path.join(lazylibrarian.DIRECTORY('Download'), tor_title)
        try:
            os.makedirs(destdir)
            setperm(destdir)
        except OSError as e:
            if e.errno != 17:  # directory already exists is ok. Using errno because of different languages
                logger.debug("Error creating directory %s, %s" % (destdir, e.strerror))

        destfile = os.path.join(destdir, bookname)
        try:
            with open(destfile, 'wb') as bookfile:
                bookfile.write(fdata)
            setperm(destfile)
            downloadID = True
        except Exception as e:
            logger.debug("Error writing book to %s, %s" % (destfile, str(e)))

    except (socket.timeout) as e:
        logger.warn('Timeout fetching file from url: %s' % tor_url)
        return False
    except (urllib2.URLError) as e:
        logger.warn('Error fetching file from url: %s, %s' % (tor_url, e.reason))
        return False

    if downloadID:
        logger.debug(u'File %s has been downloaded from %s' % (tor_title, tor_url))
        myDB.action('UPDATE books SET status = "Snatched" WHERE BookID="%s"' % bookid)
        myDB.action('UPDATE wanted SET status = "Snatched", Source = "%s", DownloadID = "%s" WHERE NZBurl="%s"' %
                    (Source, downloadID, full_url))
        return True
    else:
        logger.error(u'Failed to download file @ <a href="%s">%s</a>' % (full_url, tor_url))
        myDB.action('UPDATE wanted SET status = "Failed" WHERE NZBurl="%s"' % full_url)
        return False
Example No. 36
def addAuthorToDB(authorname=None, refresh=False):

    myDB = database.DBConnection()

    GR = GoodReads(authorname)

    query = "SELECT * from authors WHERE AuthorName='%s'" % authorname.replace("'", "''")
    dbauthor = myDB.action(query).fetchone()
    controlValueDict = {"AuthorName": authorname}

    if dbauthor is None:
        newValueDict = {
            "AuthorID": "0: %s" % (authorname),
            "Status": "Loading"
        }
        logger.debug("Now adding new author: %s to database" % authorname)
    else:
        newValueDict = {"Status": "Loading"}
        logger.debug("Now updating author: %s" % authorname)
    myDB.upsert("authors", newValueDict, controlValueDict)

    author = GR.find_author_id(refresh=refresh)
    if author:
        authorid = author['authorid']
        authorlink = author['authorlink']
        authorimg = author['authorimg']
        if 'nophoto' in authorimg:
            authorimg = getAuthorImage(authorid)
        if authorimg and authorimg.startswith('http'):
            newimg = cache_cover(authorid, authorimg)
            if newimg:
                authorimg = newimg
        controlValueDict = {"AuthorName": authorname}
        newValueDict = {
            "AuthorID": authorid,
            "AuthorLink": authorlink,
            "AuthorImg": authorimg,
            "AuthorBorn": author['authorborn'],
            "AuthorDeath": author['authordeath'],
            "DateAdded": today(),
            "Status": "Loading"
        }
        myDB.upsert("authors", newValueDict, controlValueDict)
    else:
        logger.warn(u"Nothing found for %s" % authorname)
        myDB.action('DELETE from authors WHERE AuthorName="%s"' % authorname)
        return
    # process books
    if lazylibrarian.BOOK_API == "GoogleBooks":
        book_api = GoogleBooks()
        book_api.get_author_books(authorid, authorname, refresh=refresh)
    elif lazylibrarian.BOOK_API == "GoodReads":
        GR.get_author_books(authorid, authorname, refresh=refresh)

    update_totals(authorid)
    logger.debug("[%s] Author update complete" % authorname)
Exemplo n.º 37
0
def DirectDownloadMethod(bookid=None, tor_title=None, tor_url=None, bookname=None):
    myDB = database.DBConnection()
    downloadID = False
    Source = "DIRECT"
    full_url = tor_url  # keep the url as stored in "wanted" table

    request = urllib2.Request(ur'%s' % tor_url)
    if lazylibrarian.CONFIG['PROXY_HOST']:
        request.set_proxy(lazylibrarian.CONFIG['PROXY_HOST'], lazylibrarian.CONFIG['PROXY_TYPE'])
    request.add_header('Accept-encoding', 'gzip')
    request.add_header('User-Agent', USER_AGENT)

    try:
        response = urllib2.urlopen(request, timeout=90)
        if response.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(response.read())
            f = gzip.GzipFile(fileobj=buf)
            fdata = f.read()
        else:
            fdata = response.read()
        bookname = '.'.join(bookname.rsplit(' ', 1))  # last word is the extension
        logger.debug("File download got %s bytes for %s/%s" % (len(fdata), tor_title, bookname))
        destdir = os.path.join(lazylibrarian.DIRECTORY('Download'), tor_title)
        try:
            os.makedirs(destdir)
            setperm(destdir)
        except OSError as e:
            if e.errno != 17:  # directory already exists is ok. Using errno because of different languages
                logger.debug("Error creating directory %s, %s" % (destdir, e.strerror))

        destfile = os.path.join(destdir, bookname)
        try:
            with open(destfile, 'wb') as bookfile:
                bookfile.write(fdata)
            setperm(destfile)
            downloadID = True
        except Exception as e:
            logger.debug("Error writing book to %s, %s" % (destfile, str(e)))

    except socket.timeout:
        logger.warn('Timeout fetching file from url: %s' % tor_url)
        return False
    except urllib2.URLError as e:
        logger.warn('Error fetching file from url: %s, %s' % (tor_url, e.reason))
        return False

    if downloadID:
        logger.debug(u'File %s has been downloaded from %s' % (tor_title, tor_url))
        myDB.action('UPDATE books SET status = "Snatched" WHERE BookID="%s"' % bookid)
        myDB.action('UPDATE wanted SET status = "Snatched", Source = "%s", DownloadID = "%s" WHERE NZBurl="%s"' %
                    (Source, downloadID, full_url))
        return True
    else:
        logger.error(u'Failed to download file @ <a href="%s">%s</a>' % (full_url, tor_url))
        myDB.action('UPDATE wanted SET status = "Failed" WHERE NZBurl="%s"' % full_url)
        return False
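
A minimal usage sketch for the function above; the id, title and URL are placeholders for values that normally come from the wanted table.

ok = DirectDownloadMethod(bookid='1234',
                          tor_title='Author Name - Example Book',
                          tor_url='http://example.com/get/1234',
                          bookname='Example Book epub')  # last word becomes the .epub extension
if not ok:
    print('download failed')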
Exemplo n.º 38
0
def dump_table(table, savedir=None, status=None):
    myDB = database.DBConnection()
    # noinspection PyBroadException
    try:
        columns = myDB.select('PRAGMA table_info(%s)' % table)
        if not columns:  # no such table
            logger.warn("No such table [%s]" % table)
            return 0

        if not savedir or not os.path.isdir(savedir):
            savedir = lazylibrarian.DATADIR

        headers = ''
        for item in columns:
            if headers:
                headers += ','
            headers += item[1]
        if status:
            cmd = 'SELECT %s from %s WHERE status="%s"' % (headers, table,
                                                           status)
        else:
            cmd = 'SELECT %s from %s' % (headers, table)
        data = myDB.select(cmd)
        count = 0
        if data is not None:
            label = table
            if status:
                label += '_%s' % status
            csvFile = os.path.join(savedir, "%s.csv" % label)

            if PY2:
                fmode = 'wb'
            else:
                fmode = 'w'
            with open(csvFile, fmode) as csvfile:
                csvwrite = writer(csvfile,
                                  delimiter=',',
                                  quotechar='"',
                                  quoting=QUOTE_MINIMAL)
                headers = headers.split(',')
                csvwrite.writerow(headers)
                for item in data:
                    if PY2:
                        csvwrite.writerow(
                            [makeBytestr(s) if s else '' for s in item])
                    else:
                        csvwrite.writerow([str(s) if s else '' for s in item])
                    count += 1
            msg = "Exported %s item%s to %s" % (count, plural(count), csvFile)
            logger.info(msg)
        return count

    except Exception:
        msg = 'Unhandled exception in dump_table: %s' % traceback.format_exc()
        logger.error(msg)
        return 0
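
A usage sketch, assuming dump_table is importable from this module; the table and status names are only examples.

# export every 'Wanted' row from the wanted table to <DATADIR>/wanted_Wanted.csv
count = dump_table('wanted', savedir=None, status='Wanted')
print('%s rows exported' % count)

# export a whole table to a specific directory
dump_table('authors', savedir='/tmp')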
Exemplo n.º 39
0
    def __init__(self, name=None):
        self.name = name
        if not lazylibrarian.CONFIG['GB_API']:
            logger.warn('No GoogleBooks API key, check config')
        self.url = 'https://www.googleapis.com/books/v1/volumes?q='
        self.params = {
            'maxResults': 40,
            'printType': 'books',
            'key': lazylibrarian.CONFIG['GB_API']
        }
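
The url and params built here are presumably combined with a quoted search term elsewhere in the class; a rough sketch of how such a request URL could be assembled (the search term and key are placeholders):

import urllib

base = 'https://www.googleapis.com/books/v1/volumes?q='
params = {'maxResults': 40, 'printType': 'books', 'key': 'MY_GB_API_KEY'}
searchterm = 'inauthor:"Iain Banks"'
URL = base + urllib.quote_plus(searchterm) + '&' + urllib.urlencode(params)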
Exemplo n.º 40
0
def NewzNab(book=None, newznabNumber=None):

    if newznabNumber == "1":
        HOST = lazylibrarian.NEWZNAB_HOST
        logger.info('Searching for %s at: %s' % (book['searchterm'], lazylibrarian.NEWZNAB_HOST))
    elif newznabNumber == "2":
        HOST = lazylibrarian.NEWZNAB_HOST2
        logger.info('Searching for %s at: %s' % (book['searchterm'], lazylibrarian.NEWZNAB_HOST2))

    results = []

    if lazylibrarian.EBOOK_TYPE is None:
        params = {
            "t": "book",
            "apikey": lazylibrarian.NEWZNAB_API,
            #"cat": 7020,
            "author": book['searchterm']
        }
    else:
        params = {
            "t": "search",
            "apikey": lazylibrarian.NEWZNAB_API,
            "cat": 7020,
            "q": book['searchterm'],
            "extended": 1,
        }

    if not str(HOST).startswith("http"):
        HOST = 'http://' + HOST

    URL = HOST + '/api?' + urllib.urlencode(params)

    try:
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST,
                              lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', common.USER_AGENT)
        opener = urllib2.build_opener(
            SimpleCache.CacheHandler(".ProviderCache"),
            SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)

        try:
            data = ElementTree.parse(resp)
        except (urllib2.URLError, IOError, EOFError) as e:
            logger.warn('Error fetching data from %s: %s' %
                        (lazylibrarian.NEWZNAB_HOST, e))
            data = None

    except Exception as e:
        logger.error("Error opening url %s: %s" % (URL, e))
        data = None
Exemplo n.º 41
0
    def follow_author(self, authorid=None, follow=True):
        global consumer, client, token, user_id
        if not lazylibrarian.CONFIG['GR_API'] or not lazylibrarian.CONFIG['GR_SECRET'] or not \
                lazylibrarian.CONFIG['GR_OAUTH_TOKEN'] or not lazylibrarian.CONFIG['GR_OAUTH_SECRET']:
            logger.warn(
                "Goodreads follow author error: Please authorise first")
            return False, 'Unauthorised'

        consumer = oauth.Consumer(key=str(lazylibrarian.CONFIG['GR_API']),
                                  secret=str(
                                      lazylibrarian.CONFIG['GR_SECRET']))
        token = oauth.Token(lazylibrarian.CONFIG['GR_OAUTH_TOKEN'],
                            lazylibrarian.CONFIG['GR_OAUTH_SECRET'])
        client = oauth.Client(consumer, token)
        user_id = self.getUserId()

        # follow https://www.goodreads.com/author_followings?id=AUTHOR_ID&format=xml
        # unfollow https://www.goodreads.com/author_followings/AUTHOR_FOLLOWING_ID?format=xml
        time_now = int(time.time())
        if time_now <= lazylibrarian.LAST_GOODREADS:
            time.sleep(1)
            lazylibrarian.LAST_GOODREADS = time_now

        if follow:
            body = urlencode({'id': authorid, 'format': 'xml'})
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            try:
                response, content = client.request(
                    '%s/author_followings' % 'https://www.goodreads.com',
                    'POST', body, headers)
            except Exception as e:
                logger.error("Exception in client.request: %s %s" %
                             (type(e).__name__, traceback.format_exc()))
                return False, "Error in client.request: see error log"
        else:
            body = urlencode({'format': 'xml'})
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            try:
                response, content = client.request(
                    '%s/author_followings/%s' %
                    ('https://www.goodreads.com', authorid), 'DELETE', body,
                    headers)
            except Exception as e:
                logger.error("Exception in client.request: %s %s" %
                             (type(e).__name__, traceback.format_exc()))
                return False, "Error in client.request: see error log"

        if follow and response['status'] == '422':
            return True, 'Already following'

        if response['status'].startswith('2'):
            if follow:
                return True, content.split('<id>')[1].split('</id>')[0]
            return True, ''
        return False, 'Failure status: %s' % response['status']
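
A hedged usage sketch; the instance name below is a placeholder for whatever class defines follow_author, and the author id is made up.

gr = GoodreadsOauth()   # placeholder name for the class this method belongs to
ok, following_id = gr.follow_author(authorid='1654', follow=True)
if ok and following_id not in ('', 'Already following'):
    # to unfollow, the author_following id returned above is passed back in
    gr.follow_author(authorid=following_id, follow=False)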
Exemplo n.º 42
0
    def _notify(self, message=None, event=None, slack_token=None, method=None, force=False):
        """
        Sends a slack incoming-webhook notification based on the provided info or LL config

        message: The message string to send
        force: If True then the notification will be sent even if slack is disabled in the config
        """
        try:
            message = unaccented(message)
        except Exception as e:
            logger.warn("Slack: could not convert message: %s" % e)
Exemplo n.º 43
0
def get_xml_request(my_url, useCache=True):
    # Original simplecache
    # opener = urllib.request.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
    # SimpleCache.ThrottlingProcessor(5))
    # resp = opener.open(request)
    # Simplified simplecache, no throttling, no headers as we dont use them, added cache expiry
    # we can simply cache the xml with...
    # hashfilename = hash url
    # if hashfilename exists, return its contents
    # if not, urllib2.urlopen()
    # store the xml
    # return the xml, and whether it was found in the cache
    # Need to expire the cache entries, or we won't search for anything new
    # default to 30 days for now. Authors dont write that quickly.
    #
    cacheLocation = "XMLCache"
    cacheLocation = os.path.join(lazylibrarian.CACHEDIR, cacheLocation)
    if not os.path.exists(cacheLocation):
        os.mkdir(cacheLocation)
    myhash = md5.new(my_url).hexdigest()
    valid_cache = False
    hashname = cacheLocation + os.sep + myhash + ".xml"

    if useCache and os.path.isfile(hashname):
        cache_modified_time = os.stat(hashname).st_mtime
        time_now = time.time()
        if cache_modified_time < time_now - (
                lazylibrarian.CACHE_AGE * 24 * 60 *
                60):  # expire after this many seconds
            # Cache is old, delete entry
            os.remove(hashname)
        else:
            valid_cache = True

    if valid_cache:
        lazylibrarian.CACHE_HIT = int(lazylibrarian.CACHE_HIT) + 1
        logger.debug(u"CacheHandler: Returning CACHED response for %s" %
                     my_url)
        with open(hashname, "r") as cachefile:
            source_xml = cachefile.read()
    else:
        lazylibrarian.CACHE_MISS = int(lazylibrarian.CACHE_MISS) + 1
        source_xml, success = fetchURL(my_url)
        if success:
            logger.debug(u"CacheHandler: Storing XML for %s" % my_url)
            with open(hashname, "w") as cachefile:
                cachefile.write(source_xml)
        else:
            logger.warn(u"Got error response for %s: %s" %
                        (my_url, source_xml))
            return None, False

    root = ElementTree.fromstring(source_xml)
    return root, valid_cache
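
A usage sketch; the Goodreads URL is just an example of the sort of XML endpoint this gets called with, and MY_KEY is a placeholder.

root, from_cache = get_xml_request('https://www.goodreads.com/search/index.xml?key=MY_KEY&q=pratchett')
if root is not None:
    print('got <%s>, cached copy used: %s' % (root.tag, from_cache))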
Exemplo n.º 44
0
    def _get_sid(self, base_url, username, password):
        # login so we can capture SID cookie
        login_data = makeBytestr(urlencode({'username': username, 'password': password}))
        try:
            _ = self.opener.open(base_url + '/login', login_data)
        except Exception as err:
            logger.error('Error getting SID. qBittorrent %s: %s' % (type(err).__name__, str(err)))
            logger.warn('Unable to log in to %s/login' % base_url)
            return
        for cookie in self.cookiejar:
            logger.debug('login cookie: ' + cookie.name + ', value: ' + cookie.value)
        return
Exemplo n.º 45
0
def _addTorrentURI(task_cgi, sid, torurl):
    # Sends a magnet, Torrent url or NZB url to DownloadStation
    # Return task ID, or False if failed
    params = {
        "api": "SYNO.DownloadStation.Task",
        "version": "1",
        "method": "create",
        "session": "LazyLibrarian",
        "uri": torurl,
        "destination": lazylibrarian.CONFIG['SYNOLOGY_DIR'],
        "_sid": sid
    }

    result, success = _getJSON(task_cgi, params)
    logger.debug("Result from create = %s" % repr(result))
    if success:
        if not result['success']:
            errnum = result['error']['code']
            res = "Synology Create Error: %s" % _errorMsg(errnum, "create")
            logger.debug(res)
        else:
            # DownloadStation doesn't return the download_id for the newly added uri
            # which we need for monitoring progress & deleting etc.
            # so we have to scan the task list to get the id
            for task in _listTasks(task_cgi, sid):  # type: dict
                if task['status'] == 'error':
                    try:
                        errmsg = task['status_extra']['error_detail']
                    except KeyError:
                        errmsg = "No error details"
                    res = "Synology task [%s] failed: %s" % (task['title'],
                                                             errmsg)
                    logger.warn(res)
                else:
                    info = _getInfo(task_cgi, sid, task['id'])  # type: dict
                    try:
                        uri = info['additional']['detail']['uri']
                        if uri == torurl:
                            logger.debug('Synology task %s for %s' %
                                         (task['id'], task['title']))
                            return task['id'], ''
                    except KeyError:
                        res = "Unable to get uri for [%s] from getInfo" % task[
                            'title']
                        logger.debug(res)
                        return False, res
            res = "Synology URL [%s] was not found in tasklist" % torurl
            logger.debug(res)
            return False, res
    else:
        res = "Synology Failed to add task: %s" % result
        logger.debug(res)
    return False, res
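
A usage sketch; the endpoint, session id and magnet link are placeholders for values the surrounding Synology code obtains when it logs in.

task_cgi = 'http://nas.local:5000/webapi/DownloadStation/task.cgi'   # placeholder endpoint
sid = 'abcdef123456'                                                 # placeholder session id
task_id, err = _addTorrentURI(task_cgi, sid, 'magnet:?xt=urn:btih:0000000000000000000000000000000000000000')
if task_id:
    print('queued as Synology task %s' % task_id)
else:
    print('failed: %s' % err)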
Exemplo n.º 46
0
def import_book(pp_path=None, bookID=None):

    # Separated this into a function so we can more easily import books from an alternate directory
    # and move them into LL folder structure given just the bookID, returns True or False
    # eg if import_book(source_directory, bookID):
    #         ppcount = ppcount + 1
    #
    myDB = database.DBConnection()
    data = myDB.select('SELECT * from books WHERE BookID="%s"' % bookID)
    if data:
        authorname = data[0]['AuthorName']
        bookname = data[0]['BookName']

        # try:
        #    auth_dir = os.path.join(lazylibrarian.DESTINATION_DIR, authorname).encode(lazylibrarian.SYS_ENCODING)
        #    os.chmod(auth_dir, 0777)
        # except Exception, e:
        #    logger.debug("Could not chmod author directory: " + str(auth_dir))

        if 'windows' in platform.system().lower() and '/' in lazylibrarian.EBOOK_DEST_FOLDER:
            logger.warn('Please check your EBOOK_DEST_FOLDER setting')
            lazylibrarian.EBOOK_DEST_FOLDER = lazylibrarian.EBOOK_DEST_FOLDER.replace('/', '\\')

        dest_path = lazylibrarian.EBOOK_DEST_FOLDER.replace('$Author', authorname).replace('$Title', bookname)
        global_name = lazylibrarian.EBOOK_DEST_FILE.replace('$Author', authorname).replace('$Title', bookname)
        global_name = common.remove_accents(global_name)
        # Remove characters we don't want in the filename BEFORE adding to DESTINATION_DIR
        # as windows drive identifiers have colon, eg c:  but no colons allowed elsewhere?
        dic = {'<': '', '>': '', '...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's',
               ' + ': ' ', '"': '', ',': '', '*': '', ':': '', ';': '', '\'': ''}
        dest_path = formatter.latinToAscii(formatter.replace_all(dest_path, dic))
        dest_path = os.path.join(lazylibrarian.DESTINATION_DIR, dest_path).encode(lazylibrarian.SYS_ENCODING)

        processBook = processDestination(pp_path, dest_path, authorname, bookname, global_name)

        if processBook:
            # update nzbs
            controlValueDict = {"BookID": bookID}
            newValueDict = {"Status": "Processed", "NZBDate": formatter.now()}  # say when we processed it
            myDB.upsert("wanted", newValueDict, controlValueDict)
            processExtras(myDB, dest_path, global_name, data)
            logger.info('Successfully processed: %s' % global_name)
            notifiers.notify_download(formatter.latinToAscii(global_name) + ' at ' + formatter.now())
            return True
        else:
            logger.error('Postprocessing for %s has failed.' % global_name)
            logger.error('Warning - Residual files remain in %s.fail' % pp_path)
            try:
                os.rename(pp_path, pp_path + '.fail')
            except Exception:
                logger.debug("Unable to rename %s" % pp_path)
            return False
Exemplo n.º 47
0
    def _action(self, query, args=None, suppress=None):
        sqlResult = None
        attempt = 0

        while attempt < 5:
            try:
                if not args:
                    sqlResult = self.connection.execute(query)
                else:
                    sqlResult = self.connection.execute(query, args)
                self.connection.commit()
                break

            except sqlite3.OperationalError as e:
                if "unable to open database file" in str(
                        e) or "database is locked" in str(e):
                    logger.warn('Database Error: %s' % e)
                    logger.debug("Attempted db query: [%s]" % query)
                    attempt += 1
                    if attempt == 5:
                        logger.error("Failed db query: [%s]" % query)
                    else:
                        time.sleep(1)
                else:
                    logger.error('Database error: %s' % e)
                    logger.error("Failed query: [%s]" % query)
                    raise

            except sqlite3.IntegrityError as e:
                # we could ignore unique errors in sqlite by using "insert or ignore into" statements
                # but this would also ignore null values as we can't specify which errors to ignore :-(
                # Also the python interface to sqlite only returns english text messages, not error codes
                msg = str(e).lower()
                if suppress and 'UNIQUE' in suppress and (
                        'not unique' in msg
                        or 'unique constraint failed' in msg):
                    if lazylibrarian.LOGLEVEL & lazylibrarian.log_dbcomms:
                        logger.debug('Suppressed [%s] %s' % (query, e))
                        logger.debug("Suppressed args: [%s]" % str(args))
                    self.connection.commit()
                    break
                else:
                    logger.error('Database Integrity error: %s' % e)
                    logger.error("Failed query: [%s]" % query)
                    logger.error("Failed args: [%s]" % str(args))
                    raise

            except sqlite3.DatabaseError as e:
                logger.error('Fatal error executing %s :: %s' % (query, e))
                raise

        return sqlResult
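
A usage sketch of the suppress parameter; the table and column names are made up for the example.

myDB = database.DBConnection()
# ignore a duplicate-row error but let any other integrity error raise as usual
myDB._action('INSERT INTO seriesauthors (SeriesID, AuthorID) VALUES (?, ?)',
             ('1', '2'), suppress='UNIQUE')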
Exemplo n.º 48
0
    def __init__(self, name=None):
        if isinstance(name, str) and hasattr(name, "decode"):
            self.name = name.decode(lazylibrarian.SYS_ENCODING)
        else:
            self.name = name
        if not lazylibrarian.CONFIG['GB_API']:
            logger.warn('No GoogleBooks API key, check config')
        self.url = 'https://www.googleapis.com/books/v1/volumes?q='
        self.params = {
            'maxResults': 40,
            'printType': 'books',
            'key': lazylibrarian.CONFIG['GB_API']
        }
Exemplo n.º 49
0
def addAuthorToDB(authorname=None, refresh=False):
    threading.currentThread().name = "DBIMPORT"

    myDB = database.DBConnection()

    GR = GoodReads(authorname)

    query = "SELECT * from authors WHERE AuthorName='%s'" % authorname.replace(
        "'", "''")
    dbauthor = myDB.action(query).fetchone()
    controlValueDict = {"AuthorName": authorname}

    if dbauthor is None:
        newValueDict = {
            "AuthorID": "0: %s" % (authorname),
            "Status": "Loading"
        }
        logger.debug("Now adding new author: %s to database" % authorname)
    else:
        newValueDict = {"Status": "Loading"}
        logger.debug("Now updating author: %s" % authorname)
    myDB.upsert("authors", newValueDict, controlValueDict)

    author = GR.find_author_id(refresh=refresh)
    if author:
        authorid = author['authorid']
        authorlink = author['authorlink']
        authorimg = author['authorimg']
        controlValueDict = {"AuthorName": authorname}
        newValueDict = {
            "AuthorID": authorid,
            "AuthorLink": authorlink,
            "AuthorImg": authorimg,
            "AuthorBorn": author['authorborn'],
            "AuthorDeath": author['authordeath'],
            "DateAdded": formatter.today(),
            "Status": "Loading"
        }
        myDB.upsert("authors", newValueDict, controlValueDict)
    else:
        logger.warn(u"Nothing found for %s" % authorname)
        myDB.action('DELETE from authors WHERE AuthorName="%s"' % authorname)
        return
    # process books
    if lazylibrarian.BOOK_API == "GoogleBooks":
        book_api = GoogleBooks()
        book_api.get_author_books(authorid, authorname, refresh=refresh)
    elif lazylibrarian.BOOK_API == "GoodReads":
        GR.get_author_books(authorid, authorname, refresh=refresh)

    logger.debug("[%s] Author update complete" % authorname)
Exemplo n.º 50
0
def get_cached_request(url, useCache=True, cache="XML"):
    # hashfilename = hash of url
    # if hashfilename exists in cache and isn't too old, return its contents
    # if not, read url and store the result in the cache
    # return the result, and boolean True if source was cache
    #
    cacheLocation = cache + "Cache"
    cacheLocation = os.path.join(lazylibrarian.CACHEDIR, cacheLocation)
    if not os.path.exists(cacheLocation):
        os.mkdir(cacheLocation)
    myhash = hashlib.md5(url).hexdigest()
    valid_cache = False
    source = None
    hashfilename = cacheLocation + os.sep + myhash + "." + cache.lower()

    if useCache and os.path.isfile(hashfilename):
        cache_modified_time = os.stat(hashfilename).st_mtime
        time_now = time.time()
        expiry = lazylibrarian.CONFIG[
            'CACHE_AGE'] * 24 * 60 * 60  # expire cache after this many seconds
        if cache_modified_time < time_now - expiry:
            # Cache entry is too old, delete it
            os.remove(hashfilename)
        else:
            valid_cache = True

    if valid_cache:
        lazylibrarian.CACHE_HIT = int(lazylibrarian.CACHE_HIT) + 1
        logger.debug(u"CacheHandler: Returning CACHED response for %s" % url)
        if cache == "JSON":
            source = json.load(open(hashfilename))
        elif cache == "XML":
            with open(hashfilename, "r") as cachefile:
                result = cachefile.read()
            source = ElementTree.fromstring(result)
    else:
        lazylibrarian.CACHE_MISS = int(lazylibrarian.CACHE_MISS) + 1
        result, success = fetchURL(url)
        if success:
            logger.debug(u"CacheHandler: Storing %s for %s" % (cache, url))
            if cache == "JSON":
                source = json.loads(result)
                json.dump(source, open(hashfilename, "w"))
            elif cache == "XML":
                with open(hashfilename, "w") as cachefile:
                    cachefile.write(result)
                source = ElementTree.fromstring(result)
        else:
            logger.warn(u"Got error response for %s: %s" % (url, result))
            return None, False
    return source, valid_cache
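
A usage sketch covering both cache flavours; the URLs and key are placeholders.

# JSON cache, e.g. a GoogleBooks volume lookup - returns a dict
data, from_cache = get_cached_request('https://www.googleapis.com/books/v1/volumes/zyTCAlFPjgYC', cache="JSON")

# XML cache, e.g. a Goodreads author lookup - returns an ElementTree root
root, from_cache = get_cached_request('https://www.goodreads.com/author/show/1654?key=MY_KEY', cache="XML")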
Exemplo n.º 51
0
def processAlternate(source_dir=None):
    # import a book from an alternate directory
    if not source_dir or not os.path.isdir(source_dir):
        logger.warn('Alternate directory is not set or does not exist')
        return
    if source_dir == lazylibrarian.DESTINATION_DIR:
        logger.warn('Alternate directory must not be the same as destination')
        return
    new_book = book_file(source_dir, booktype='book')
    if new_book:
        # see if there is a metadata file in this folder with the info we need
        metafile = librarysync.opf_file(source_dir)
        try:
            metadata = librarysync.get_book_info(metafile)
        except Exception:
            metadata = {}
        if 'title' in metadata and 'creator' in metadata:
            authorname = metadata['creator']
            bookname = metadata['title']
        # if not, try to get metadata from the book file
        else:
            try:
                metadata = librarysync.get_book_info(new_book)
            except Exception:
                metadata = {}
        if 'title' in metadata and 'creator' in metadata:
            authorname = metadata['creator']
            bookname = metadata['title']
            myDB = database.DBConnection()

            authmatch = myDB.action(
                'SELECT * FROM authors where AuthorName="%s"' %
                (authorname)).fetchone()

            if authmatch:
                logger.debug("ALT: Author %s found in database" % (authorname))
            else:
                logger.debug("ALT: Author %s not found, adding to database" %
                             (authorname))
                importer.addAuthorToDB(authorname)

            bookid = librarysync.find_book_in_db(myDB, authorname, bookname)
            if bookid:
                import_book(source_dir, bookid)
            else:
                logger.warn("Book %s by %s not found in database" %
                            (bookname, authorname))
        else:
            logger.warn('Book %s has no metadata, unable to import' % new_book)
    else:
        logger.warn("No book file found in %s" % source_dir)
Exemplo n.º 52
0
def book_file(search_dir=None, booktype=None):
    # find a book/mag file in this directory, any book will do
    # return full pathname of book/mag, or empty string if none found
    if search_dir is None or booktype is None:
        return ""
    if search_dir and os.path.isdir(search_dir):
        try:
            for fname in os.listdir(makeBytestr(search_dir)):
                fname = makeUnicode(fname)
                if is_valid_booktype(fname, booktype=booktype):
                    return os.path.join(search_dir, fname)
        except Exception as e:
            logger.warn('Listdir error [%s]: %s %s' % (search_dir, type(e).__name__, str(e)))
    return ""
Exemplo n.º 53
0
    def _notify(self, message=None, event=None, pushover_apitoken=None, pushover_keys=None, 
                notificationType=None, method=None, force=False):
        """
        Sends a pushover notification based on the provided info or LL config

        message: The message string to send
        force: If True then the notification will be sent even if pushover is disabled in the config
        """
        try:
            message = common.removeDisallowedFilenameChars(message)
        except Exception as e:
            logger.warn("Pushover: could not convert message: %s" % e)
Exemplo n.º 54
0
def getLatestVersion_FromGit():
    latest_version = 'Unknown'

    # Can only work for non Windows driven installs, so check install type
    if lazylibrarian.CONFIG['INSTALL_TYPE'] == 'win':
        logger.debug(
            '(getLatestVersion_FromGit) Code Error - Windows install - should not be called under a windows install'
        )
        latest_version = 'WINDOWS INSTALL'
    else:
        # check current branch value of the local git repo as folks may pull from a branch not master
        branch = lazylibrarian.CONFIG['GIT_BRANCH']

        if branch == 'InvalidBranch':
            logger.debug(
                '(getLatestVersion_FromGit) - Failed to get a valid branch name from local repo'
            )
        else:
            if branch == 'Package':  # check packages against master
                branch = 'master'
            # Get the latest commit available from github
            url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
                lazylibrarian.CONFIG['GIT_USER'],
                lazylibrarian.CONFIG['GIT_REPO'], branch)
            logger.debug(
                '(getLatestVersion_FromGit) Retrieving latest version information from github command=[%s]'
                % url)
            try:
                request = urllib2.Request(url)
                request.add_header('User-Agent', USER_AGENT)
                resp = urllib2.urlopen(request, timeout=30)
                result = resp.read()
                git = simplejson.JSONDecoder().decode(result)
                latest_version = git['sha']
                logger.debug(
                    '(getLatestVersion_FromGit) Branch [%s] Latest Version has been set to [%s]'
                    % (branch, latest_version))
            except Exception as e:
                logger.warn(
                    '(getLatestVersion_FromGit) Could not get the latest commit from github'
                )
                if hasattr(e, 'reason'):
                    errmsg = e.reason
                else:
                    errmsg = str(e)

                logger.debug('git error for %s: %s' % (url, errmsg))
                latest_version = 'Not_Available_From_GitHUB'

    return latest_version
Exemplo n.º 55
0
def shutdown(restart=False, update=False):

    cherrypy.engine.exit()
    SCHED.shutdown(wait=False)
    config_write()

    if not restart and not update:
        logger.info('LazyLibrarian is shutting down...')
    if update:
        logger.info('LazyLibrarian is updating...')
        try:
            versioncheck.update()
        except Exception as e:
            logger.warn('LazyLibrarian failed to update: %s. Restarting.' % e)
Exemplo n.º 56
0
def fetchURL(URL, headers=None):
    """ Return the result of fetching a URL and True if success
        Otherwise return error message and False
        Allow one retry on timeout """
    request = urllib2.Request(URL)
    if lazylibrarian.PROXY_HOST:
        request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
    if headers is None:
        # google insists on having a user-agent
        request.add_header('User-Agent', USER_AGENT)
    else:
        for item in headers:
            request.add_header(item, headers[item])
    try:
        resp = urllib2.urlopen(request, timeout=30)
        if str(resp.getcode()).startswith("2"):
            # (200 OK etc)
            try:
                result = resp.read()
            except socket.error as e:
                return str(e), False
            return result, True
        else:
            return str(resp.getcode()), False
    except socket.timeout:
        logger.warn(u"fetchURL: retrying - got timeout on %s" % URL)
        try:
            resp = urllib2.urlopen(request, timeout=30)  # don't get stuck
            if str(resp.getcode()).startswith("2"):
                # (200 OK etc)
                try:
                    result = resp.read()
                except socket.error as e:
                    return str(e), False
                return result, True
            else:
                return str(resp.getcode()), False
        except socket.timeout as e:
            logger.error(u"fetchURL: Timeout getting response from %s" % URL)
            return str(e), False
        except urllib2.URLError as e:
            logger.error(u"fetchURL: Error getting response for %s: %s" %
                         (URL, e.reason))
            return e.reason, False
    except (urllib2.HTTPError, urllib2.URLError, ssl.SSLError) as e:
        if hasattr(e, 'reason'):
            return e.reason, False
        else:
            return str(e), False
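
A usage sketch of this variant; the URL and header are placeholders. Note that supplying headers here suppresses the default User-Agent.

result, success = fetchURL('http://example.com/rss', headers={'Accept': 'application/rss+xml'})
if not success:
    print('fetch failed: %s' % result)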
Exemplo n.º 57
0
def DownloadMethod(bookid=None, nzbprov=None, nzbtitle=None, nzburl=None):

    myDB = database.DBConnection()

    if lazylibrarian.SAB_HOST and not lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:
        download = sabnzbd.SABnzbd(nzbtitle, nzburl)

    elif lazylibrarian.NZBGET_HOST and not lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:
        headers = {'User-Agent': USER_AGENT}
        data = request.request_content(url=nzburl, headers=headers)
        nzb = classes.NZBDataSearchResult()
        nzb.extraInfo.append(data)
        nzb.name = nzbtitle
        nzb.url = nzburl
        download = nzbget.sendNZB(nzb)

    elif lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:

        try:
            req = urllib2.Request(nzburl)
            if lazylibrarian.PROXY_HOST:
                req.set_proxy(lazylibrarian.PROXY_HOST,
                              lazylibrarian.PROXY_TYPE)
            req.add_header('User-Agent', USER_AGENT)
            nzbfile = urllib2.urlopen(req, timeout=90).read()

        except urllib2.URLError as e:
            logger.warn('Error fetching nzb from url: ' + nzburl + ' %s' % e)
            nzbfile = False

        if nzbfile:

            nzbname = str(nzbtitle) + '.nzb'
            nzbpath = os.path.join(lazylibrarian.NZB_BLACKHOLEDIR, nzbname)

            try:
                with open(nzbpath, 'w') as f:
                    f.write(nzbfile)
                logger.info('NZB file saved to: ' + nzbpath)
                download = True
                try:
                    os.chmod(nzbpath, 0o777)
                except Exception as e:
                    logger.info("Could not chmod path %s: %s" % (nzbpath, str(e)))
            except Exception as e:
                logger.error('%s not writable, NZB not saved. Error: %s' %
                             (nzbpath, e))
                download = False
Exemplo n.º 58
0
def removeTorrent(torrentid, remove_data=False):
    global rpc_version

    method = 'torrent-get'
    arguments = {
        'ids': [torrentid],
        'fields': ['isFinished', 'name', 'status']
    }

    response, _ = torrentAction(method, arguments)  # type: dict
    if not response:
        return False

    try:
        finished = response['arguments']['torrents'][0]['isFinished']
        name = response['arguments']['torrents'][0]['name']
        status = response['arguments']['torrents'][0]['status']
        remove = False
        if finished:
            logger.debug('%s has finished seeding, removing torrent and data' %
                         name)
            remove = True
        elif not lazylibrarian.CONFIG['SEED_WAIT']:
            if (rpc_version < 14 and status == 8) or (rpc_version >= 14
                                                      and status in [5, 6]):
                logger.debug(
                    '%s is seeding, removing torrent and data anyway' % name)
                remove = True
        if remove:
            method = 'torrent-remove'
            if remove_data:
                arguments = {'delete-local-data': True, 'ids': [torrentid]}
            else:
                arguments = {'ids': [torrentid]}
            _, _ = torrentAction(method, arguments)
            return True
        else:
            logger.debug(
                '%s has not finished seeding, torrent will not be removed' %
                name)
    except IndexError:
        # no torrents, already removed?
        return True
    except Exception as e:
        logger.warn('Unable to remove torrent %s, %s %s' %
                    (torrentid, type(e).__name__, str(e)))
        return False

    return False
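
A usage sketch; the torrent hash is a placeholder.

# remove a finished torrent together with its downloaded data
removed = removeTorrent('3f8f0c5e0c5e0c5e0c5e0c5e0c5e0c5e0c5e0c5e', remove_data=True)
print('removed' if removed else 'not removed (still seeding or not found)')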
Exemplo n.º 59
0
def processAutoAdd(src_path=None):
    # Called to copy the book files to an auto add directory for the likes of Calibre which can't do nested dirs
    autoadddir = lazylibrarian.IMP_AUTOADD
    logger.debug('AutoAdd - Attempt to copy from [%s] to [%s]' %
                 (src_path, autoadddir))

    if not os.path.exists(autoadddir):
        logger.error(
            'AutoAdd directory [%s] is missing or not set - cannot perform autoadd copy'
            % autoadddir)
        return False
    else:
        # Now try and copy all the book files into a single dir.

        try:
            names = os.listdir(src_path)
            # TODO : n files jpg, opf & book(s) should have same name
            # Caution - book may be pdf, mobi, epub or all 3.
            # for now simply copy all files, and let the autoadder sort it out
            #
            # Update - seems Calibre only uses the ebook, not the jpeg or opf files
            # and only imports one format of each ebook, treats the others as duplicates
            # Maybe need to rewrite this so we only copy the first ebook we find and ignore everything else
            #
            for name in names:
                srcname = os.path.join(src_path, name)
                dstname = os.path.join(autoadddir, name)
                logger.debug('AutoAdd Copying file [%s] as copy [%s] to [%s]' %
                             (name, srcname, dstname))
                try:
                    shutil.copyfile(srcname, dstname)
                except Exception as why:
                    logger.error(
                        'AutoAdd - Failed to copy file [%s] because [%s] ' %
                        (name, str(why)))
                    return False
                try:
                    os.chmod(dstname, 0o666)  # make rw for calibre
                except OSError as why:
                    logger.warn("Could not set permission of %s because [%s]" %
                                (dstname, why.strerror))
                    # permissions might not be fatal, continue

        except OSError as why:
            logger.error('AutoAdd - Failed because [%s]' % why.strerror)
            return False

    logger.info('Auto Add completed for [%s]' % src_path)
    return True
Exemplo n.º 60
0
def logmsg(level, msg):
    # log messages to logger if initialised, or print if not.
    if lazylibrarian.__INITIALIZED__:
        if level == 'error':
            logger.error(msg)
        elif level == 'info':
            logger.info(msg)
        elif level == 'debug':
            logger.debug(msg)
        elif level == 'warn':
            logger.warn(msg)
        else:
            logger.info(msg)
    else:
        print(level.upper(), msg)