Example no. 1
def processDestination(pp_path=None, dest_path=None, authorname=None, bookname=None):

    try:
        if not os.path.exists(dest_path):
            logger.debug('%s does not exist, so it\'s safe to create it' % dest_path)
        else:
            logger.debug('%s already exists. It will be overwritten' % dest_path)
            logger.debug('Removing existing tree')
            shutil.rmtree(dest_path)

        logger.debug('Attempting to move tree')
        shutil.move(pp_path, dest_path)
        logger.debug('Successfully copied %s to %s.' % (pp_path, dest_path))

        pp = True
        
        #try and rename the actual book file
        for file2 in os.listdir(dest_path):
            logger.debug('file extension: ' + str(file2).split('.')[-1])
            if file2.lower().find(".jpg") < 0 and file2.lower().find(".opf") < 0:
                logger.debug('file: ' + str(file2))
                os.rename(os.path.join(dest_path, file2), os.path.join(dest_path, bookname + '.' + str(file2).split('.')[-1]))
        try:
            os.chmod(dest_path, 0777)
        except Exception, e:
            logger.debug("Could not chmod path: " + str(dest_path))
    except OSError:
        logger.info('Could not create destination folder or rename the downloaded ebook. Check permissions of: ' + lazylibrarian.DESTINATION_DIR)
        pp = False
    return pp
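Example no. 2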
def SABnzbd(title=None, nzburl=None):
    # Add http:// if the SAB host has no scheme
    HOST = lazylibrarian.SAB_HOST + ":" + lazylibrarian.SAB_PORT
    if not str(HOST)[:4] == "http":
        HOST = "http://" + HOST

    params = {}
    # Login for user
    params["mode"] = "addurl"
    params["name"] = nzburl
    # Checks that all are defined and nothing is missing
    if lazylibrarian.SAB_USER:
        params["ma_username"] = lazylibrarian.SAB_USER
    if lazylibrarian.SAB_PASS:
        params["ma_password"] = lazylibrarian.SAB_PASS
    if lazylibrarian.SAB_API:
        params["apikey"] = lazylibrarian.SAB_API
    if lazylibrarian.SAB_CAT:
        params["cat"] = lazylibrarian.SAB_CAT

    if lazylibrarian.USENET_RETENTION:
        params["maxage"] = lazylibrarian.USENET_RETENTION

    ## FUTURE-CODE
    #    if lazylibrarian.SAB_PRIO:
    #        params["priority"] = lazylibrarian.SAB_PRIO
    #    if lazylibrarian.SAB_PP:
    #        params["script"] = lazylibrarian.SAB_SCRIPT
    #    Encodes parameters
    URL = HOST + "/api?" + urllib.parse.urlencode(params)

    # log the full request url to help debug api issues
    logger.debug('Request url for <a href="%s">SABnzbd</a>' % URL)

    try:
        request = urllib.request.urlopen(URL)

    except (EOFError, IOError) as e:
        logger.error("Unable to connect to SAB with URL: %s" % url)
        return False

    except http.client.InvalidURL as e:
        logger.error("Invalid SAB host, check your config. Current host: %s" % HOST)
        return False

    result = request.read().strip().decode('utf-8')
    if not result:
        logger.error("SABnzbd didn't return anything.")
        return False

    logger.debug("Result text from SAB: " + result)
    if result == "ok":
        logger.info("NZB sent to SAB successfully.")
        return True
    elif result == "Missing authentication":
        logger.error("Incorrect username/password.")
        return False
    else:
        logger.error("Unknown error: " + result)
        return False
Example no. 3
def parse_date(title):

    logger.info("Trying to parse [%s]" % title)

    search_strings = [r'(?P<Day>\d+)\s+(?P<Month>January|February|March|April|May|June|July|August|September|October|November|December)\s+(?P<Year>\d{4})', \
                      r'(?P<Day>\d+)(st|nd|rd|th)\s+(?P<Month>January|February|March|April|May|June|July|August|September|October|November|December)\s+(?P<Year>\d{4})', \
                      r'\s(?P<Month>January|February|March|April|May|June|July|August|September|October|November|December)\s+(?P<Day>\d+)\s+(?P<Year>\d{4})', \
                      r'\s(?P<Month>January|February|March|April|May|June|July|August|September|October|November|December)\s+(?P<Year>\d{4})']

    Year = ''
    Day = ''
    Month = ''
    for regex in search_strings:
        match = re.search(regex, title)
        if match:
            Year = match.group('Year')
            Month = match.group('Month')
            try:
                Day = match.group('Day')
            except IndexError:
                # this pattern has no Day group, default to the 1st
                Day = '01'
                
            print "MATCHED " + Day + " " + Month + " " + Year
            break


    if Year == '':
        print "NO MATCH"
        
    return (Year, Month, Day)
Example no. 4
def processAutoAdd(src_path=None):
    # Called to copy the book files to an auto add directory for the likes of Calibre which can't do nested dirs
    autoadddir = lazylibrarian.IMP_AUTOADD
    logger.debug('AutoAdd - Attempt to copy from [%s] to [%s]' % (src_path, autoadddir))

    if not os.path.exists(autoadddir):
        logger.error('AutoAdd directory [%s] is missing or not set - cannot perform autoadd copy' % autoadddir)
        return False
    else:
        # Now try and copy all the book files into a single dir.

        try:
            names = os.listdir(src_path)
            # TODO : n files jpg, opf & book(s) should have same name
            # Caution - book may be pdf, mobi, epub or all 3.
            # for now simply copy all files, and let the autoadder sort it out

            # os.makedirs(autoadddir)
            #errors = []
            for name in names:
                srcname = os.path.join(src_path, name)
                dstname = os.path.join(autoadddir, name)
                logger.debug('AutoAdd Copying named file [%s] as copy [%s] to [%s]' % (name, srcname, dstname))
                try:
                    shutil.copy2(srcname, dstname)
                except (IOError, os.error) as why:
                    logger.error('AutoAdd - Failed to copy file because [%s] ' % str(why))

        except OSError as why:
            logger.error('AutoAdd - Failed because [%s]' % str(why))
            return False

    logger.info('Auto Add completed for [%s]' % src_path)
    return True
Example no. 5
def checkUpdate():
    logger.info(u'Checking for updates.')
    # Get local version if necessary and check remote branch
    if not lazylibrarian.CURRENT_VERSION:
        lazylibrarian.CURRENT_VERSION = getVersion()
    lazylibrarian.LATEST_VERSION = checkGithub()
    return
Example no. 6
def DownloadMethod(bookid=None, nzbprov=None, nzbtitle=None, nzburl=None):

    myDB = database.DBConnection()

    if lazylibrarian.SAB_HOST and not lazylibrarian.BLACKHOLE:
        download = sabnzbd.SABnzbd(nzbtitle, nzburl)

    elif lazylibrarian.BLACKHOLE:

        try:
            nzbfile = urllib2.urlopen(nzburl, timeout=30).read()

        except urllib2.URLError, e:
            logger.warn('Error fetching nzb from url: ' + nzburl + ' %s' % e)

        nzbname = str.replace(nzbtitle, ' ', '_') + '.nzb'
        nzbpath = os.path.join(lazylibrarian.BLACKHOLEDIR, nzbname)

        try:
            f = open(nzbpath, 'w')
            f.write(nzbfile)
            f.close()
            logger.info('NZB file saved to: ' + nzbpath)
            download = True
        except Exception, e:
            logger.error('%s not writable, NZB not saved. Error: %s' % (nzbpath, e))
            download = False
Example no. 7
def removeTorrent(torrentid, remove_data=False):
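    # Ask Transmission for the torrent's status and only remove it (and optionally its data) once it has finished seeding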

    method = 'torrent-get'
    arguments = {'ids': torrentid, 'fields': ['isFinished', 'name']}

    response = torrentAction(method, arguments)
    if not response:
        return False

    try:
        finished = response['arguments']['torrents'][0]['isFinished']
        name = response['arguments']['torrents'][0]['name']

        if finished:
            logger.info('%s has finished seeding, removing torrent and data' % name)
            method = 'torrent-remove'
            if remove_data:
                arguments = {'delete-local-data': True, 'ids': torrentid}
            else:
                arguments = {'ids': torrentid}
            response = torrentAction(method, arguments)
            return True
        else:
            logger.info('%s has not finished seeding yet, torrent will not be removed, will try again on next run' % name)
    except (KeyError, IndexError):
        return False

    return False
Example no. 8
def addTorrent(link):
    method = "torrent-add"
    # print type(link), link
    # if link.endswith('.torrent'):
    #    with open(link, 'rb') as f:
    #        metainfo = str(base64.b64encode(f.read()))
    #    arguments = {'metainfo': metainfo }
    # else:
    arguments = {"filename": link, "download-dir": lazylibrarian.DOWNLOAD_DIR}

    response = torrentAction(method, arguments)

    if not response:
        return False

    if response["result"] == "success":
        if "torrent-added" in response["arguments"]:
            retid = response["arguments"]["torrent-added"]["hashString"]
        elif "torrent-duplicate" in response["arguments"]:
            retid = response["arguments"]["torrent-duplicate"]["hashString"]
        else:
            retid = False

        logger.info(u"Torrent sent to Transmission successfully")
        return retid

    else:
        logger.info("Transmission returned status %s" % response["result"])
        return False
Example no. 9
def sendNZB(nzb):
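    # Connect to NZBget over xml-rpc using the configured host and credentials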

    addToTop = False
    nzbgetXMLrpc = "%(username)s:%(password)s@%(host)s/xmlrpc"

    if lazylibrarian.NZBGET_HOST is None:
        logger.error(u"No NZBget host found in configuration. Please configure it.")
        return False

    if lazylibrarian.NZBGET_HOST.startswith("https://"):
        nzbgetXMLrpc = "https://" + nzbgetXMLrpc
        lazylibrarian.NZBGET_HOST = lazylibrarian.NZBGET_HOST.replace("https://", "", 1)
    else:
        nzbgetXMLrpc = "http://" + nzbgetXMLrpc
        lazylibrarian.NZBGET_HOST = lazylibrarian.NZBGET_HOST.replace("http://", "", 1)

    url = nzbgetXMLrpc % {
        "host": lazylibrarian.NZBGET_HOST,
        "username": lazylibrarian.NZBGET_USER,
        "password": lazylibrarian.NZBGET_PASS,
    }

    nzbGetRPC = xmlrpclib.ServerProxy(url)
    try:
        if nzbGetRPC.writelog("INFO", "lazylibrarian connected to drop of %s any moment now." % (nzb.name + ".nzb")):
            logger.debug(u"Successfully connected to NZBget")
        else:
            logger.info(u"Successfully connected to NZBget, but unable to send a message" % (nzb.name + ".nzb"))

    except httplib.socket.error, e:
        logger.error(
            u"Please check your NZBget host and port (if it is running). NZBget is not responding to this combination"
        )
        return False
Example no. 10
def OLDUsenetCrawler(book=None):


    HOST = lazylibrarian.USENETCRAWLER_HOST
    results = []
    
    print book.keys()
    
    logger.info('UsenetCrawler: Searching term [%s] for author [%s] and title [%s]' % (book['searchterm'], book['authorName'], book['bookName']))
    
    params = {
        "apikey": lazylibrarian.USENETCRAWLER_API,
        "t": "book",
        "title": book['bookName'],
        "author": book['authorName']
        }
    
    #sample request
    #http://www.usenet-crawler.com/api?apikey=7xxxxxxxxxxxxxyyyyyyyyyyyyyyzzz4&t=book&author=Daniel

    logger.debug("%s" % params)
    
    if not str(HOST)[:4] == "http":
        HOST = 'http://' + HOST
    
    URL = HOST + '/api?' + urllib.urlencode(params)
    
    logger.debug('UsenetCrawler: searching on [%s] ' % URL)
    
    data = None    
    try:
        data = ElementTree.parse(urllib2.urlopen(URL, timeout=30))
    except (urllib2.URLError, IOError, EOFError), e:
        logger.error('Error fetching data from %s: %s' % (HOST, e))
        data = None
Example no. 11
def exportCSV(search_dir=None, status="Wanted"):
    """ Write a csv file to the search_dir containing all books marked as "Wanted" """
     
    if not search_dir:
        logger.warn("Alternate Directory must not be empty")
        return False
    
    csvFile = os.path.join(search_dir, "%s - %s.csv" % (status, formatter.now()))  
    
    myDB = database.DBConnection() 
    
    find_status = myDB.select('SELECT * FROM books WHERE Status = "%s"' % status)
    
    if not find_status:
        logger.warn("No books marked as %s" % status)
    else:
        with open(csvFile, 'wb') as csvfile:
            csvwrite = csv.writer(csvfile, delimiter=',',
                quotechar='"', quoting=csv.QUOTE_MINIMAL)
                
            # write headers, change AuthorName BookName BookIsbn to match import csv names (Author, Title, ISBN10)
            csvwrite.writerow([
                'BookID', 'Author', 'Title', 
                'ISBN', 'AuthorID'
                ])
        
            for resulted in find_status:
                logger.debug("Exported CSV for book %s" % resulted['BookName'].encode('utf-8'))
                row = ([
                    resulted['BookID'], resulted['AuthorName'], resulted['BookName'], 
                    resulted['BookIsbn'], resulted['AuthorID']       
                    ])
                csvwrite.writerow([("%s" % s).encode('utf-8') for s in row])
        logger.info("CSV exported to %s" % csvFile)
Example no. 12
    def markMags(self, AuthorName=None, action=None, redirect=None, **args):
        myDB = database.DBConnection()
        if not redirect:
            redirect = "magazines"
        authorcheck = None
        maglist = []
        for nzburl in args:
            # ouch dirty workaround...
            if not nzburl == 'book_table_length':
                controlValueDict = {'NZBurl': nzburl}
                newValueDict = {'Status': action}
                myDB.upsert("wanted", newValueDict, controlValueDict)
                title = myDB.select("SELECT * from wanted WHERE NZBurl = ?", [nzburl])
                for item in title:
                    bookid = item['BookID']
                    nzbprov = item['NZBprov']
                    nzbtitle = item['NZBtitle']
                    nzburl = item['NZBurl']
                    maglist.append({
                        'bookid': bookid,
                        'nzbprov': nzbprov,
                        'nzbtitle': nzbtitle,
                        'nzburl': nzburl
                    })
                logger.info('Status set to %s for %s' % (action, nzbtitle))

        # start searchthreads
        if action == 'Wanted':
            for items in maglist:
                logger.debug('Snatching %s' % items['nzbtitle'])
                snatch = DownloadMethod(items['bookid'], items['nzbprov'], items['nzbtitle'], items['nzburl'])
                notifiers.notify_snatch(items['nzbtitle'] + ' at ' + formatter.now())
        raise cherrypy.HTTPRedirect("magazines")
Example no. 13
def addTorrent(link):
    method = 'torrent-add'
    # print type(link), link
    # if link.endswith('.torrent'):
    #    with open(link, 'rb') as f:
    #        metainfo = str(base64.b64encode(f.read()))
    #    arguments = {'metainfo': metainfo }
    # else:
    arguments = {'filename': link}

    response = torrentAction(method, arguments)

    if not response:
        return False

    if response['result'] == 'success':
        if 'torrent-added' in response['arguments']:
            retid = response['arguments']['torrent-added']['hashString']
        elif 'torrent-duplicate' in response['arguments']:
            retid = response['arguments']['torrent-duplicate']['hashString']
        else:
            retid = False

        logger.info(u"Torrent sent to Transmission successfully")
        return retid

    else:
        logger.info('Transmission returned status %s' % response['result'])
        return False
Example no. 14
def addTorrent(link):
    method = "torrent-add"

    if link.endswith(".torrent"):
        with open(link, "rb") as f:
            metainfo = str(base64.b64encode(f.read()))
        arguments = {"metainfo": metainfo}
    else:
        arguments = {"filename": link}

    response = torrentAction(method, arguments)

    if not response:
        return False

    if response["result"] == "success":
        if "torrent-added" in response["arguments"]:
            retid = response["arguments"]["torrent-added"]["hashString"]
        elif "torrent-duplicate" in response["arguments"]:
            retid = response["arguments"]["torrent-duplicate"]["hashString"]
        else:
            retid = False

        logger.info(u"Torrent sent to Transmission successfully")
        return retid

    else:
        logger.info("Transmission returned status %s" % response["result"])
        return False
Example no. 15
    def fetchData(self):

        threadname = threading.currentThread().name
        if "Thread-" in threadname:
            threading.currentThread().name = "API"

        if self.data == 'OK':
            args = []
            if 'name' in self.kwargs:
                args.append({"name": self.kwargs['name']})
            if 'id' in self.kwargs:
                args.append({"id": self.kwargs['id']})
            if 'group' in self.kwargs:
                args.append({"group": self.kwargs['group']})
            if 'value' in self.kwargs:
                args.append({"value": self.kwargs['value']})
            if 'wait' in self.kwargs:
                args.append({"wait": "True"})
            if args == []:
                args = ''
            logger.info('Received API command: %s %s' % (self.cmd, args))
            methodToCall = getattr(self, "_" + self.cmd)
            methodToCall(**self.kwargs)
            if 'callback' not in self.kwargs:
                if isinstance(self.data, basestring):
                    return self.data
                else:
                    return json.dumps(self.data)
            else:
                self.callback = self.kwargs['callback']
                self.data = json.dumps(self.data)
                self.data = self.callback + '(' + self.data + ');'
                return self.data
        else:
            return self.data
Example no. 16
    def _sendPushbullet(self, message=None, event=None, pushbullet_token=None, pushbullet_deviceid=None, force=False):

        if not lazylibrarian.USE_PUSHBULLET and not force:
            return False

        if pushbullet_token is None:
            pushbullet_token = lazylibrarian.PUSHBULLET_TOKEN
        if pushbullet_deviceid is None:
            if lazylibrarian.PUSHBULLET_DEVICEID:
                pushbullet_deviceid = lazylibrarian.PUSHBULLET_DEVICEID

        logger.debug("Pushbullet event: " + str(event))
        logger.debug("Pushbullet message: " + str(message))
        logger.debug("Pushbullet api: " + str(pushbullet_token))
        logger.debug("Pushbullet devices: " + str(pushbullet_deviceid))

        pb = PushBullet(str(pushbullet_token))
        
        if event == 'LLTest': # special case, return device list
            devices = pb.getDevices()
            ret = ""
            for device in devices:
                logger.info("Pushbullet: %s [%s]" % (device["nickname"], device["iden"]))
                ret += "\nPushbullet: %s [%s]" % (device["nickname"], device["iden"])
            push = pb.pushNote(pushbullet_deviceid, str(event), str(message))
            return ret
        else:
            push = pb.pushNote(pushbullet_deviceid, str(event), str(message))
            return push
Example no. 17
def export_CSV(search_dir=None, status="Wanted", library='eBook'):
    """ Write a csv file to the search_dir containing all books marked as "Wanted" """
    # noinspection PyBroadException
    try:
        if not search_dir:
            msg = "Alternate Directory not configured"
            logger.warn(msg)
            return msg
        elif not os.path.isdir(search_dir):
            msg = "Alternate Directory [%s] not found" % search_dir
            logger.warn(msg)
            return msg
        elif not os.access(search_dir, os.W_OK | os.X_OK):
            msg = "Alternate Directory [%s] not writable" % search_dir
            logger.warn(msg)
            return msg

        csvFile = os.path.join(search_dir, "%s %s - %s.csv" % (status, library, now().replace(':', '-')))

        myDB = database.DBConnection()

        cmd = 'SELECT BookID,AuthorName,BookName,BookIsbn,books.AuthorID FROM books,authors '
        if library == 'eBook':
            cmd += 'WHERE books.Status=? and books.AuthorID = authors.AuthorID'
        else:
            cmd += 'WHERE AudioStatus=? and books.AuthorID = authors.AuthorID'
        find_status = myDB.select(cmd, (status,))

        if not find_status:
            msg = "No %s marked as %s" % (library, status)
            logger.warn(msg)
        else:
            count = 0
            if PY2:
                fmode = 'wb'
            else:
                fmode = 'w'
            with open(csvFile, fmode) as csvfile:
                csvwrite = writer(csvfile, delimiter=',',
                                  quotechar='"', quoting=QUOTE_MINIMAL)

                # write headers, change AuthorName BookName BookIsbn to match import csv names
                csvwrite.writerow(['BookID', 'Author', 'Title', 'ISBN', 'AuthorID'])

                for resulted in find_status:
                    logger.debug("Exported CSV for %s %s" % (library, resulted['BookName']))
                    row = ([resulted['BookID'], resulted['AuthorName'], resulted['BookName'],
                            resulted['BookIsbn'], resulted['AuthorID']])
                    if PY2:
                        csvwrite.writerow([("%s" % s).encode(lazylibrarian.SYS_ENCODING) for s in row])
                    else:
                        csvwrite.writerow([("%s" % s) for s in row])
                    count += 1
            msg = "CSV exported %s %s%s to %s" % (count, library, plural(count), csvFile)
            logger.info(msg)
        return msg
    except Exception:
        msg = 'Unhandled exception in exportCSV: %s' % traceback.format_exc()
        logger.error(msg)
        return msg
Example no. 18
def shutdown(restart=False, update=False):

    cherrypy.engine.exit()
    SCHED.shutdown(wait=False)
    config_write()

    if not restart and not update:
        logger.info("LazyLibrarian is shutting down...")
    if update:
        logger.info("LazyLibrarian is updating...")
        try:
            versioncheck.update()
        except Exception as e:
            logger.warn("LazyLibrarian failed to update: %s. Restarting." % e)

    if PIDFILE:
        logger.info("Removing pidfile %s" % PIDFILE)
        os.remove(PIDFILE)

    if restart:
        logger.info("LazyLibrarian is restarting ...")
        popen_list = [sys.executable, FULL_PATH]
        popen_list += ARGS
        if "--nolaunch" not in popen_list:
            popen_list += ["--nolaunch"]
            logger.info("Restarting LazyLibrarian with " + str(popen_list))
        subprocess.Popen(popen_list, cwd=os.getcwd())

    os._exit(0)
Example no. 19
def getLatestVersionaFromGit():
    latest_version = 'Unknown'
    
    #Can only work for non Windows driven installs, so check install type
    if lazylibrarian.INSTALL_TYPE == 'win':
        logger.debug('(getLatestVersionaFromGit) Code Error - Windows install - should not be called under a windows install')
        latest_version = 'WINDOWS INSTALL'
    else:
        #check current branch value of the local git repo as folks may pull from a branch not master
        branch = lazylibrarian.CURRENT_BRANCH
        
        if (branch == 'InvalidBranch'):
            logger.debug('(getLatestVersionaFromGit) - Failed to get a valid branch name from local repo')
        else:

            # Get the latest commit available from github
            url = 'https://api.github.com/repos/%s/%s/commits/%s' % (user, repo, branch)
            logger.info ('(getLatestVersionaFromGit) Retrieving latest version information from github command=[%s]' % url)
            try:
                result = urllib2.urlopen(url).read()
                git = simplejson.JSONDecoder().decode(result)
                latest_version = git['sha']
                logger.debug('(getLatestVersionaFromGit) Branch [%s] latest version set to [%s]' % (branch, latest_version))
            except Exception:
                logger.warn('(getLatestVersionaFromGit) Could not get the latest commit from github')
                latest_version = 'Not_Available_From_GitHUB'

    return latest_version
Example no. 20
def NewzNabPlus(book=None, host=None, api_key=None, searchType=None):


    #logger.info('[NewzNabPlus] Searching term [%s] for author [%s] and title [%s] on host [%s] for a [%s] item' % (book['searchterm'], book['authorName'], book['bookName'], host, searchType))
    logger.info('[NewzNabPlus] searchType [%s] with Host [%s] using api [%s] for item [%s]' % (searchType, host, api_key, str(book)))
    
    
    results = []  
    
    params = ReturnSearchTypeStructure(api_key, book, searchType)

    if not str(host)[:4] == "http":
        host = 'http://' + host
    
    URL = host + '/api?' + urllib.urlencode(params)

    try:
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', common.USER_AGENT)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".ProviderCache"), SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)

        try:
            data = ElementTree.parse(resp)
        except (urllib2.URLError, IOError, EOFError), e:
            logger.warn('Error fetching data from %s: %s' % (host, e))
            data = None

    except Exception, e:
        logger.error("Error 403 openning url %s" % e)
        data = None
Example no. 21
def setAllBookSeries():
    """ Try to set series details for all books """
    myDB = database.DBConnection()
    books = myDB.select('select BookID,WorkID,BookName from books where Manual is not "1"')
    counter = 0
    if books:
        logger.info('Checking series for %s book%s' % (len(books), plural(len(books))))
        for book in books:
            if lazylibrarian.CONFIG['BOOK_API'] == 'GoodReads':
                workid = book['WorkID']
                if not workid:
                    logger.debug("No workid for book %s: %s" % (book['BookID'], book['BookName']))
            else:
                workid = book['BookID']
                if not workid:
                    logger.debug("No bookid for book: %s" % book['BookName'])
            if workid:
                serieslist = getWorkSeries(workid)
                if serieslist:
                    counter += 1
                    setSeries(serieslist, book['BookID'])
    deleteEmptySeries()
    msg = 'Updated %s book%s' % (counter, plural(counter))
    logger.info('Series check complete: ' + msg)
    return msg
Example no. 22
def export_CSV(search_dir=None, status="Wanted"):
    """ Write a csv file to the search_dir containing all books marked as "Wanted" """

    if not search_dir or os.path.isdir(search_dir) is False:
        logger.warn("Please check Alternate Directory setting")
        return False

    csvFile = os.path.join(search_dir, "%s - %s.csv" % (status, now().replace(':', '-')))

    myDB = database.DBConnection()

    find_status = myDB.select('SELECT * FROM books WHERE Status = "%s"' % status)

    if not find_status:
        logger.warn(u"No books marked as %s" % status)
    else:
        count = 0
        with open(csvFile, 'wb') as csvfile:
            csvwrite = csv.writer(csvfile, delimiter=',',
                                  quotechar='"', quoting=csv.QUOTE_MINIMAL)

            # write headers, change AuthorName BookName BookIsbn to match import csv names (Author, Title, ISBN10)
            csvwrite.writerow(['BookID', 'Author', 'Title', 'ISBN', 'AuthorID'])

            for resulted in find_status:
                logger.debug(u"Exported CSV for book %s" % resulted['BookName'])
                row = ([resulted['BookID'], resulted['AuthorName'], resulted['BookName'],
                        resulted['BookIsbn'], resulted['AuthorID']])
                csvwrite.writerow([("%s" % s).encode(lazylibrarian.SYS_ENCODING) for s in row])
                count = count + 1
        logger.info(u"CSV exported %s book%s to %s" % (count, plural(count), csvFile))
Example no. 23
 def resumeAuthor(self, AuthorID, AuthorName):
     logger.info(u"Resuming author: " + AuthorID)
     myDB = database.DBConnection()
     controlValueDict = {'AuthorID': AuthorID}
     newValueDict = {'Status': 'Active'}
     myDB.upsert("authors", newValueDict, controlValueDict)
     raise cherrypy.HTTPRedirect("authorPage?AuthorName=%s" % AuthorName)
Example no. 24
def getAuthorImages():
    """ Try to get an author image for all authors without one"""
    myDB = database.DBConnection()
    cmd = 'select AuthorID, AuthorName from authors where (AuthorImg like "%nophoto%" or AuthorImg is null)'
    cmd += ' and Manual is not "1"'
    authors = myDB.select(cmd)
    if authors:
        logger.info('Checking images for %s author%s' % (len(authors), plural(len(authors))))
        counter = 0
        for author in authors:
            authorid = author['AuthorID']
            imagelink = getAuthorImage(authorid)
            newValueDict = {}
            if not imagelink:
                logger.debug('No image found for %s' % author['AuthorName'])
                newValueDict = {"AuthorImg": 'images/nophoto.png'}
            elif 'nophoto' not in imagelink:
                logger.debug('Updating %s image to %s' % (author['AuthorName'], imagelink))
                newValueDict = {"AuthorImg": imagelink}

            if newValueDict:
                counter += 1
                controlValueDict = {"AuthorID": authorid}
                myDB.upsert("authors", newValueDict, controlValueDict)

        msg = 'Updated %s image%s' % (counter, plural(counter))
        logger.info('Author Image check complete: ' + msg)
    else:
        msg = 'No missing author images'
        logger.debug(msg)
    return msg
Example no. 25
def search_nzb_book(books=None, mags=None):
    if not(lazylibrarian.USE_NZB):
        logger.debug('NZB Search is disabled')
        return
    # rename this thread
    threading.currentThread().name = "SEARCHNZBBOOKS"
    myDB = database.DBConnection()
    searchlist = []
    #searchlist1 = []

    if books is None:
        # We are performing a backlog search
        searchbooks = myDB.select('SELECT BookID, AuthorName, Bookname from books WHERE Status="Wanted"')

        # Clear cache
        providercache = os.path.join(lazylibrarian.DATADIR, ".ProviderCache")
        if os.path.exists(providercache):
            try:
                shutil.rmtree(providercache)
                os.mkdir(providercache)
            except OSError, e:
                logger.info('Failed to clear cache: ' + str(e))
            
        #if os.path.exists(".ProviderCache"):
        #    for f in os.listdir(".ProviderCache"):
        #        os.unlink("%s/%s" % (".ProviderCache", f))

        # Clearing throttling timeouts
        t = SimpleCache.ThrottlingProcessor()
        t.lastRequestTime.clear()
Example no. 26
def processDestination(pp_path=None, dest_path=None, authorname=None, bookname=None, global_name=None, book_id=None):

    try:
        if not os.path.exists(dest_path):
            logger.debug('%s does not exist, so it\'s safe to create it' % dest_path)
        else:
            logger.debug('%s already exists. It will be overwritten' % dest_path)
            logger.debug('Removing existing tree')
            shutil.rmtree(dest_path)

        logger.debug('Attempting to copy/move tree')
        if lazylibrarian.DESTINATION_COPY == 1 and lazylibrarian.DOWNLOAD_DIR != pp_path:
            shutil.copytree(pp_path, dest_path)
            logger.debug('Successfully copied %s to %s.' % (pp_path, dest_path))
        elif lazylibrarian.DOWNLOAD_DIR == pp_path:
            for file3 in os.listdir(pp_path):
                if ((str(file3).split('.')[-1]) in lazylibrarian.EBOOK_TYPE):
                    bookID = str(file3).split("LL.(")[1].split(")")[0]
                    if bookID == book_id:
                        logger.info('Processing %s' % bookID)
                        if not os.path.exists(dest_path):
                            try:
                                os.makedirs(dest_path)
                            except Exception, e:
                                logger.debug(str(e))
                        if lazylibrarian.DESTINATION_COPY == 1:
                            shutil.copyfile(os.path.join(pp_path, file3), os.path.join(dest_path, file3))
                        else:
                            shutil.move(os.path.join(pp_path, file3), os.path.join(dest_path, file3))
        else:
Example no. 27
def removeTorrent(torrentid, remove_data=False):

    method = "torrent-get"
    arguments = {"ids": torrentid, "fields": ["isFinished", "name"]}

    response = torrentAction(method, arguments)
    if not response:
        return False

    try:
        finished = response["arguments"]["torrents"][0]["isFinished"]
        name = response["arguments"]["torrents"][0]["name"]

        if finished:
            logger.info("%s has finished seeding, removing torrent and data" % name)
            method = "torrent-remove"
            if remove_data:
                arguments = {"delete-local-data": True, "ids": torrentid}
            else:
                arguments = {"ids": torrentid}
            response = torrentAction(method, arguments)
            return True
        else:
            logger.info(
                "%s has not finished seeding yet, torrent will not be removed, will try again on next run" % name
            )
    except (KeyError, IndexError):
        return False

    return False
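Example no. 28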
 def pauseAuthor(self, AuthorID):
     logger.info("Pausing author: " + AuthorID)
     myDB = database.DBConnection()
     controlValueDict = {'AuthorID': AuthorID}
     newValueDict = {'Status': 'Paused'}
     myDB.upsert("authors", newValueDict, controlValueDict)
     raise cherrypy.HTTPRedirect("authorPage?AuthorID=%s" % AuthorID)
Example no. 29
 def resumeAuthor(self, AuthorID):
     logger.info(u"Resuming author: " + AuthorID)
     myDB = database.DBConnection()
     controlValueDict = {'AuthorID': AuthorID}
     newValueDict = {'Status': 'Active'}
     myDB.upsert("authors", newValueDict, controlValueDict)
     logger.debug('AuthorID [%s] Restarted - redirecting to Author home page' % AuthorID)
     raise cherrypy.HTTPRedirect("authorPage?AuthorID=%s" % AuthorID)
Example no. 30
    def markBooks(self, AuthorName=None, action=None, **args):
        myDB = database.DBConnection()
        for bookid in args:
            # ouch dirty workaround...
            if not bookid == 'book_table_length':

                controlValueDict = {'BookID': bookid}
                newValueDict = {'Status': action}
                myDB.upsert("books", newValueDict, controlValueDict)
                logger.info('Status set to %s for BookID: %s' % (action, bookid))

                if AuthorName is not None:
                    authorname = AuthorName
                else:
                    find_author = myDB.action('SELECT AuthorName FROM books WHERE BookID="%s"' % bookid).fetchone()
                    authorname = find_author['AuthorName']
                if action == "Ignored":
                    lastbook = myDB.action("SELECT BookName, BookLink, BookDate from books WHERE AuthorName='%s' AND NOT Status='Ignored' order by BookDate DESC" % authorname.replace("'","''")).fetchone()
                    totalbooknum = myDB.action("SELECT TotalBooks, HaveBooks from authors WHERE AuthorName='%s'" % authorname.replace("'","''")).fetchone()
                    bookscount = int(totalbooknum['TotalBooks']) - 1
                    query = 'SELECT COUNT(*) FROM books WHERE AuthorName="%s" AND Status="Have"' % authorname
                    countbooks = myDB.action(query).fetchone()
                    havebooks = int(countbooks[0])    
                    controlValueDict = {"AuthorName": authorname}
                    newValueDict = {
                        "TotalBooks": bookscount,
                        "HaveBooks": havebooks,
                        "LastBook": lastbook['BookName'],
                        "LastLink": lastbook['BookLink'],
                        "LastDate": lastbook['BookDate']
                        }
                    myDB.upsert("authors", newValueDict, controlValueDict)
                else:
                    query = 'SELECT COUNT(*) FROM books WHERE AuthorName="%s" AND Status="Have"' % authorname
                    countbooks = myDB.action(query).fetchone()
                    havebooks = int(countbooks[0])
                    check_author = 'SELECT * FROM authors WHERE AuthorName="%s"' % authorname
                    dbauthor = myDB.action(check_author).fetchone()
                    controlValueDict = {"AuthorName": authorname}
                    newValueDict = {
                        "HaveBooks": havebooks,
                        }
                    if dbauthor is not None:
                        myDB.upsert("authors", newValueDict, controlValueDict)

        # start searchthreads
        books = []
        for bookid in args:
            # ouch dirty workaround...
            if not bookid == 'book_table_length':
                if action == 'Wanted':
                    books.append({"bookid": bookid})

        threading.Thread(target=searchbook, args=[books]).start()
        if AuthorName:
            raise cherrypy.HTTPRedirect("authorPage?AuthorName=%s" % AuthorName)
        else:
            raise cherrypy.HTTPRedirect("books")
Example no. 31
def processIMG(dest_path=None, bookimg=None):
    #handle pictures
    try:
        if not bookimg == 'images/nocover.png':
            logger.info('Downloading cover from ' + bookimg)
            coverpath = os.path.join(dest_path, 'cover.jpg')
            img = open(coverpath, 'wb')
            imggoogle = imgGoogle()
            img.write(imggoogle.open(bookimg).read())
            img.close()

    except (IOError, EOFError), e:
        logger.error('Error fetching cover from url: %s, %s' % (bookimg, e))
Example no. 32
def dump_table(table, savedir=None, status=None):
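    # Dump the named database table to a csv file in savedir, optionally filtered by status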
    myDB = database.DBConnection()
    # noinspection PyBroadException
    try:
        columns = myDB.select('PRAGMA table_info(%s)' % table)
        if not columns:  # no such table
            logger.warn("No such table [%s]" % table)
            return 0

        if not os.path.isdir(savedir):
            savedir = lazylibrarian.DATADIR

        headers = ''
        for item in columns:
            if headers:
                headers += ','
            headers += item[1]
        if status:
            cmd = 'SELECT %s from %s WHERE status="%s"' % (headers, table, status)
        else:
            cmd = 'SELECT %s from %s' % (headers, table)
        data = myDB.select(cmd)
        count = 0
        if data is not None:
            label = table
            if status:
                label += '_%s' % status
            csvFile = os.path.join(savedir, "%s.csv" % label)

            if PY2:
                fmode = 'wb'
            else:
                fmode = 'w'
            with open(csvFile, fmode) as csvfile:
                csvwrite = writer(csvfile, delimiter=',', quotechar='"', quoting=QUOTE_MINIMAL)
                headers = headers.split(',')
                csvwrite.writerow(headers)
                for item in data:
                    if PY2:
                        csvwrite.writerow([str(s).encode(lazylibrarian.SYS_ENCODING) if s else '' for s in item])
                    else:
                        csvwrite.writerow([str(s) if s else '' for s in item])
                    count += 1
            msg = "Exported %s item%s to %s" % (count, plural(count), csvFile)
            logger.info(msg)
        return count

    except Exception:
        msg = 'Unhandled exception in dump_table: %s' % traceback.format_exc()
        logger.error(msg)
        return 0
Example no. 33
    def _removeAuthor(self, **kwargs):
        if 'id' not in kwargs:
            self.data = 'Missing parameter: id'
            return
        else:
            self.id = kwargs['id']

        myDB = database.DBConnection()
        authorsearch = myDB.select('SELECT AuthorName from authors WHERE AuthorID="%s"' % self.id)
        if len(authorsearch):  # to stop error if try to remove an author while they are still loading
            AuthorName = authorsearch[0]['AuthorName']
            logger.info(u"Removing all references to author: %s" % AuthorName)
            myDB.action('DELETE from authors WHERE AuthorID="%s"' % self.id)
            myDB.action('DELETE from books WHERE AuthorID="%s"' % self.id)
Example no. 34
def dbUpdate(forcefull=False):

    myDB = database.DBConnection()

    activeauthors = myDB.select('SELECT AuthorID, AuthorName from authors WHERE Status="Active" \
                                or Status="Loading" order by DateAdded ASC')
    logger.info('Starting update for %i active author%s' % (len(activeauthors), plural(len(activeauthors))))

    for author in activeauthors:
        # authorid = author[0]
        authorname = author[1]
        importer.addAuthorToDB(authorname, refresh=True)

    logger.info('Active author update complete')
Example no. 35
def dbcheck():
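    # Create any missing database tables and add new columns to existing ones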

    conn = sqlite3.connect(DBFILE)
    c = conn.cursor()
    c.execute(
        'CREATE TABLE IF NOT EXISTS authors (AuthorID TEXT, AuthorName TEXT UNIQUE, AuthorImg TEXT, AuthorLink TEXT, DateAdded TEXT, Status TEXT, LastBook TEXT, LastLink Text, LastDate TEXT, HaveBooks INTEGER, TotalBooks INTEGER, AuthorBorn TEXT, AuthorDeath TEXT)'
    )
    c.execute(
        'CREATE TABLE IF NOT EXISTS books (AuthorID TEXT, AuthorName TEXT, AuthorLink TEXT, BookName TEXT, BookSub TEXT, BookDesc TEXT, BookGenre TEXT, BookIsbn TEXT, BookPub TEXT, BookRate INTEGER, BookImg TEXT, BookPages INTEGER, BookLink TEXT, BookID TEXT UNIQUE, BookDate TEXT, BookLang TEXT, BookAdded TEXT, Status TEXT)'
    )
    c.execute(
        'CREATE TABLE IF NOT EXISTS wanted (BookID TEXT, NZBurl TEXT, NZBtitle TEXT, NZBdate TEXT, NZBprov TEXT, Status TEXT)'
    )

    try:
        logger.info('Checking database')
        c.execute('SELECT BookSub from books')
    except sqlite3.OperationalError:
        logger.info('Updating database to hold book subtitles.')
        c.execute('ALTER TABLE books ADD COLUMN BookSub TEXT')

    try:
        c.execute('SELECT BookPub from books')
    except sqlite3.OperationalError:
        logger.info('Updating database to hold book publisher')
        c.execute('ALTER TABLE books ADD COLUMN BookPub TEXT')

    try:
        c.execute('SELECT BookGenre from books')
    except sqlite3.OperationalError:
        logger.info('Updating database to hold bookgenre')
        c.execute('ALTER TABLE books ADD COLUMN BookGenre TEXT')

    conn.commit()
    c.close()
Example no. 36
def export_CSV(search_dir=None, status="Wanted"):
    """ Write a csv file to the search_dir containing all books marked as "Wanted" """
    # noinspection PyBroadException
    try:
        if not search_dir:
            msg = "Alternate Directory not configured"
            logger.warn(msg)
            return msg
        elif not os.path.isdir(search_dir):
            msg = "Alternate Directory [%s] not found" % search_dir
            logger.warn(msg)
            return msg
        elif not os.access(search_dir, os.W_OK | os.X_OK):
            msg = "Alternate Directory [%s] not writable" % search_dir
            logger.warn(msg)
            return msg

        csvFile = os.path.join(search_dir, "%s - %s.csv" % (status, now().replace(':', '-')))

        myDB = database.DBConnection()

        cmd = 'SELECT BookID,AuthorName,BookName,BookIsbn,books.AuthorID FROM books,authors '
        cmd += 'WHERE books.Status=? and books.AuthorID = authors.AuthorID'
        find_status = myDB.select(cmd, (status,))

        if not find_status:
            msg = "No books marked as %s" % status
            logger.warn(msg)
        else:
            count = 0
            with open(csvFile, 'wb') as csvfile:
                csvwrite = csv.writer(csvfile, delimiter=',',
                                      quotechar='"', quoting=csv.QUOTE_MINIMAL)

                # write headers, change AuthorName BookName BookIsbn to match import csv names (Author, Title, ISBN10)
                csvwrite.writerow(['BookID', 'Author', 'Title', 'ISBN', 'AuthorID'])

                for resulted in find_status:
                    logger.debug(u"Exported CSV for book %s" % resulted['BookName'])
                    row = ([resulted['BookID'], resulted['AuthorName'], resulted['BookName'],
                            resulted['BookIsbn'], resulted['AuthorID']])
                    csvwrite.writerow([("%s" % s).encode(lazylibrarian.SYS_ENCODING) for s in row])
                    count += 1
            msg = "CSV exported %s book%s to %s" % (count, plural(count), csvFile)
            logger.info(msg)
        return msg
    except Exception:
        msg = 'Unhandled exception in exportCSV: %s' % traceback.format_exc()
        logger.error(msg)
        return msg
Example no. 37
    def resumeAuthor(self, AuthorID):
        myDB = database.DBConnection()
        authorsearch = myDB.select(
            'SELECT AuthorName from authors WHERE AuthorID=?', [AuthorID])
        AuthorName = authorsearch[0]['AuthorName']
        logger.info("Resuming author: %s" % AuthorName)

        controlValueDict = {'AuthorID': AuthorID}
        newValueDict = {'Status': 'Active'}
        myDB.upsert("authors", newValueDict, controlValueDict)
        logger.debug(
            'AuthorID [%s]-[%s] Restarted - redirecting to Author home page' %
            (AuthorID, AuthorName))
        raise cherrypy.HTTPRedirect("authorPage?AuthorName=%s" % AuthorName)
Example no. 38
def shutdown(restart=False, update=False):

    cherrypy.engine.exit()
    SCHED.shutdown(wait=False)
    config_write()

    if not restart and not update:
        logger.info('LazyLibrarian is shutting down...')
    if update:
        logger.info('LazyLibrarian is updating...')
        try:
            versioncheck.update()
        except Exception, e:
            logger.warn('LazyLibrarian failed to update: %s. Restarting.' % e)
Example no. 39
def create_covers(refresh=False):
    if lazylibrarian.CONFIG[
            'IMP_CONVERT'] == 'None':  # special flag to say "no covers required"
        logger.info('Cover creation is disabled in config')
        return
    myDB = database.DBConnection()
    #  <> '' ignores empty string or NULL
    issues = myDB.select("SELECT IssueFile from issues WHERE IssueFile <> ''")
    if refresh:
        logger.info("Creating covers for %s issue%s" %
                    (len(issues), plural(len(issues))))
    else:
        logger.info("Checking covers for %s issue%s" %
                    (len(issues), plural(len(issues))))
    cnt = 0
    for item in issues:
        try:
            create_cover(item['IssueFile'], refresh=refresh)
            cnt += 1
        except Exception as why:
            logger.debug('Unable to create cover for %s, %s %s' %
                         (item['IssueFile'], type(why).__name__, str(why)))
    logger.info("Cover creation completed")
    if refresh:
        return "Created covers for %s issue%s" % (cnt, plural(cnt))
    return "Checked covers for %s issue%s" % (cnt, plural(cnt))
Example no. 40
def createMagCovers(refresh=False):
    if not lazylibrarian.CONFIG['IMP_MAGCOVER']:
        logger.info('Cover creation is disabled in config')
        return
    myDB = database.DBConnection()
    #  <> '' ignores empty string or NULL
    issues = myDB.select(
        "SELECT Title,IssueFile from issues WHERE IssueFile <> ''")
    if refresh:
        logger.info("Creating covers for %s issue%s" %
                    (len(issues), plural(len(issues))))
    else:
        logger.info("Checking covers for %s issue%s" %
                    (len(issues), plural(len(issues))))
    cnt = 0
    for item in issues:
        try:
            maginfo = myDB.match(
                "SELECT CoverPage from magazines WHERE Title=?",
                (item['Title'], ))
            createMagCover(item['IssueFile'],
                           refresh=refresh,
                           pagenum=maginfo['CoverPage'])
            cnt += 1
        except Exception as why:
            logger.warn('Unable to create cover for %s, %s %s' %
                        (item['IssueFile'], type(why).__name__, str(why)))
    logger.info("Cover creation completed")
    if refresh:
        return "Created covers for %s issue%s" % (cnt, plural(cnt))
    return "Checked covers for %s issue%s" % (cnt, plural(cnt))
Example no. 41
def DownloadMethod(bookid=None, nzbprov=None, nzbtitle=None, nzburl=None):

    myDB = database.DBConnection()

    if lazylibrarian.SAB_HOST and not lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:
        download = sabnzbd.SABnzbd(nzbtitle, nzburl)

    elif lazylibrarian.NZBGET_HOST and not lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:
        headers = {'User-Agent': USER_AGENT}
        data = request.request_content(url=nzburl, headers=headers)
        nzb = classes.NZBDataSearchResult()
        nzb.extraInfo.append(data)
        nzb.name = nzbtitle
        nzb.url = nzburl
        download = nzbget.sendNZB(nzb)

    elif lazylibrarian.NZB_DOWNLOADER_BLACKHOLE:

        try:
            req = urllib2.Request(nzburl)
            if lazylibrarian.PROXY_HOST:
                req.set_proxy(lazylibrarian.PROXY_HOST,
                              lazylibrarian.PROXY_TYPE)
            req.add_header('User-Agent', USER_AGENT)
            nzbfile = urllib2.urlopen(req, timeout=90).read()

        except urllib2.URLError, e:
            logger.warn('Error fetching nzb from url: ' + nzburl + ' %s' % e)
            nzbfile = False

        if (nzbfile):

            nzbname = str(nzbtitle) + '.nzb'
            nzbpath = os.path.join(lazylibrarian.NZB_BLACKHOLEDIR, nzbname)

            try:
                f = open(nzbpath, 'w')
                f.write(nzbfile)
                f.close()
                logger.info('NZB file saved to: ' + nzbpath)
                download = True
                try:
                    os.chmod(nzbpath, 0777)
                except Exception, e:
                    logger.info("Could not chmod path: " + str(file2))
            except Exception, e:
                logger.error('%s not writable, NZB not saved. Error: %s' %
                             (nzbpath, e))
                download = False
Example no. 42
def processAutoAdd(src_path=None):
    # Called to copy the book files to an auto add directory for the likes of Calibre which can't do nested dirs
    autoadddir = lazylibrarian.IMP_AUTOADD
    logger.debug('AutoAdd - Attempt to copy from [%s] to [%s]' %
                 (src_path, autoadddir))

    if not os.path.exists(autoadddir):
        logger.error(
            'AutoAdd directory [%s] is missing or not set - cannot perform autoadd copy'
            % autoadddir)
        return False
    else:
        # Now try and copy all the book files into a single dir.

        try:
            names = os.listdir(src_path)
            # TODO : n files jpg, opf & book(s) should have same name
            # Caution - book may be pdf, mobi, epub or all 3.
            # for now simply copy all files, and let the autoadder sort it out
            #
            # Update - seems Calibre only uses the ebook, not the jpeg or opf files
            # and only imports one format of each ebook, treats the others as duplicates
            # Maybe need to rewrite this so we only copy the first ebook we find and ignore everything else
            #
            for name in names:
                srcname = os.path.join(src_path, name)
                dstname = os.path.join(autoadddir, name)
                logger.debug('AutoAdd Copying file [%s] as copy [%s] to [%s]' %
                             (name, srcname, dstname))
                try:
                    shutil.copyfile(srcname, dstname)
                except Exception as why:
                    logger.error(
                        'AutoAdd - Failed to copy file [%s] because [%s] ' %
                        (name, str(why)))
                    return False
                try:
                    os.chmod(dstname, 0o666)  # make rw for calibre
                except OSError as why:
                    logger.warn("Could not set permission of %s because [%s]" %
                                (dstname, why.strerror))
                    # permissions might not be fatal, continue

        except OSError as why:
            logger.error('AutoAdd - Failed because [%s]' % why.strerror)
            return False

    logger.info('Auto Add completed for [%s]' % src_path)
    return True
Example no. 43
def NewzNab(book=None, newznabNumber=None):

    if (newznabNumber == "1"):
        HOST = lazylibrarian.NEWZNAB_HOST
        logger.info('Searching for %s.' % book['searchterm'] + " at: " + lazylibrarian.NEWZNAB_HOST)
    if (newznabNumber == "2"):
        HOST = lazylibrarian.NEWZNAB_HOST2
        logger.info('Searching for %s.' % book['searchterm'] + " at: " + lazylibrarian.NEWZNAB_HOST2)

    results = []

    if lazylibrarian.EBOOK_TYPE is None:
        params = {
            "t": "book",
            "apikey": lazylibrarian.NEWZNAB_API,
            #"cat": 7020,
            "author": book['searchterm']
        }
    else:
        params = {
            "t": "search",
            "apikey": lazylibrarian.NEWZNAB_API,
            "cat": 7020,
            "q": book['searchterm'],
            "extended": 1,
        }

    if not str(HOST)[:4] == "http":
        HOST = 'http://' + HOST

    URL = HOST + '/api?' + urllib.urlencode(params)

    try:
        request = urllib2.Request(URL)
        if lazylibrarian.PROXY_HOST:
            request.set_proxy(lazylibrarian.PROXY_HOST, lazylibrarian.PROXY_TYPE)
        request.add_header('User-Agent', USER_AGENT)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".ProviderCache"), SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)

        try:
            data = ElementTree.parse(resp)
        except (urllib2.URLError, IOError, EOFError), e:
            logger.warn('Error fetching data from %s: %s' % (lazylibrarian.NEWZNAB_HOST, e))
            data = None

    except Exception, e:
        logger.error("Error 403 openning url")
        data = None
Example no. 44
def removeTorrent(hash, remove_data=False):
    logger.debug('removeTorrent(%s,%s)' % (hash,remove_data))

    qbclient = qbittorrentclient()
    status, torrentList = qbclient._get_list()
    for torrent in torrentList:
        if torrent['hash'].upper() == hash.upper():
            if torrent['state'] == 'uploading' or torrent['state'] == 'stalledUP':
                logger.info('%s has finished seeding, removing torrent and data' % torrent['name'])
                qbclient.remove(hash, remove_data)
                return True
            else:
                logger.info('%s has not finished seeding yet, torrent will not be removed, will try again on next run' % torrent['name'])
                return False
    return False
Example no. 45
def logmsg(level, msg):
    # log messages to logger if initialised, or print if not.
    if lazylibrarian.__INITIALIZED__:
        if level == 'error':
            logger.error(msg)
        elif level == 'info':
            logger.info(msg)
        elif level == 'debug':
            logger.debug(msg)
        elif level == 'warn':
            logger.warn(msg)
        else:
            logger.info(msg)
    else:
        print(level.upper(), msg)
Example no. 46
 def _connect(self):
     logger.info('Connecting to %s:%s' % (self.host, self.port))
     try:
         self._socket.connect((self.host, self.port))
     except ssl.SSLError as e:
         # Note: have not verified that we actually get errno 258 for this error
         if (hasattr(ssl, 'PROTOCOL_SSLv3')
                 and (getattr(e, 'reason', None) == 'UNSUPPORTED_PROTOCOL'
                      or e.errno == 258)):
             logger.warning(
                 'Was unable to ssl handshake, trying to force SSLv3 (insecure)'
             )
             self._create_socket(ssl_version=ssl.PROTOCOL_SSLv3)
             self._socket.connect((self.host, self.port))
         else:
             raise
Example no. 47
    def find_author_id(self):

        URL = 'http://www.goodreads.com/api/author_url/?' + urllib.urlencode(
            self.name) + '&' + urllib.urlencode(self.params)
        logger.info("Searching for author with name: %s" % self.name)

        # Cache our request
        request = urllib2.Request(URL)
        opener = urllib2.build_opener(SimpleCache.CacheHandler(".AuthorCache"),
                                      SimpleCache.ThrottlingProcessor(5))
        resp = opener.open(request)

        try:
            sourcexml = ElementTree.parse(resp)
        except Exception, e:
            logger.error("Error fetching authorid: " + str(e))
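The snippet stops after parsing the response; a minimal standalone sketch of the follow-up step, assuming the Goodreads author_url reply wraps the match in an author element carrying an id attribute (that layout is an assumption, not shown above).

def extract_author_id(sourcexml):
    # Hypothetical helper: pull the author id out of a parsed author_url reply.
    # The <author id="..."> layout is an assumption about the Goodreads response.
    if sourcexml is None:
        return None
    resultxml = sourcexml.getroot().find('author')
    if resultxml is None:
        return None
    return resultxml.attrib.get('id')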
Example no. 48
0
def BlockProvider(who, why):
    delay = check_int(lazylibrarian.CONFIG['BLOCKLIST_TIMER'], 3600)
    if delay == 0:
        logger.debug('Not blocking %s,%s as timer is zero' % (who, why))
    else:
        mins = int(delay / 60) + (delay % 60 > 0)
        logger.info("Blocking provider %s for %s minutes because %s" %
                    (who, mins, why))
        timenow = int(time.time())
        for entry in lazylibrarian.PROVIDER_BLOCKLIST:
            if entry["name"] == who:
                lazylibrarian.PROVIDER_BLOCKLIST.remove(entry)
        newentry = {"name": who, "resume": timenow + delay, "reason": why}
        lazylibrarian.PROVIDER_BLOCKLIST.append(newentry)
    logger.debug("Provider Blocklist contains %s entries" %
                 len(lazylibrarian.PROVIDER_BLOCKLIST))
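A minimal companion sketch that could sit alongside the function above, assuming blocklist entries keep the name/resume/reason keys built there; the helper name is hypothetical.

def ProviderIsBlocked(name):
    # Hypothetical helper: True while the provider's resume time lies in the future.
    # Expired entries are dropped so the blocklist does not grow without bound.
    timenow = int(time.time())
    for entry in list(lazylibrarian.PROVIDER_BLOCKLIST):
        if entry["name"] == name:
            if entry["resume"] > timenow:
                return True
            lazylibrarian.PROVIDER_BLOCKLIST.remove(entry)
    return False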
Example no. 49
0
 def markWanted(self, action=None, **args):
     myDB = database.DBConnection()
     #I think I need to consolidate bookid in args to unique values...
     for nzbtitle in args:
         if not nzbtitle == 'book_table_length':
             if action != "Delete":
                 controlValueDict = {"NZBtitle": nzbtitle}
                 newValueDict = {
                     "Status":       action,
                     }
                 myDB.upsert("wanted", newValueDict, controlValueDict)
                 logger.info('Status of wanted item %s changed to %s' % (nzbtitle, action))
             else:
                 myDB.action('DELETE from wanted WHERE NZBtitle=?', [nzbtitle])
                 logger.info('Item %s removed from wanted' % nzbtitle)
     raise cherrypy.HTTPRedirect("wanted")
Example no. 50
0
def setAllBookSeries():
    """ Try to set series details for all books from workpages"""
    myDB = database.DBConnection()
    books = myDB.select('select BookID from books where Manual is not "1"')
    counter = 0
    if books:
        logger.info('Checking series for %s book%s' % (len(books), plural(len(books))))
        for book in books:
            bookid = book['BookID']
            seriesdict = getWorkSeries(bookid)
            if seriesdict:
                counter += 1
                setSeries(seriesdict, bookid)
    deleteEmptySeries()
    msg = 'Updated %s book%s' % (counter, plural(counter))
    logger.info('Series check complete: ' + msg)
    return msg
Example no. 51
0
def processIMG(dest_path=None, bookimg=None, global_name=None):
    #handle pictures
    try:
        if not bookimg == ('images/nocover.png'):
            logger.debug('Downloading cover from ' + bookimg)
            coverpath = os.path.join(dest_path, global_name + '.jpg')
            img = open(coverpath, 'wb')
            imggoogle = imgGoogle()
            img.write(imggoogle.open(bookimg).read())
            img.close()
            try:
                os.chmod(coverpath, 0777)
            except Exception, e:
                logger.info("Could not chmod path: " + str(coverpath))

    except (IOError, EOFError), e:
        logger.error('Error fetching cover from url: %s, %s' % (bookimg, e))
Example no. 52
0
def CheckFolder():
    myDB = database.DBConnection()
    snatched = myDB.select('SELECT * from wanted WHERE Status="Snatched"')
    pp_path = lazylibrarian.SAB_DIR

    if snatched:
        for book in snatched:
            logger.info('Processing snatched book %s' % book['BookID'])
            pp_pathsub = os.path.join(pp_path, book['NZBtitle'])

            if os.path.exists(pp_pathsub):
                logger.debug('Found %s. Processing %s' % (book['NZBtitle'], pp_pathsub))
                processPath(book['BookID'], pp_pathsub)
            else:
                logger.error('No path found for: %s. Can\'t process it.' % book['NZBtitle'])
    else:
        logger.info('No books with status Snatched are found, nothing to process.')
Example no. 53
0
    def _send_tweet(self, message=None):

        username = self.consumer_key
        password = self.consumer_secret
        access_token_key = lazylibrarian.TWITTER_USERNAME
        access_token_secret = lazylibrarian.TWITTER_PASSWORD

        logger.info(u"Sending tweet: " + message)

        api = twitter.Api(username, password, access_token_key,
                          access_token_secret)

        try:
            api.PostUpdate(message)
        except Exception, e:
            logger.error(u"Error Sending Tweet: %s" % e)
            return False
Example no. 54
0
def DownloadMethod(bookid=None, nzbprov=None, nzbtitle=None, nzburl=None):

    myDB = database.DBConnection()

    if lazylibrarian.SAB_HOST and not lazylibrarian.BLACKHOLE:
        download = sabnzbd.SABnzbd(nzbtitle, nzburl)
        logger.debug('Nzbfile has been downloaded from ' + str(nzburl))
        myDB.action('UPDATE books SET status = "Snatched" WHERE BookID=?',
                    [bookid])
        myDB.action('UPDATE wanted SET status = "Snatched" WHERE BookID=?',
                    [bookid])

    elif lazylibrarian.BLACKHOLE:

        try:
            req = urllib2.Request(nzburl)
            req.add_header(
                'User-Agent',
                'lazylibrary/0.0 +https://github.com/herman-rogers/LazyLibrarian-1'
            )
            nzbfile = urllib2.urlopen(req, timeout=90).read()

        except urllib2.URLError, e:
            logger.warn('Error fetching nzb from url: ' + nzburl + ' %s' % e)
            nzbfile = False

        if (nzbfile):

            nzbname = str(nzbtitle) + '.nzb'
            nzbpath = os.path.join(lazylibrarian.BLACKHOLEDIR, nzbname)

            try:
                f = open(nzbpath, 'w')
                f.write(nzbfile)
                f.close()
                logger.info('NZB file saved to: ' + nzbpath)
                download = True
                try:
                    os.chmod(nzbpath, 0777)
                except Exception, e:
                    logger.info("Could not chmod path: " + str(nzbpath))
            except Exception, e:
                logger.error('%s not writable, NZB not saved. Error: %s' %
                             (nzbpath, e))
                download = False
Example no. 55
0
    def connect(self):
        """
        Connects to the Deluge instance
        """
        if not self.host or not self.port:
            logger.error(
                'Invalid deluge daemon host or port, check your config')
            return False

        logger.info('Connecting to %s:%s' % (self.host, self.port))
        self._socket.connect((self.host, self.port))
        logger.debug('Connected to Deluge, logging in')
        if self.username:
            result = self.call('daemon.login', self.username, self.password)
        else:
            result = self.call('auth.login', self.password)
        logger.debug('Logged in with value %r' % result)
        self.connected = True
Example no. 56
0
    def addResults(self, action=None, **args):
        for arg in args:
            if not arg == 'book_table_length':
                name = arg.split('&')
                authorname = name[0]
                bookid = name[1]

                if action == 'author':
                    threading.Thread(target=importer.addAuthorToDB,
                                     args=[authorname]).start()
                    raise cherrypy.HTTPRedirect("authorPage?AuthorName=%s" %
                                                authorname)
                elif action == 'book':
                    threading.Thread(target=importer.addBookToDB,
                                     args=[bookid, authorname]).start()
                    raise cherrypy.HTTPRedirect("bookPage?BookID=%s" % bookid)
                else:
                    logger.info('Unknown action: %s' % action)
Example no. 57
0
def exportCSV(search_dir=None, status="Wanted"):
    """ Write a csv file to the search_dir containing all books marked as "Wanted" """

    if not search_dir or os.path.isdir(search_dir) is False:
        logger.warn(u"Alternate Directory is not set or does not exist")
        return False

    csvFile = os.path.join(
        search_dir,
        "%s - %s.csv" % (status, formatter.now().replace(':', '-')))

    myDB = database.DBConnection()

    find_status = myDB.select('SELECT * FROM books WHERE Status = "%s"' %
                              status)

    if not find_status:
        logger.warn(u"No books marked as %s" % status)
    else:
        count = 0
        with open(csvFile, 'wb') as csvfile:
            csvwrite = csv.writer(csvfile,
                                  delimiter=',',
                                  quotechar='"',
                                  quoting=csv.QUOTE_MINIMAL)

            # write headers, change AuthorName BookName BookIsbn to match import csv names (Author, Title, ISBN10)
            csvwrite.writerow(
                ['BookID', 'Author', 'Title', 'ISBN', 'AuthorID'])

            for resulted in find_status:
                logger.debug(
                    u"Exported CSV for book %s" %
                    resulted['BookName'].encode(lazylibrarian.SYS_ENCODING))
                row = ([
                    resulted['BookID'], resulted['AuthorName'],
                    resulted['BookName'], resulted['BookIsbn'],
                    resulted['AuthorID']
                ])
                csvwrite.writerow([
                    ("%s" % s).encode(lazylibrarian.SYS_ENCODING) for s in row
                ])
                count = count + 1
        logger.info(u"CSV exported %s books to %s" % (count, csvFile))
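The header row above deliberately matches the column names expected on the import side. A minimal sketch of reading such an export back with the standard csv module; the file name is a placeholder.

import csv

# Hypothetical read-back of an exported file; the column names come from the
# header row written above.
with open('Wanted - 2024-01-01 12-00-00.csv', 'rb') as csvfile:
    for row in csv.DictReader(csvfile):
        print("%s - %s (ISBN %s)" % (row['Author'], row['Title'], row['ISBN']))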
Example no. 58
0
def processOPF(dest_path=None, authorname=None, bookname=None, bookisbn=None, bookid=None, bookpub=None, bookdate=None, bookdesc=None, booklang=None, global_name=None):
	opfinfo = '<?xml version="1.0"  encoding="UTF-8"?>\n\
<package version="2.0" xmlns="http://www.idpf.org/2007/opf" >\n\
	<metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n\
		<dc:title>%s</dc:title>\n\
		<creator>%s</creator>\n\
		<dc:language>%s</dc:language>\n\
		<dc:identifier scheme="GoogleBooks">%s</dc:identifier>\n' % (bookname, authorname, booklang, bookid)

	if bookisbn:
		opfinfo += '        <dc:identifier scheme="ISBN">%s</dc:identifier>\n' % bookisbn

	if bookpub:
		opfinfo += '        <dc:publisher>%s</dc:publisher>\n' % bookpub

	if bookdate:
		opfinfo += '        <dc:date>%s</dc:date>\n' % bookdate

	if bookdesc:
		opfinfo += '        <dc:description>%s</dc:description>\n' % bookdesc

	opfinfo += '	</metadata>\n\
	<guide>\n\
		<reference href="cover.jpg" type="cover" title="Cover"/>\n\
	</guide>\n\
</package>'

	dic = {'...':'', ' & ':' ', ' = ': ' ', '$':'s', ' + ':' ', ',':'', '*':''}

	opfinfo = formatter.latinToAscii(formatter.replace_all(opfinfo, dic))

	#handle metadata
	opfpath = os.path.join(dest_path, global_name+'.opf')
	if not os.path.exists(opfpath):
		opf = open(opfpath, 'wb')
		opf.write(opfinfo)
		opf.close()

		try:
			os.chmod(opfpath, 0777);
		except Exception, e:
			logger.info("Could not chmod path: " + str(opfpath));

		logger.debug('Saved metadata to: ' + opfpath)
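A quick sanity check on a generated file is to re-parse it. A minimal sketch using ElementTree; the path is a placeholder.

from xml.etree import ElementTree

# Hypothetical well-formedness check on a generated .opf file.
try:
    tree = ElementTree.parse('/books/Author Name/Book Title/Book Title.opf')
    print('OPF root element: %s' % tree.getroot().tag)
except ElementTree.ParseError as e:
    print('OPF file is not well-formed XML: %s' % e)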
Example no. 59
0
def dbUpdate(refresh=False):
    try:
        myDB = database.DBConnection()
        cmd = 'SELECT AuthorID from authors WHERE Status="Active" or Status="Loading" order by DateAdded ASC'
        activeauthors = myDB.select(cmd)
        lazylibrarian.AUTHORS_UPDATE = True
        logger.info('Starting update for %i active author%s' % (len(activeauthors), plural(len(activeauthors))))
        for author in activeauthors:
            authorid = author['AuthorID']
            # noinspection PyUnresolvedReferences
            lazylibrarian.importer.addAuthorToDB(refresh=refresh, authorid=authorid)
        logger.info('Active author update complete')
        lazylibrarian.AUTHORS_UPDATE = False
        return 'Updated %i active author%s' % (len(activeauthors), plural(len(activeauthors)))
    except Exception:
        lazylibrarian.AUTHORS_UPDATE = False
        msg = 'Unhandled exception in dbUpdate: %s' % traceback.format_exc()
        logger.error(msg)
        return msg
Example no. 60
0
    def _get_authorization(self):

        _ = oauth.SignatureMethod_HMAC_SHA1()
        oauth_consumer = oauth.Consumer(key=self.consumer_key, secret=self.consumer_secret)
        oauth_client = oauth.Client(oauth_consumer)

        logger.info('Requesting temp token from Twitter')

        resp, content = oauth_client.request(self.REQUEST_TOKEN_URL, 'GET')

        if resp['status'] != '200':
            logger.info('Invalid response from Twitter requesting temp token: %s' % resp['status'])
        else:
            request_token = dict(parse_qsl(content))

            lazylibrarian.CONFIG['TWITTER_USERNAME'] = request_token['oauth_token']
            lazylibrarian.CONFIG['TWITTER_PASSWORD'] = request_token['oauth_token_secret']

            return self.AUTHORIZATION_URL + "?oauth_token=" + request_token['oauth_token']
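For clarity, parse_qsl simply splits Twitter's url-encoded reply into key/value pairs. A minimal illustration with a made-up token string; real tokens are much longer.

from urlparse import parse_qsl  # urllib.parse in Python 3

content = 'oauth_token=abc123&oauth_token_secret=def456&oauth_callback_confirmed=true'
request_token = dict(parse_qsl(content))
# request_token == {'oauth_token': 'abc123',
#                   'oauth_token_secret': 'def456',
#                   'oauth_callback_confirmed': 'true'}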