Example #1
def do_download_worker(book_list,
                       options,
                       cpus,
                       merge=False,
                       notification=lambda x, y: x):
    '''
    Master job, to launch child jobs to download a story for each book in
    a set of books.  This is run as a background worker job to keep the UI
    more responsive and to get around memory leak issues, since it launches
    a child worker process for each book.
    '''
    server = Server(pool_size=cpus)

    logger.info(options['version'])
    total = 0
    alreadybad = []
    # Queue all the jobs
    logger.info("Adding jobs for URLs:")
    for book in book_list:
        logger.info("%s"%book['url'])
        if book['good']:
            total += 1
            args = ['calibre_plugins.fanficfare_plugin.jobs',
                    'do_download_for_worker',
                    (book, options, merge)]
            job = ParallelJob('arbitrary_n',
                              "url:(%s) id:(%s)" % (book['url'], book['calibre_id']),
                              done=None,
                              args=args)
            job._book = book
            server.add_job(job)
        else:
            # was already bad before the subprocess ever started.
            alreadybad.append(book)
    
    # This server is an arbitrary_n job, so there is a notifier available.
    # Set the % complete to a small number to avoid the 'unavailable' indicator
    notification(0.01, _('Downloading FanFiction Stories'))

    # dequeue the job results as they arrive, saving the results
    count = 0
    while True:
        job = server.changed_jobs_queue.get()
        # A job can 'change' when it is not finished, for example if it
        # produces a notification. Ignore these.
        job.update()
        if not job.is_finished:
            continue
        # A job really finished. Get the information.
        book_list.remove(job._book)
        book_list.append(job.result)
        book_id = job._book['calibre_id']
        count = count + 1
        notification(float(count)/total,
                     _('%d of %d stories finished downloading') % (count, total))
        # Add this job's output to the current log
        logger.info('Logfile for book ID %s (%s)' % (book_id, job._book['title']))
        logger.info(job.details)

        if count >= total:
            ## ordering first by good vs bad, then by listorder.
            good_list = sorted((x for x in book_list if x['good']),
                               key=lambda x: x['listorder'])
            bad_list = sorted((x for x in book_list if not x['good']),
                              key=lambda x: x['listorder'])
            
            logger.info("\n"+_("Download Results:")+"\n%s\n"%("\n".join([ "%(url)s %(comment)s" % book for book in good_list+bad_list])))
            
            logger.info("\n"+_("Successful:")+"\n%s\n"%("\n".join([book['url'] for book in good_list])))
            logger.info("\n"+_("Unsuccessful:")+"\n%s\n"%("\n".join([book['url'] for book in bad_list])))
            break

    server.close()
    
    # return the book list as the job result
    return book_list
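
For context (not shown in the excerpt): `Server` and `ParallelJob` presumably come from calibre's IPC layer (`calibre.utils.ipc.server` / `calibre.utils.ipc.job`), and `do_download_worker` itself is queued as one background job by the plugin's GUI code. The sketch below illustrates that caller side under those assumptions; the helper names (`start_fanfic_downloads`, `downloads_finished`) and the exact `run_job` call are illustrative, not taken from the plugin source.

# Hypothetical caller-side sketch, assuming calibre's standard plugin job API.
from functools import partial
from calibre.gui2 import Dispatcher  # assumed import; marshals the callback back to the GUI thread

def start_fanfic_downloads(gui, book_list, options, merge=False):
    cpus = gui.job_manager.server.pool_size  # assumed: reuse the GUI job server's pool size
    args = ['calibre_plugins.fanficfare_plugin.jobs',
            'do_download_worker',
            (book_list, options, cpus, merge)]
    # 'arbitrary_n' runs the named function in a worker process;
    # do_download_worker then fans out one child job per book.
    gui.job_manager.run_job(
        Dispatcher(partial(downloads_finished, options=options)),
        'arbitrary_n', args=args,
        description='Downloading FanFiction Stories')

def downloads_finished(job, options=None):
    if job.failed:
        return
    # job.result is the book_list returned by do_download_worker.
    for book in job.result:
        print(book['url'], book.get('comment', ''))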
Example #2
def do_download_worker(book_list,
                       options,
                       cpus,
                       merge=False,
                       notification=lambda x, y: x):
    '''
    Coordinator job, to launch child jobs to download a story for each book
    in a set of books.  This is run as a background worker job to keep the
    UI more responsive and to get around memory leak issues, since it
    launches a child worker process for each book.
    '''
    server = Server(pool_size=cpus)

    logger.info(options['version'])
    total = 0
    alreadybad = []
    # Queue all the jobs
    logger.info("Adding jobs for URLs:")
    for book in book_list:
        logger.info("%s" % book['url'])
        if book['good']:
            total += 1
            args = [
                'calibre_plugins.fanficfare_plugin.jobs',
                'do_download_for_worker', (book, options, merge)
            ]
            job = ParallelJob('arbitrary_n',
                              "url:(%s) id:(%s)" %
                              (book['url'], book['calibre_id']),
                              done=None,
                              args=args)
            job._book = book
            server.add_job(job)
        else:
            # was already bad before the subprocess ever started.
            alreadybad.append(book)

    # This server is an arbitrary_n job, so there is a notifier available.
    # Set the % complete to a small number to avoid the 'unavailable' indicator
    notification(0.01, _('Downloading FanFiction Stories'))

    # dequeue the job results as they arrive, saving the results
    count = 0
    while True:
        job = server.changed_jobs_queue.get()
        # A job can 'change' when it is not finished, for example if it
        # produces a notification. Ignore these.
        job.update()
        if not job.is_finished:
            continue
        # A job really finished. Get the information.
        book_list.remove(job._book)
        book_list.append(job.result)
        book_id = job._book['calibre_id']
        count = count + 1
        notification(
            float(count) / total,
            _('%d of %d stories finished downloading') % (count, total))
        # Add this job's output to the current log
        logger.info('Logfile for book ID %s (%s)' %
                    (book_id, job._book['title']))
        logger.info(job.details)

        if count >= total:
            book_list = sorted(book_list, key=lambda x: x['listorder'])
            logger.info("\n" + _("Download Results:") + "\n%s\n" % ("\n".join([
                "%(status)s %(url)s %(comment)s" % book for book in book_list
            ])))

            good_lists = defaultdict(list)
            bad_lists = defaultdict(list)
            for book in book_list:
                if book['good']:
                    good_lists[book['status']].append(book)
                else:
                    bad_lists[book['status']].append(book)

            order = [
                _('Add'),
                _('Update'),
                _('Meta'),
                _('Different URL'),
                _('Rejected'),
                _('Skipped'),
                _('Bad'),
                _('Error'),
            ]
            j = 0
            for d in [good_lists, bad_lists]:
                for status in order:
                    if d[status]:
                        books = d[status]
                        logger.info("\n" + status + "\n%s\n" %
                                    ("\n".join([book['url'] for book in books])))
                        for book in books:
                            book['reportorder'] = j
                            j += 1
                    del d[status]
                # just in case a status is added but doesn't appear in order.
                for status in d.keys():
                    logger.info("\n" + status + "\n%s\n" %
                                ("\n".join([book['url']
                                            for book in d[status]])))
            break

    server.close()

    # return the book list as the job result
    return book_list
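
What Example #2 adds over #1 is the per-status report: finished books are grouped by their `status` value and printed in a fixed order, good results before bad, with a running `reportorder` index. The following standalone sketch (toy data, no calibre dependencies) shows just that grouping logic:

from collections import defaultdict

books = [
    {'url': 'http://example.com/s/1', 'good': True,  'status': 'Add'},
    {'url': 'http://example.com/s/2', 'good': True,  'status': 'Update'},
    {'url': 'http://example.com/s/3', 'good': False, 'status': 'Error'},
    {'url': 'http://example.com/s/4', 'good': True,  'status': 'Add'},
]

good_lists = defaultdict(list)
bad_lists = defaultdict(list)
for book in books:
    (good_lists if book['good'] else bad_lists)[book['status']].append(book)

order = ['Add', 'Update', 'Meta', 'Different URL',
         'Rejected', 'Skipped', 'Bad', 'Error']

j = 0
for d in (good_lists, bad_lists):
    for status in order:
        for book in d.pop(status, []):
            book['reportorder'] = j   # position in the final report
            j += 1
    # any status not listed in `order` would still be left in d here

for book in books:
    print(book['reportorder'], book['status'], book['url'])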
Example #3
def do_download_worker(book_list, options, cpus, notification=lambda x, y: x):
    '''
    Master job, to launch child jobs to download a story for each book in
    a set of books.  This is run as a background worker job to keep the UI
    more responsive and to get around memory leak issues, since it launches
    a child worker process for each book.
    '''
    server = Server(pool_size=cpus)

    logger.info(options['version'])
    total = 0
    alreadybad = []
    # Queue all the jobs
    logger.info("Adding jobs for URLs:")
    for book in book_list:
        logger.info("%s" % book['url'])
        if book['good']:
            total += 1
            args = [
                'calibre_plugins.fanficfare_plugin.jobs',
                'do_download_for_worker', (book, options)
            ]
            job = ParallelJob('arbitrary_n',
                              "url:(%s) id:(%s)" %
                              (book['url'], book['calibre_id']),
                              done=None,
                              args=args)
            job._book = book
            server.add_job(job)
        else:
            # was already bad before the subprocess ever started.
            alreadybad.append(book)

    # This server is an arbitrary_n job, so there is a notifier available.
    # Set the % complete to a small number to avoid the 'unavailable' indicator
    notification(0.01, _('Downloading FanFiction Stories'))

    # dequeue the job results as they arrive, saving the results
    count = 0
    while True:
        job = server.changed_jobs_queue.get()
        # A job can 'change' when it is not finished, for example if it
        # produces a notification. Ignore these.
        job.update()
        if not job.is_finished:
            continue
        # A job really finished. Get the information.
        book_list.remove(job._book)
        book_list.append(job.result)
        book_id = job._book['calibre_id']
        count = count + 1
        notification(
            float(count) / total,
            _('%d of %d stories finished downloading') % (count, total))
        # Add this job's output to the current log
        logger.info('Logfile for book ID %s (%s)' %
                    (book_id, job._book['title']))
        logger.info(job.details)

        if count >= total:
            logger.info("\n" + _("Successful:") + "\n%s\n" % ("\n".join([
                book['url'] for book in filter(lambda x: x['good'], book_list)
            ])))
            logger.info("\n" + _("Unsuccessful:") + "\n%s\n" % ("\n".join([
                book['url']
                for book in filter(lambda x: not x['good'], book_list)
            ])))
            break

    server.close()

    # return the book list as the job result
    return book_list
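
In all three versions the `notification` parameter is any callable taking a completion fraction (0.0 to 1.0) and a message string; the default `lambda x, y: x` silently discards updates. A trivial stand-in for exercising the worker outside calibre's progress UI could look like this (illustrative only):

def console_notification(fraction, message):
    # Prints e.g. "[ 33%] 1 of 3 stories finished downloading"
    print('[%3d%%] %s' % (int(fraction * 100), message))

# book_list = do_download_worker(book_list, options, cpus,
#                                notification=console_notification)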