def compress_images(container, report=None, names=None, jpeg_quality=None, progress_callback=lambda n, t, name: True):
    '''
    Losslessly compress all compressible images in the container using
    multiple worker threads.

    :param container: Book container whose images are processed
    :param report: Optional callable receiving human-readable progress messages
    :param names: Optional collection restricting which images are processed
    :param jpeg_quality: Passed through to the compression workers
    :param progress_callback: Called as (num_done, total, name); returning a
        falsy value aborts the remaining work
    :return: (changed, results) where changed is True iff at least one image
        actually got smaller, and results maps name -> (ok, data)
    '''
    images = get_compressible_images(container)
    if names is not None:
        images &= set(names)
    results = {}
    queue = Queue()
    abort = Event()
    for name in images:
        queue.put(name)

    def pc(name):
        # Runs on worker threads after each image; a falsy return from the
        # progress callback aborts all remaining work.
        keep_going = progress_callback(len(results), len(images), name)
        if not keep_going:
            abort.set()

    progress_callback(0, len(images), '')
    # Fix: was a side-effect list comprehension building a throwaway list.
    # The workers start themselves; queue.join() waits for them to drain it.
    for i in range(min(detect_ncpus(), len(images))):
        Worker(abort, 'CompressImage%d' % i, queue, results, container, jpeg_quality, pc)
    queue.join()
    before_total = after_total = 0
    changed = False
    # Fix: the original called results.iteritems(), which is Python 2 only
    # and raises AttributeError on Python 3; .items() works on both.
    for name, (ok, res) in results.items():
        name = force_unicode(name, filesystem_encoding)
        if ok:
            before, after = res
            if before != after:
                changed = True
            before_total += before
            after_total += after
            if report:
                if before != after:
                    report(_('{0} compressed from {1} to {2} bytes [{3:.1%} reduction]').format(
                        name, human_readable(before), human_readable(after), (before - after) / before))
                else:
                    report(_('{0} could not be further compressed').format(name))
        elif report:
            # Fix: report defaults to None; the original called it
            # unconditionally here, raising TypeError on any failure.
            report(_('Failed to process {0} with error:').format(name))
            report(res)
    if report:
        if changed:
            report('')
            report(_('Total image filesize reduced from {0} to {1} [{2:.1%} reduction]').format(
                human_readable(before_total), human_readable(after_total), (before_total - after_total) / before_total))
        else:
            report(_('Images are already fully optimized'))
    return changed, results
def launch_workers(self, names, in_process_container):
    '''
    Decide how many workers should handle *names* and make sure enough
    extra workers exist. Returns the worker count (the current process
    counts as one of them).
    '''
    num_workers = min(detect_ncpus(), len(names))
    if self.max_workers:
        num_workers = min(num_workers, self.max_workers)
    if num_workers > 1:
        # Not worth parallelising tiny jobs: fewer than three files, or
        # (only computed in that case) under 128KB of total input.
        path_map = in_process_container.name_path_map
        if len(names) < 3 or sum(os.path.getsize(path_map[n]) for n in names) < 128 * 1024:
            num_workers = 1
    if num_workers > 1:
        # This process does a share of the work itself, so launch one
        # fewer external worker than the total.
        needed = num_workers - 1
        while len(self.workers) < needed:
            self.launch_worker()
    return num_workers
def __init__(self, max_workers=None, name=None):
    # Pool event-loop thread: all queues and bookkeeping must exist
    # before start() runs the thread body.
    Thread.__init__(self, name=name)
    self.max_workers = max_workers or detect_ncpus()
    self.shutting_down = False
    self.terminal_failure = None
    self.available_workers = []
    self.busy_workers = {}
    self.pending_jobs = []
    self.events = Queue()
    self.results = Queue()
    self.tracker = Queue()
    # Pickled None as the initial common-data payload.
    self.common_data = pickle_dumps(None)
    self.start()
def __init__(self, max_workers=None, name=None):
    # Pool event-loop thread: all queues and bookkeeping must exist
    # before start() runs the thread body.
    Thread.__init__(self, name=name)
    self.max_workers = max_workers or detect_ncpus()
    self.shutting_down = False
    self.terminal_failure = None
    self.worker_data = None
    self.available_workers = []
    self.busy_workers = {}
    self.pending_jobs = []
    self.events = Queue()
    self.results = Queue()
    self.tracker = Queue()
    # Pickled None (highest protocol) as the initial common-data payload.
    self.common_data = cPickle.dumps(None, -1)
    self.start()
def __init__(self, opts, log):
    # Job-manager state. max_jobs falls back to the CPU count when the
    # configured value is below one, and is always at least 1.
    self.log = log
    requested = opts.max_jobs
    if requested < 1:
        requested = detect_ncpus()
    self.max_jobs = max(1, requested)
    # Scaled by 60 — opts.max_job_time is presumably in minutes; clamped
    # so it is never negative (0 reads as "no limit" — TODO confirm).
    self.max_job_time = max(0, opts.max_job_time * 60)
    self.lock = RLock()
    self.jobs = {}
    self.finished_jobs = {}
    self.events = Queue()
    self.job_id = count()
    self.waiting_job_ids = set()
    self.waiting_jobs = deque()
    self.max_block = None
    self.shutting_down = False
    self.event_loop = None
def __init__(self, opts, log, container_root):
    # Shared state for the PDF render workers: a request interceptor that
    # restricts access to the container/mathjax roots, and one
    # QWebEngineProfile used by every worker.
    QObject.__init__(self)
    self.opts = opts
    self.has_maths = {}
    self.interceptor = RequestInterceptor(self)
    self.log = self.interceptor.log = log
    self.interceptor.container_root = os.path.normcase(os.path.abspath(container_root))
    self.interceptor.resources_root = os.path.normcase(
        os.path.abspath(os.path.dirname(mathjax_dir())))
    profile = QWebEngineProfile(QApplication.instance())
    profile.setHttpUserAgent('calibre-pdf-output ' + __version__)
    profile.settings().setDefaultTextEncoding('utf-8')
    profile.setUrlRequestInterceptor(self.interceptor)
    self.profile = profile
    self.workers = []
    self.max_workers = detect_ncpus()
    if iswindows:
        # Unix signal handlers are only installed on non-Windows platforms.
        self.original_signal_handlers = {}
    else:
        self.original_signal_handlers = setup_unix_signals(self)
def compress_covers(path_map, jpeg_quality, progress_callback):
    '''
    Compress the cover images in path_map in parallel.

    :param path_map: Mapping of book_id -> (path, size)
    :param jpeg_quality: Passed through to the compression workers
    :param progress_callback: Called as (book_id, old_size, new_size) once
        per cover, as results arrive
    '''
    input_queue = Queue()
    output_queue = Queue()
    num_workers = detect_ncpus()
    sz_map = {}
    for book_id, (path, sz) in path_map.items():
        input_queue.put((book_id, path))
        sz_map[book_id] = sz
    workers = [
        Thread(target=compress_worker, args=(input_queue, output_queue, jpeg_quality),
               daemon=True, name=f'CCover-{i}')
        for i in range(num_workers)
    ]
    # Fix: was a side-effect list comprehension ([w.start() for w in workers])
    # that built a throwaway list of Nones; use a plain loop.
    for w in workers:
        w.start()
    pending = set(path_map)
    while pending:
        book_id, new_sz = output_queue.get()
        pending.remove(book_id)
        progress_callback(book_id, sz_map[book_id], new_sz)
    # One None sentinel per worker tells compress_worker to exit.
    for w in workers:
        input_queue.put(None)
    for w in workers:
        w.join()
def create_spare_pool(self, *args):
    # Lazily create the shared spare worker pool, sized to the CPU count
    # but capped at half the configured worker limit.
    if self._spare_pool is not None:
        return
    limit = int(config['worker_limit'] / 2.0)
    self._spare_pool = Pool(max_workers=min(detect_ncpus(), limit), name='GUIPool')
def create_spare_pool(self, *args):
    # Create the spare GUI worker pool on first use only; pool size is the
    # smaller of the CPU count and half the configured worker limit.
    if self._spare_pool is not None:
        return
    size = min(detect_ncpus(), int(config['worker_limit'] / 2.0))
    self._spare_pool = Pool(max_workers=size, name='GUIPool')
def compress_images(container, report=None, names=None, jpeg_quality=None, progress_callback=lambda n, t, name:True):
    '''
    Losslessly compress all compressible images in the container using
    multiple worker threads.

    :param container: Book container whose images are processed
    :param report: Optional callable receiving human-readable progress messages
    :param names: Optional collection restricting which images are processed
    :param jpeg_quality: Passed through to the compression workers
    :param progress_callback: Called as (num_done, total, name); returning a
        falsy value aborts the remaining work
    :return: (changed, results) where changed is True iff at least one image
        actually got smaller, and results maps name -> (ok, data)
    '''
    images = get_compressible_images(container)
    if names is not None:
        images &= set(names)
    results = {}
    queue = Queue()
    abort = Event()
    for name in images:
        queue.put(name)

    def pc(name):
        # Runs on worker threads; a falsy return aborts remaining work.
        keep_going = progress_callback(len(results), len(images), name)
        if not keep_going:
            abort.set()

    progress_callback(0, len(images), '')
    [Worker(abort, 'CompressImage%d' % i, queue, results, container, jpeg_quality, pc)
        for i in xrange(min(detect_ncpus(), len(images)))]
    queue.join()
    before_total = after_total = 0
    # Fix: the original derived "changed" from before_total > 0, which is
    # true whenever any image was processed at all, so it printed a
    # "0% reduction" summary and returned changed=True even when no image
    # could be compressed further. Track real changes explicitly.
    changed = False
    for name, (ok, res) in results.iteritems():
        name = force_unicode(name, filesystem_encoding)
        if ok:
            before, after = res
            if before != after:
                changed = True
            before_total += before
            after_total += after
            if report:
                if before != after:
                    report(_('{0} compressed from {1} to {2} bytes [{3:.1%} reduction]').format(
                        name, human_readable(before), human_readable(after), (before - after)/before))
                else:
                    report(_('{0} could not be further compressed').format(name))
        elif report:
            # Fix: report defaults to None; the original called it
            # unconditionally here, raising TypeError on any failure.
            report(_('Failed to process {0} with error:').format(name))
            report(res)
    if report:
        if changed:
            report('')
            report(_('Total image filesize reduced from {0} to {1} [{2:.1%} reduction]').format(
                human_readable(before_total), human_readable(after_total), (before_total - after_total)/before_total))
        else:
            report(_('Images are already fully optimized'))
    return changed, results
def compress_images(container, report=None, names=None, jpeg_quality=None, progress_callback=lambda n, t, name:True):
    '''
    Losslessly compress all compressible images in the container using
    multiple worker threads.

    :param container: Book container whose images are processed
    :param report: Optional callable receiving human-readable progress messages
    :param names: Optional collection restricting which images are processed
    :param jpeg_quality: Passed through to the compression workers
    :param progress_callback: Called as (num_done, total, name); returning a
        falsy value aborts the remaining work
    :return: (changed, results) where changed is True iff at least one image
        actually got smaller, and results maps name -> (ok, data)
    '''
    images = get_compressible_images(container)
    if names is not None:
        images &= set(names)
    results = {}
    queue = Queue()
    abort = Event()
    seen = set()
    num_to_process = 0
    for name in sorted(images):
        path = os.path.abspath(container.get_file_path_for_processing(name))
        path_key = os.path.normcase(path)
        if path_key not in seen:
            # Several manifest names can resolve to the same file on disk
            # (e.g. case-insensitive filesystems); process each file once.
            num_to_process += 1
            queue.put((name, path, container.mime_map[name]))
            seen.add(path_key)

    def pc(name):
        # Runs on worker threads; a falsy return aborts remaining work.
        keep_going = progress_callback(len(results), num_to_process, name)
        if not keep_going:
            abort.set()

    progress_callback(0, num_to_process, '')
    [Worker(abort, 'CompressImage%d' % i, queue, results, jpeg_quality, pc)
        for i in range(min(detect_ncpus(), num_to_process))]
    queue.join()
    before_total = after_total = 0
    changed_count = 0
    changed = False
    for name, (ok, res) in iteritems(results):
        name = force_unicode(name, filesystem_encoding)
        if ok:
            before, after = res
            if before != after:
                changed = True
                # Fix: the original incremented this counter for every
                # successfully processed image, so the "{3} images changed"
                # part of the summary overcounted whenever an image could
                # not be compressed further. Count only actual changes.
                changed_count += 1
            before_total += before
            after_total += after
            if report:
                if before != after:
                    report(_('{0} compressed from {1} to {2} bytes [{3:.1%} reduction]').format(
                        name, human_readable(before), human_readable(after), (before - after)/before))
                else:
                    report(_('{0} could not be further compressed').format(name))
        elif report:
            # Fix: report defaults to None; the original called it
            # unconditionally here, raising TypeError on any failure.
            report(_('Failed to process {0} with error:').format(name))
            report(res)
    if report:
        if changed:
            report('')
            report(_('Total image filesize reduced from {0} to {1} [{2:.1%} reduction, {3} images changed]').format(
                human_readable(before_total), human_readable(after_total),
                (before_total - after_total)/before_total, changed_count))
        else:
            report(_('Images are already fully optimized'))
    return changed, results