def create_themeball(report, progress=None, abort=None):
    '''
    Build a compressed theme archive ("themeball") from the icons in *report*.

    The icons are optimized in parallel, packed uncompressed into an inner
    zip, that zip is xz-compressed, and the result is placed in an outer zip
    alongside the theme metadata file and a generated cover.

    :param report: project object exposing ``path`` (source directory),
        ``name`` (theme name) and ``name_map`` (iterable of icon file names).
    :param progress: optional callable ``progress(step, message)`` used for
        progress reporting.
    :param abort: optional Event-like object; when set at a checkpoint the
        function returns early (``None`` or ``(None, None)``).
    :return: ``(themeball_bytes, theme_name)`` on success.
    '''
    pool = ThreadPool(processes=cpu_count())
    buf = BytesIO()
    num = count()
    error_occurred = Event()

    def optimize(name):
        # Runs on a pool thread: losslessly optimize a single image in place.
        if abort is not None and abort.is_set():
            return
        if error_occurred.is_set():
            return
        try:
            i = next(num)
            if progress is not None:
                progress(i, _('Optimizing %s') % name)
            srcpath = os.path.join(report.path, name)
            ext = srcpath.rpartition('.')[-1].lower()
            if ext == 'png':
                optimize_png(srcpath)
            elif ext in ('jpg', 'jpeg'):
                optimize_jpeg(srcpath)
        except Exception:
            # FIX: the event was previously never set, making the
            # error_occurred.is_set() short-circuit above unreachable.
            # Set it so remaining workers stop doing pointless work.
            error_occurred.set()
            return sys.exc_info()

    errors = tuple(filter(None, pool.map(optimize, tuple(report.name_map))))
    pool.close()
    pool.join()
    if abort is not None and abort.is_set():
        return
    if errors:
        # Re-raise the first worker failure with its original traceback
        reraise(*errors[0])
    if progress is not None:
        progress(next(num), _('Creating theme file'))
    with ZipFile(buf, 'w') as zf:
        # Store uncompressed: the whole inner zip is xz-compressed below
        for name in report.name_map:
            srcpath = os.path.join(report.path, name)
            with lopen(srcpath, 'rb') as f:
                zf.writestr(name, f.read(), compression=ZIP_STORED)
    buf.seek(0)
    if abort is not None and abort.is_set():
        return None, None
    if progress is not None:
        progress(next(num), _('Compressing theme file'))
    import lzma
    compressed = lzma.compress(buf.getvalue(), format=lzma.FORMAT_XZ, preset=9)
    buf = BytesIO()
    prefix = report.name
    if abort is not None and abort.is_set():
        return None, None
    with ZipFile(buf, 'w') as zf:
        with lopen(os.path.join(report.path, THEME_METADATA), 'rb') as f:
            zf.writestr(prefix + '/' + THEME_METADATA, f.read())
        zf.writestr(prefix + '/' + THEME_COVER, create_cover(report))
        zf.writestr(prefix + '/' + 'icons.zip.xz', compressed, compression=ZIP_STORED)
    if progress is not None:
        progress(next(num), _('Finished'))
    return buf.getvalue(), prefix
def __init__(self, items, level1=DEFAULT_LEVEL1, level2=DEFAULT_LEVEL2, level3=DEFAULT_LEVEL3, scorer=None):
    '''
    Prepare the matcher: lazily start the shared worker pool on first use,
    normalize *items* to NFC strings and partition them among the workers.
    '''
    with wlock:
        if not workers:
            # One worker per CPU (at least one); the pool is shared by all
            # instances, hence the lock and the module-level `workers` list.
            requests, results = Queue(), Queue()
            pool = [Worker(requests, results) for _ in range(max(1, cpu_count()))]
            for worker_thread in pool:
                worker_thread.start()
            workers.extend(pool)
    # Drop falsy entries and normalize everything to NFC unicode
    normalized = (normalize('NFC', str(candidate)) for candidate in items if candidate)
    self.items = items = tuple(normalized)
    tasks = split(items, len(workers))
    # Map per-task positions back to positions in the full item tuple
    self.task_maps = [{j: i for j, (i, _) in enumerate(task)} for task in tasks]
    scorer = scorer or default_scorer
    self.scorers = [
        scorer(tuple(map(itemgetter(1), task_items))) for task_items in tasks
    ]
    self.sort_keys = None
def __init__(self, notify_on_job_done=lambda x: x, pool_size=None, limit=sys.maxsize, enforce_cpu_limit=True):
    '''
    Start the job-server thread that manages a pool of worker processes.

    :param notify_on_job_done: callable invoked when a job completes.
    :param pool_size: maximum number of concurrent workers; when None,
        *limit* is used instead.
    :param limit: upper bound on the pool size.
    :param enforce_cpu_limit: when True, cap *limit* at cpu_count().
    '''
    Thread.__init__(self)
    self.daemon = True
    # Module-level counter gives each server instance a unique id
    global _counter
    self.id = _counter + 1
    _counter += 1
    if enforce_cpu_limit:
        limit = min(limit, cpu_count())
    self.pool_size = limit if pool_size is None else pool_size
    self.notify_on_job_done = notify_on_job_done
    # Random key authenticates worker connections to the listener
    self.auth_key = os.urandom(32)
    self.address, self.listener = create_listener(self.auth_key, backlog=4)
    self.add_jobs_queue, self.changed_jobs_queue = Queue(), Queue()
    self.kill_queue = Queue()
    self.waiting_jobs = []
    self.workers = deque()
    self.launched_worker_count = 0
    self._worker_launch_lock = RLock()
    # Begin processing jobs immediately (runs this thread's run() method)
    self.start()
def __init__(self, notify_on_job_done=lambda x: x, pool_size=None, limit=sys.maxsize, enforce_cpu_limit=True):
    '''
    Start the job-server thread that manages a pool of worker processes.

    :param notify_on_job_done: callable invoked when a job completes.
    :param pool_size: maximum number of concurrent workers; when None,
        *limit* is used instead.
    :param limit: upper bound on the pool size. Default is sys.maxsize
        (sys.maxint was removed in Python 3).
    :param enforce_cpu_limit: when True, cap *limit* at cpu_count().
    '''
    Thread.__init__(self)
    self.daemon = True
    # Module-level counter gives each server instance a unique id
    global _counter
    self.id = _counter + 1
    _counter += 1
    if enforce_cpu_limit:
        limit = min(limit, cpu_count())
    self.pool_size = limit if pool_size is None else pool_size
    self.notify_on_job_done = notify_on_job_done
    # Random key authenticates worker connections to the listener
    self.auth_key = os.urandom(32)
    self.address = arbitrary_address('AF_PIPE' if iswindows else 'AF_UNIX')
    # On Windows, drop a leading drive-style '?:' prefix from the pipe address
    if iswindows and self.address[1] == ':':
        self.address = self.address[2:]
    self.listener = Listener(address=self.address, authkey=self.auth_key, backlog=4)
    self.add_jobs_queue, self.changed_jobs_queue = Queue(), Queue()
    self.kill_queue = Queue()
    self.waiting_jobs = []
    self.workers = deque()
    self.launched_worker_count = 0
    self._worker_launch_lock = RLock()
    # Begin processing jobs immediately (runs this thread's run() method)
    self.start()
def __init__(self, notify_on_job_done=lambda x: x, pool_size=None, limit=sys.maxsize, enforce_cpu_limit=True):
    '''
    Start the job-server thread managing a pool of worker processes.

    :param notify_on_job_done: callable invoked when a job completes.
    :param pool_size: maximum number of concurrent workers; defaults to
        *limit* when None.
    :param limit: upper bound on the pool size. Default is sys.maxsize;
        the previous default, sys.maxint, does not exist in Python 3.
    :param enforce_cpu_limit: when True, cap *limit* at cpu_count().
    '''
    Thread.__init__(self)
    self.daemon = True
    # Module-level counter gives each server instance a unique id
    global _counter
    self.id = _counter + 1
    _counter += 1
    if enforce_cpu_limit:
        limit = min(limit, cpu_count())
    self.pool_size = limit if pool_size is None else pool_size
    self.notify_on_job_done = notify_on_job_done
    # Random key authenticates worker connections to the listener
    self.auth_key = os.urandom(32)
    self.address = arbitrary_address('AF_PIPE' if iswindows else 'AF_UNIX')
    # On Windows, drop a leading drive-style '?:' prefix from the pipe address
    if iswindows and self.address[1] == ':':
        self.address = self.address[2:]
    self.listener = Listener(address=self.address, authkey=self.auth_key, backlog=4)
    self.add_jobs_queue, self.changed_jobs_queue = Queue(), Queue()
    self.kill_queue = Queue()
    self.waiting_jobs = []
    self.workers = deque()
    self.launched_worker_count = 0
    self._worker_launch_lock = RLock()
    # Begin processing jobs immediately (runs this thread's run() method)
    self.start()
def create_themeball(report, progress=None, abort=None):
    '''
    Build a compressed theme archive ("themeball") from the icons in *report*.

    The icons are optimized in parallel, packed uncompressed into an inner
    zip, that zip is compressed via the project ``compress()`` helper, and
    the result is placed in an outer zip alongside the theme metadata file
    and a generated cover.

    :param report: project object exposing ``path`` (source directory),
        ``name`` (theme name) and ``name_map`` (mapping of icon file names).
    :param progress: optional callable ``progress(step, message)`` used for
        progress reporting.
    :param abort: optional Event-like object; when set at a checkpoint the
        function returns early (``None`` or ``(None, None)``).
    :return: ``(themeball_bytes, theme_name)`` on success.
    '''
    pool = ThreadPool(processes=cpu_count())
    buf = BytesIO()
    num = count()
    error_occurred = Event()

    def optimize(name):
        # Runs on a pool thread: losslessly optimize a single image in place.
        if abort is not None and abort.is_set():
            return
        if error_occurred.is_set():
            return
        try:
            i = next(num)
            if progress is not None:
                progress(i, _('Optimizing %s') % name)
            srcpath = os.path.join(report.path, name)
            ext = srcpath.rpartition('.')[-1].lower()
            if ext == 'png':
                optimize_png(srcpath)
            elif ext in ('jpg', 'jpeg'):
                optimize_jpeg(srcpath)
        except Exception:
            # FIX: the event was previously never set, making the
            # error_occurred.is_set() short-circuit above unreachable.
            error_occurred.set()
            return sys.exc_info()

    # FIX: dict.iterkeys() was removed in Python 3; iterating the mapping
    # directly yields its keys and works on both versions.
    errors = tuple(filter(None, pool.map(optimize, tuple(report.name_map))))
    pool.close()
    pool.join()
    if abort is not None and abort.is_set():
        return
    if errors:
        e = errors[0]
        # FIX: `raise e[0], e[1], e[2]` is Python 2-only syntax (a syntax
        # error in Python 3); re-raise with the original traceback instead.
        raise e[1].with_traceback(e[2])
    if progress is not None:
        progress(next(num), _('Creating theme file'))
    with ZipFile(buf, 'w') as zf:
        # Store uncompressed: the whole inner zip is compressed below
        for name in report.name_map:
            srcpath = os.path.join(report.path, name)
            with lopen(srcpath, 'rb') as f:
                zf.writestr(name, f.read(), compression=ZIP_STORED)
    buf.seek(0)
    out = BytesIO()
    if abort is not None and abort.is_set():
        return None, None
    if progress is not None:
        progress(next(num), _('Compressing theme file'))
    compress(buf, out, level=9)
    buf = BytesIO()
    prefix = report.name
    if abort is not None and abort.is_set():
        return None, None
    with ZipFile(buf, 'w') as zf:
        with lopen(os.path.join(report.path, THEME_METADATA), 'rb') as f:
            zf.writestr(prefix + '/' + THEME_METADATA, f.read())
        zf.writestr(prefix + '/' + THEME_COVER, create_cover(report))
        zf.writestr(prefix + '/' + 'icons.zip.xz', out.getvalue(), compression=ZIP_STORED)
    if progress is not None:
        progress(next(num), _('Finished'))
    return buf.getvalue(), prefix
def run_checkers(func, args_list):
    '''
    Run *func* over every entry of *args_list* on a thread pool sized to the
    CPU count, concatenating the per-task results. Raises if any worker
    reports a traceback.
    '''
    pool = ThreadPool(cpu_count())
    collected = []
    for result, tb in pool.map(partial(worker, func), args_list):
        if tb is not None:
            raise Exception("Failed to run worker: \n%s" % tb)
        collected.extend(result)
    return collected
def run_checkers(func, args_list):
    '''
    Run *func* over every entry of *args_list* on a thread pool sized to the
    CPU count, concatenating the per-task results into one list.

    Each pool task returns a ``(result, tb)`` pair; a non-None ``tb`` is a
    worker traceback string and aborts the whole run with an Exception.
    '''
    num = cpu_count()
    pool = ThreadPool(num)
    ans = []
    for result, tb in pool.map(partial(worker, func), args_list):
        if tb is not None:
            raise Exception('Failed to run worker: \n%s' % tb)
        ans.extend(result)
    return ans
def __init__(self, items, level1=DEFAULT_LEVEL1, level2=DEFAULT_LEVEL2, level3=DEFAULT_LEVEL3, scorer=None):
    '''
    Prepare the matcher: lazily start the shared worker pool on first use,
    normalize *items* to NFC strings and partition them among the workers.
    '''
    with wlock:
        if not workers:
            # One worker per CPU (at least one); the pool is shared by all
            # instances, hence the lock and the module-level `workers` list.
            requests, results = Queue(), Queue()
            w = [Worker(requests, results) for i in range(max(1, cpu_count()))]
            # Plain loop instead of a side-effect-only list comprehension
            for x in w:
                x.start()
            workers.extend(w)
    # FIX: the `unicode` builtin was removed in Python 3; use str() as the
    # Python 3 version of this method elsewhere in the file already does.
    items = map(lambda x: normalize('NFC', str(x)), filter(None, items))
    self.items = items = tuple(items)
    tasks = split(items, len(workers))
    # Map per-task positions back to positions in the full item tuple
    self.task_maps = [{j: i for j, (i, _) in enumerate(task)} for task in tasks]
    scorer = scorer or default_scorer
    self.scorers = [scorer(tuple(map(itemgetter(1), task_items))) for task_items in tasks]
    self.sort_keys = None
def __init__(self, notify_on_job_done=lambda x: x, pool_size=None, limit=sys.maxsize, enforce_cpu_limit=True):
    '''
    Start the job-server thread that manages a pool of workers.

    :param notify_on_job_done: callable invoked when a job completes.
    :param pool_size: maximum number of concurrent workers; when None,
        *limit* is used instead.
    :param limit: upper bound on the pool size.
    :param enforce_cpu_limit: when True, cap *limit* at cpu_count().
    '''
    Thread.__init__(self)
    self.daemon = True
    # Module-level counter gives each server instance a unique id
    self.id = next(server_counter) + 1
    if enforce_cpu_limit:
        limit = min(limit, cpu_count())
    self.pool_size = pool_size if pool_size is not None else limit
    self.notify_on_job_done = notify_on_job_done
    self.add_jobs_queue = Queue()
    self.changed_jobs_queue = Queue()
    self.kill_queue = Queue()
    self.waiting_jobs = []
    self.workers = deque()
    # Advance once so the first launched worker is numbered 1, not 0
    self.launched_worker_counter = count()
    next(self.launched_worker_counter)
    # Begin processing jobs immediately (runs this thread's run() method)
    self.start()
def __init__(self, notify_on_job_done=lambda x: x, pool_size=None, limit=sys.maxsize, enforce_cpu_limit=True):
    '''
    Start the job-server thread that manages a pool of worker processes.

    :param notify_on_job_done: callable invoked when a job completes.
    :param pool_size: maximum number of concurrent workers; when None,
        *limit* is used instead.
    :param limit: upper bound on the pool size.
    :param enforce_cpu_limit: when True, cap *limit* at cpu_count().
    '''
    Thread.__init__(self)
    self.daemon = True
    # Module-level counter gives each server instance a unique id
    global _counter
    self.id = _counter+1
    _counter += 1
    if enforce_cpu_limit:
        limit = min(limit, cpu_count())
    self.pool_size = limit if pool_size is None else pool_size
    self.notify_on_job_done = notify_on_job_done
    # Random key authenticates worker connections to the listener
    self.auth_key = os.urandom(32)
    self.address, self.listener = create_listener(self.auth_key, backlog=4)
    self.add_jobs_queue, self.changed_jobs_queue = Queue(), Queue()
    self.kill_queue = Queue()
    self.waiting_jobs = []
    self.workers = deque()
    self.launched_worker_count = 0
    self._worker_launch_lock = RLock()
    # Begin processing jobs immediately (runs this thread's run() method)
    self.start()
def __init__(self):
    '''Initialize with no workers and a CPU-count-based worker cap.'''
    self.max_workers = cpu_count()  # cap concurrency at the number of CPUs
    self.workers = []               # currently active workers