def count(*args, **kwargs):
    """Count things, dispatching on the call shape.

    Keyword arguments or a non-single positional list are forwarded to
    ``_count`` unchanged.  A single iterable argument is counted directly:
    the number of *distinct* elements is returned.  A single non-iterable
    argument is also forwarded to ``_count``.
    """
    if kwargs:
        return _count(*args, **kwargs)
    if len(args) != 1:
        return _count(*args)
    sole = args[0]
    try:
        elements = iter(sole)
    except TypeError:
        # Not iterable: let the underlying counter deal with it.
        return _count(sole)
    return len(set(elements))
def count(*args, **kwargs):
    """Count things, dispatching on the call shape.

    Keyword arguments or a non-single positional list are forwarded to
    ``_count`` unchanged.  A single argument exposing a zero-argument
    ``count()`` method (e.g. an ORM queryset) has that method called;
    otherwise a single iterable argument is counted as the number of
    distinct elements, and a non-iterable one is forwarded to ``_count``.

    BUG FIX: builtin sequences (``list``, ``str``, ``tuple``) define a
    ``count`` method that *requires* an argument, so the old unconditional
    ``arg.count()`` raised TypeError for them.  We now fall through to the
    iterable path in that case.
    """
    if kwargs:
        return _count(*args, **kwargs)
    if len(args) != 1:
        return _count(*args)
    arg = args[0]
    count_method = getattr(arg, 'count', None)
    if callable(count_method):
        try:
            return count_method()
        except TypeError:
            # count() demanded arguments (builtin sequence) -- count the
            # distinct elements instead.
            pass
    try:
        it = iter(arg)
    except TypeError:
        return _count(arg)
    return len(set(it))
def sieve(last):
    """Return the list of primes strictly below ``last`` (sieve of Eratosthenes).

    BUG FIX: the original assigned into ``range(last)``, which is an
    immutable object in Python 3 and raises TypeError on item assignment;
    it must be materialized as a list first.
    """
    primes = list(range(last))  # index i holds i; composites get zeroed
    for i in _count(2):
        if i * i >= last:
            # No composite below ``last`` has all factors >= i.
            break
        if primes[i] == 0:
            continue  # i already known composite; its multiples are covered
        for j in _count(2):
            if i * j >= last:
                break
            primes[i * j] = 0
    # 0 and 1 are not primes; zeroed slots were composites.
    return [prime for prime in primes if prime >= 2]
def pythagorean_triples(n=None):
    """Generate n Pythagorean triples ordered by the value of c ascending.

    If n is None or not given, generate infinitely. Default is None.

    Examples:

    >>> list(pythagorean_triples(5))
    [(3, 4, 5), (5, 12, 13), (15, 8, 17), (7, 24, 25), (21, 20, 29)]
    """
    steps = iter(_count() if n is None else range(n))
    # Berggren's tree: every primitive triple is reached from (3, 4, 5) by
    # multiplying with one of three fixed matrices -- the base matrix with
    # each row sign-flipped according to ``signs``.
    base = ((1, 2, 2), (2, 1, 2), (2, 2, 3))
    signs = ((1, -1, 1), (1, 1, 1), (-1, 1, 1))
    matrices = tuple(
        tuple(tuple(entry * sign for entry in row)
              for row, sign in zip(base, sign_row))
        for sign_row in signs
    )
    # Min-heap keyed on c keeps the output ordered by hypotenuse.
    heap = [(5, (3, 4, 5))]
    for _ in steps:
        _, triple = _heappop(heap)
        yield triple
        for matrix in matrices:
            # Row-vector times matrix: dot the triple with each column.
            child = tuple(sum(a * b for a, b in zip(triple, column))
                          for column in zip(*matrix))
            _heappush(heap, (child[2], child))
def counter():
    """Yield 0, 1, 2, ...

    Stops after the module/enclosing-scope value ``count`` items when
    ``count`` is truthy; otherwise yields without bound.
    """
    source = range(count) if count else _count()
    yield from source
def frange(start, stop, step):
    """Yield floats from ``start`` toward ``stop`` in increments of ``step``.

    Works like :func:`range` for floats: the values are ``start + i*step``
    (computed fresh each iteration, so no cumulative rounding error) and
    ``stop`` is exclusive.

    BUG FIX / generalization: a negative ``step`` used to terminate
    immediately (the ``curr >= stop`` test fired on the first value); it now
    counts downward, mirroring ``range`` semantics.  ``step == 0`` used to
    loop forever yielding ``start``; it now raises ValueError, as ``range``
    does.

    Raises:
        ValueError: if ``step`` is zero.
    """
    if step == 0:
        raise ValueError('step must not be zero')
    for i in _count():
        curr = start + i * step
        if (step > 0 and curr >= stop) or (step < 0 and curr <= stop):
            break
        yield curr
def __init__(self, workers_count, piece_queue):
    """Set up a pool of hashing worker threads.

    Args:
        workers_count: number of worker threads to spawn.
        piece_queue: queue of pieces the workers consume.
    """
    self._piece_queue = piece_queue
    # Queue the workers push finished hashes into.
    self._hash_queue = ExhaustQueue(name='hashes')
    self._workers_count = workers_count
    # Shutdown flag -- presumably polled by _worker; TODO confirm.
    self._stop = False
    self._name_counter = _count().__next__
    self._name_counter() # Consume 0 so first worker is 1
    # Lock serializing name generation across workers.
    self._name_counter_lock = threading.Lock()
    # Created last: workers may start running as soon as the pool exists,
    # so all other attributes must already be in place.
    self._pool = ThreadPool(workers_count, self._worker)
def fromBox(self, name, strings, objects, proto):
    """Reassemble a value that was split across numbered keys.

    The first chunk is stored under ``name``; continuation chunks live
    under ``"<name>.2"``, ``"<name>.3"``, ... until a key is absent.  The
    concatenated payload is passed through ``self.build_value`` and the
    result stored in ``objects[name]``.
    """
    assembled = _StringIO()
    assembled.write(strings.get(name))
    index = 2
    while True:
        chunk = strings.get("%s.%d" % (name, index))
        if chunk is None:
            break
        assembled.write(chunk)
        index += 1
    objects[name] = self.build_value(assembled.getvalue())
def _primes() -> _Iterable[int]:
    """Yield the primes in increasing order, without end.

    2 is yielded first; thereafter only odd candidates are tested by trial
    division.

    PERFORMANCE FIX: the original tested every divisor in
    ``range(2, n // 2)``.  A composite n always has a factor <= sqrt(n), and
    an odd n has no even factors, so checking odd divisors up to
    ``isqrt(n)`` is sufficient and turns each test from O(n) into O(sqrt n).
    The dead ``else: continue`` branch is gone.
    """
    from math import isqrt  # local import: keeps the module import block untouched
    yield 2
    for n in _count(3, 2):
        # n % x is truthy exactly when x does not divide n.
        if all(n % x for x in range(3, isqrt(n) + 1, 2)):
            yield n
def _splitter(stringiterable, name=_unknownname):
    """Tokenize each line of ``stringiterable`` into a ``tdtokens`` record.

    Lines are numbered from 1 and lines that yield no tokens are dropped.
    When ``name`` is not supplied, the iterable's own ``name`` attribute
    (e.g. a file object's) is used if present.
    """
    if name is _unknownname and hasattr(stringiterable, 'name'):
        name = stringiterable.name
    numbered = _izip(
        _count(1),
        ((text, _str_split(text)) for text in stringiterable))
    return iter(tdtokens(name, lineno, *pair)
                for lineno, pair in numbered
                if pair[1])
def paths(in_snot, base_path="$"):
    """Return the list of bracketed paths to every leaf of a nested structure.

    Mappings (detected via ``iteritems_attr``) contribute ``[key]`` segments,
    lists contribute ``[index]`` segments, and anything else is a leaf whose
    accumulated path is returned.

    BUG FIXES: the original flattened with ``reduce``, which is not a builtin
    in Python 3 (NameError) and raises TypeError on an empty mapping/list;
    nested comprehensions flatten directly and yield ``[]`` for empty
    containers.  ``_izip(_count(), ...)`` is replaced by ``enumerate``.
    """
    if hasattr(in_snot, iteritems_attr):
        return [p
                for k, v in getattr(in_snot, iteritems_attr)()
                for p in paths(v, base_path + "[" + k + "]")]
    elif isinstance(in_snot, list):
        return [p
                for k, v in enumerate(in_snot)
                for p in paths(v, base_path + "[" + str(k) + "]")]
    else:
        # Scalar leaf: the path accumulated so far.
        return [str(base_path)]
def __init__(self, name, time, colnames, data, embryospergene=None):
    """Hold one table of per-row measurements with named columns.

    Args:
        name: dataset identifier.
        time: timepoint label for this dataset.
        colnames: column names of ``data``; their order defines the index.
        data: 2-D numpy array indexed as data[row, column] -- assumes
            columns line up with ``colnames``; TODO confirm against caller.
        embryospergene: optional bookkeeping value, stored as-is.
    """
    self.name = name
    self.time = time
    self.column_names = colnames
    # Map column name -> column position in ``data``.
    self.column_index = _OD(_izip(colnames, _count()))
    self.pos = None
    self.Npos = None
    self.pos_other = dict()
    # When x/y/z columns all exist, stack them into an (N, 3) position array.
    if "x" in self.column_index and "y" in self.column_index and "z" in self.column_index:
        self.pos = _np.vstack(
            [data[:, self.column_index[j]] for j in ["x", "y", "z"]]).T
    # Same for the "N"-prefixed coordinate triple -- presumably a second
    # position set; NOTE(review): confirm what Nx/Ny/Nz denote.
    if "Nx" in self.column_index and "Ny" in self.column_index and "Nz" in self.column_index:
        self.Npos = _np.vstack(
            [data[:, self.column_index[j]] for j in ["Nx", "Ny", "Nz"]]).T
    self.data = data
    self.embryospergene = embryospergene
def _populate_py(in_snot, in_vector, begin=0):
    """Write values from flat ``in_vector`` back into nested ``in_snot`` in place.

    Containers are walked in order: mappings (detected via
    ``iteritems_attr``) and lists recurse; scalar slots are overwritten with
    consecutive entries of ``in_vector`` starting at ``begin``.

    Returns:
        The number of vector entries consumed.

    BUG FIX: a scalar top-level argument used to leave ``the_iter`` as None
    and then crash iterating it; it now returns 0 immediately (nothing to
    populate).
    """
    if hasattr(in_snot, iteritems_attr):
        the_iter = getattr(in_snot, iteritems_attr)()
    elif isinstance(in_snot, list):
        the_iter = enumerate(in_snot)
    else:
        # Should not descend into scalars: nothing consumed.
        return 0
    num_consumed = 0
    for key, val in the_iter:
        if hasattr(val, iteritems_attr) or isinstance(val, list):
            num_consumed += _populate_py(val, in_vector, begin + num_consumed)
        else:
            in_snot[key] = in_vector[begin + num_consumed]
            num_consumed += 1
    return num_consumed
def inf_cycle(input_iter):
    r"""iterz.inf_cycle(input_iter)

    This will cycle a sized, indexable iterable indefinitely.

    Usage:
    >>> alist = [1, 2, 4, 8]
    >>> inf_cycle_iter = iterz.inf_cycle(alist)
    >>> [inf_cycle_iter.__next__() for _ in range(9)]
    [1, 2, 4, 8, 1, 2, 4, 8, 1]

    It works with any sequence containing any datatypes:
    >>> atuple = (1, 'foo', 3.0)
    >>> inf_cycle_iter = iterz.inf_cycle(atuple)
    >>> [inf_cycle_iter.__next__() for i in range(5)]
    [1, 'foo', 3.0, 1, 'foo']

    Raises:
        TypeError: if ``input_iter`` has no length / cannot be indexed.

    BUG FIXES: the original validated ``iter(input_iter)`` behind a bare
    ``except:`` (which would even swallow KeyboardInterrupt) and then never
    used the result; a generator passed that check but crashed later on
    ``len``.  The length is now validated directly with a narrow except,
    and the dead binding is gone.
    """
    try:
        length = len(input_iter)
    except TypeError:
        raise TypeError('\'input_iter\' must be \'iter\'')
    if length < 1:
        # Nothing to cycle: an empty generator.
        return
    for i in _count():
        # len() is re-read each pass so mutations of the sequence are
        # reflected, exactly as before.
        yield input_iter[i % len(input_iter)]
# NOTE(review): fragment -- the first two statements are the tail of a
# Barrier accessor whose `def` (and enclosing condition) lie before this view.
return self._count
return 0

@property
def broken(self):
    """Return True if the barrier is in a broken state."""
    # -2 is the sentinel state set when the barrier is broken.
    return self._state == -2

# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
    pass

# Helper to generate new thread names
_counter = _count().__next__
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
    # Render default names like "Thread-1", "Thread-2", ...
    return template % _counter()

# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {}    # maps thread id to Thread object
_limbo = {}     # started-but-not-yet-running threads -- TODO confirm
_dangling = WeakSet()

# Main class for threads
def _format_iter(data):
    # make a nice text string from iter
    # Joins items with commas plus an Oxford ", and" before the last one;
    # a single item is returned as-is, two items are joined with " and ".
    # NOTE(review): an empty iterable falls into the else branch and raises
    # IndexError at data[-1] -- confirm callers never pass one.
    data = list(data)
    result = ''
    if len(data) == 1:
        result = data[0]
    elif len(data) == 2:
        result = ' and '.join(data)
    else:
        result = ', '.join(data[:-1])
        result += ', and {}'.format(data[-1])
    return result

# Helper to generate new thread names
_counter = _count()
next(_counter)  # consume 0 so the first generated name is read_until-1
def _new_thread_name(template="read_until-%d"):
    # Fill the template with the next counter value.
    return template % next(_counter)

# The maximum allowed minimum read chunk size. Filtering of small read chunks
# from the gRPC stream is buggy. The value 0 effectively disables the
# filtering functionality.
ALLOWED_MIN_CHUNK_SIZE = 0

class ReadUntilClient(object):
    # NOTE(review): the body of __init__ (and the rest of this class)
    # continues beyond this view.  The mutable default for
    # ``prefilter_classes`` is shared across calls -- flag for follow-up.
    def __init__(self, mk_host='127.0.0.1', mk_port=8000, cache_size=512, cache_type=ReadCache, filter_strands=True, one_chunk=True, prefilter_classes={'strand', 'adapter'}):
def optimizeImage(self, image, baseValue, iterations=None, maxError=0.01, sectionGenerator=None, goal=0.5, axis=0, doError=False, useLerp=True):
    """Search for the band half-width ``v`` around ``baseValue`` whose measured
    coverage (via ``self._test``) is closest to ``goal``, then store the band
    bounds in ``self.lo`` / ``self.up`` (both wrapped into [0, 1)).

    Args:
        image: PIL image; converted to RGB before analysis.
        baseValue: center of the band being optimized.
        iterations: fixed number of search steps; None means run until
            ``maxError`` is met.
        maxError: stop once ``abs(p - goal)`` drops below this; None disables.
        sectionGenerator: section source; defaults to a
            FullSectionGenerator seeded from ``self.seed``.
        goal: target coverage fraction.
        axis: image axis whose extent is sectioned.
        doError: if True, run one extra measurement and log the final error.
        useLerp: interpolate the next ``v`` from the bracketing samples
            instead of plain bisection.

    Raises:
        ValueError: if both ``iterations`` and ``maxError`` are None.
    """
    if iterations is None and maxError is None:
        raise ValueError('Either iterations or error must not be None')
    image = image.convert('RGB')
    if sectionGenerator is None:
        sectionGenerator = FullSectionGenerator(seed=self.seed)
    A = _np.array(image)
    totalPixels = A.shape[0] * A.shape[1]
    xRange = A.shape[axis]
    sections = sectionGenerator.generate(xRange)
    # Bracket on v: [mi, ma], with measured responses [miy, may].
    mi, ma = 0.0, 0.5
    miy, may = 0.0, 1.0
    if iterations is not None:
        iterator = range(iterations)
    else:
        iterator = _count()  # run until maxError is reached
    # NOTE(review): with ``iterations=0`` the loop never runs and ``v`` below
    # is unbound (NameError) -- confirm callers always pass >= 1 or None.
    for i in iterator:
        if useLerp:
            # Inverse interpolation toward goal.
            # NOTE(review): this looks like it should be
            # mi + (goal - miy) * (ma - mi) / (may - miy); confirm intent.
            v = (goal - miy) * (ma - mi) / (may - miy)
        else:
            v = (mi + ma) / 2  # plain bisection
        p = self._test(i, A, v, baseValue, totalPixels, sections, axis)
        if p > goal:
            # Overshot: tighten the upper end of the bracket.
            ma = v
            may = p
        else:
            mi = v
            miy = p
        E = abs(p - goal)
        _logging.log(
            8, f'Iteration = {i} : Error = {E} : Max Error = {maxError}')
        if maxError is not None and E < maxError:
            break
    if doError:
        # One final measurement purely for logging the achieved error.
        p = self._test(-1, A, v, baseValue, totalPixels, sections, axis)
        _logging.log(8, f'Error - {abs(goal - p)}')
    # Wrap the band bounds into [0, 1).
    self.lo = (baseValue - v) % 1
    self.up = (baseValue + v) % 1
# NOTE(review): fragment -- module prologue plus the start of KernelLogger;
# the class body continues beyond this view.
if six.WINDOWS:
    # Only Windows builds of subprocess expose these; used to hide the
    # kernel's console window -- TODO confirm against the spawning code.
    from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW

__all__ = ['WolframKernelController']

logger = logging.getLogger(__name__)

# Kernel-side verbosity levels (1-4) mapped to Python logging levels.
TO_PY_LOG_LEVEL = {
    1: logging.DEBUG,
    2: logging.INFO,
    3: logging.WARN,
    4: logging.FATAL
}
# Inverse mapping: Python logging level -> kernel verbosity.
FROM_PY_LOG_LEVEL = dict((v, k) for k, v in TO_PY_LOG_LEVEL.items())

# Monotonic id source for naming controller threads; 0 is consumed so the
# first thread gets id 1.
_thread_counter = _count().__next__
_thread_counter()

class KernelLogger(Thread):
    """ Asynchronous logger for kernel messages.

    A consumer of messages read from a PUB/SUB socket that turn them into log
    messages as expected by the :mod:`logging` module.
    """

    MAX_MESSAGE_BEFORE_QUIT = 32

    def __init__(self, name=None, level=logging.WARN):
        super().__init__(name=name)
        # SUB socket the kernel publishes its log messages to.
        self.socket = Socket(zmq_type=zmq.SUB)
        self.socket.bind()
# NOTE(review): fragment -- opens inside the docstring of Event.wait
# (Python 2 threading); the method header lies before this view.
    When the timeout argument is present and not None, it should be a
    floating point number specifying a timeout for the operation in seconds
    (or fractions thereof).

    This method returns the internal flag on exit, so it will always return
    True except if a timeout is given and the operation times out.

    """
    with self.__cond:
        if not self.__flag:
            # Block until set() fires or the timeout elapses.
            self.__cond.wait(timeout)
        return self.__flag

# Helper to generate new thread names
_counter = _count().next   # Python 2: iterators expose .next, not __next__
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
    # Render default names like "Thread-1", "Thread-2", ...
    return template % _counter()

# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {}    # maps thread id to Thread object
_limbo = {}     # started-but-not-yet-running threads -- TODO confirm

# Main class for threads
class Thread(_Verbose):
    """A class that represents a thread of control.
def count(start=0, step=1):
    """Yield ``start``, ``start + step``, ``start + 2*step``, ... without end.

    Each value is computed as ``start + step * n`` from scratch, so float
    steps do not accumulate rounding error.
    """
    n = 0
    while True:
        yield start + step * n
        n += 1
# NOTE(review): fragment -- the first statements are the tail of a Barrier
# accessor whose `def` lies before this view.
if self._state == 0:
    # Barrier is in its normal working state; report the waiter count.
    return self._count
return 0

@property
def broken(self):
    """Return True if the barrier is in a broken state."""
    # -2 is the sentinel state set when the barrier is broken.
    return self._state == -2

# exception raised by the Barrier class
class BrokenBarrierError(RuntimeError):
    pass

# Helper to generate new thread names
_counter = _count().__next__
_counter() # Consume 0 so first non-main thread has id 1.
def _newname(template="Thread-%d"):
    # Render default names like "Thread-1", "Thread-2", ...
    return template % _counter()

# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {}    # maps thread id to Thread object
_limbo = {}     # started-but-not-yet-running threads -- TODO confirm
_dangling = WeakSet()

# Main class for threads
class Thread:
    """A class that represents a thread of control.
def _ordinary_tree_generator(sort=False):
    """Yield every unordered tree, grouped by increasing order (size).

    For each order 1, 2, 3, ... all trees of that order are produced from
    the ``the_trees`` registry before moving on; the stream never ends.
    """
    for order in _count(1):
        yield from the_trees[order].trees(sort)
def zip_inf_cycle(*input_iters) -> tuple:
    r"""iterz.zip_inf_cycle(*input_iters)

    Similar to zip but cycles all lists indefinitely.

    Usage:
    >>> alist = [1, 2]
    >>> blist = [4, 5, 6, 7, 8]
    >>> zip_inf_cycle_iter = iterz.zip_inf_cycle(alist, blist)
    >>> [zip_inf_cycle_iter.__next__() for _ in range(5)]
    [(1, 4), (2, 5), (1, 6), (2, 7), (1, 8)]

    It works with any number of sequences of any datatypes, e.g. tuples,
    lists and strings together.

    Raises:
        TypeError: if called with no arguments or a non-iterable argument.
        IndexError: if any argument is empty.

    BUG FIXES: the original validated each argument behind a bare
    ``except:`` (which would even swallow KeyboardInterrupt) and discarded
    the ``iter`` result into an unused binding; a zero-argument call used to
    yield empty tuples forever instead of failing fast.
    """
    if not input_iters:
        # Without this guard the generator would yield () forever.
        raise TypeError('\'*input_iters\' must be one or more \'iter\'')
    for input_iter in input_iters:
        try:
            iter(input_iter)
        except TypeError:
            raise TypeError('\'*input_iters\' must be one or more \'iter\'')
    if any(len(input_iter) == 0 for input_iter in input_iters):
        raise IndexError('all elements of \'*input_iters\' must have len > 0')
    for i in _count():
        # Each sequence wraps around independently at its own length.
        yield tuple(seq[i % len(seq)] for seq in input_iters)
def _splitter(stringiterable, name=_unknownname):
    """Turn each line of ``stringiterable`` into a ``tdtokens`` record.

    Records carry the source name, a 1-based line number, the raw text and
    its token list; lines whose token list is empty are skipped.  When
    ``name`` is not supplied, the iterable's own ``name`` attribute (if it
    has one) is used.
    """
    if name is _unknownname and hasattr(stringiterable, 'name'):
        name = stringiterable.name
    # zip() grabs an iterator over the input eagerly, like the original.
    numbered_lines = _izip(_count(1), stringiterable)

    def _records():
        for lineno, text in numbered_lines:
            tokens = _str_split(text)
            if tokens:
                yield tdtokens(name, lineno, text, tokens)

    return _records()