def enumerate_devices (inactive=False):
    '''Returns an iterator over all active ('UP') ethernet devices in the
    form of EthernetDevice objects. If the 'inactive' keyword evaluates
    to a true value, inactive devices are included.

    NOTE(review): Python 2 only (str buffer initializer, .tostring()).
    '''
    from socket import AF_INET, SOCK_DGRAM
    # Any UDP socket can serve as the ioctl target for SIOCGIFCONF.
    s = _socket(AF_INET, SOCK_DGRAM)
    bufsize = 1024
    buf = _array('B', '\0' * bufsize)
    bufptr = buf.buffer_info()[0]
    # struct ifconf: buffer length followed by a pointer to the buffer.
    ifconf = _array('B',struct.pack('iP', bufsize, bufptr))
    if _ioctl(s.fileno(),SIOCGIFCONF,ifconf,True) == 0:
        # The kernel rewrites the length field with the bytes actually used.
        l,bufptr = struct.unpack('iP',ifconf.tostring())
        size = EthernetDevice.IFREQ_SIZE
        fmt = EthernetDevice._struct_ifreq_addr
        # Walk the packed array of ifreq records, one device per record.
        while l >= size:
            intf = buf[:size].tostring()
            dev = EthernetDevice(ifreq_struct=intf, sock=s)
            if inactive or dev.has_flags('UP'):
                yield dev
            del dev
            l -= size
            del buf[:size]
    del s, buf, ifconf
def enumerate_devices(inactive=False):
    '''Returns an iterator over all active ('UP') ethernet devices in the
    form of EthernetDevice objects. If the 'inactive' keyword evaluates
    to a true value, inactive devices are included.

    NOTE(review): Python 2 only (str buffer initializer, .tostring()).
    '''
    from socket import AF_INET, SOCK_DGRAM
    # Any UDP socket can serve as the ioctl target for SIOCGIFCONF.
    s = _socket(AF_INET, SOCK_DGRAM)
    bufsize = 1024
    buf = _array('B', '\0' * bufsize)
    bufptr = buf.buffer_info()[0]
    # struct ifconf: buffer length followed by a pointer to the buffer.
    ifconf = _array('B', struct.pack('iP', bufsize, bufptr))
    if _ioctl(s.fileno(), SIOCGIFCONF, ifconf, True) == 0:
        # The kernel rewrites the length field with the bytes actually used.
        l, bufptr = struct.unpack('iP', ifconf.tostring())
        size = EthernetDevice.IFREQ_SIZE
        fmt = EthernetDevice._struct_ifreq_addr
        # Walk the packed array of ifreq records, one device per record.
        while l >= size:
            intf = buf[:size].tostring()
            dev = EthernetDevice(ifreq_struct=intf, sock=s)
            if inactive or dev.has_flags('UP'):
                yield dev
            del dev
            l -= size
            del buf[:size]
    del s, buf, ifconf
def step2_lcp(self):
    """Compute longest-common-prefix values between adjacent suffixes of
    the sorted suffix array and store them in ``self.lcp``."""
    n = len(self.res)
    init = [0] * n
    rank = _array("i", init)
    LCP = _array("i", init)
    s = self.global_suffix
    suffix__array = self.res
    endAt = self.endAt
    # rank[pos] = index of suffix starting at ``pos`` inside the suffix
    # array; only entries from len(self.array_str) on are ranked here —
    # presumably the leading entries are separator suffixes (TODO confirm).
    for i in range(len(self.array_str), n):
        v = self.res[i]
        rank[v] = i
    l = 0
    for j in range(n):
        if l > 0:
            # Invariant (Kasai-style): the LCP can drop by at most one
            # when moving to the next text position.
            l -= 1
        i = rank[j]
        j2 = suffix__array[i - 1]
        if i:
            # Extend the common prefix of suffixes j and j2, never
            # crossing the end of their respective source strings.
            while l + j < endAt[j] and l + j2 < endAt[j2] and s[j + l] == s[j2 + l]:
                l += 1
            LCP[i - 1] = l
        else:
            l = 0
    self.lcp = LCP
def direct_kark_sort(s):
    """Return the suffix array of ``s`` using the Kärkkäinen–Sanders
    (skew) construction via ``kark_sort``."""
    symbols = [None] + sorted(set(s))
    alphabet_size = len(symbols)
    length = len(s)
    rank_of = dict((ch, idx) for idx, ch in enumerate(symbols))
    # The skew algorithm requires three trailing sentinel zeros.
    suffix_array = _array("i", [0] * (length + 3))
    encoded = _array("i", [rank_of[ch] for ch in s] + [0, 0, 0])
    kark_sort(encoded, suffix_array, length, alphabet_size)
    return suffix_array[:length]
def next(self, increment=1):
    """Atomically add ``increment`` to the shared counter stored on rank 0
    and return the counter's previous value (MPI RMA fetch-and-add)."""
    incr = _array('i', [increment])
    nval = _array('i', [0])
    self.win.Lock(0)
    # Get_accumulate returns the value *before* the addition into nval.
    self.win.Get_accumulate([incr, 1, MPI.INT], [nval, 1, MPI.INT], 0, op=MPI.SUM)
    self.win.Unlock(0)
    return nval[0]
def Export(self):
    """Serialize this event as command byte 0xF1 followed by its payload.

    NOTE(review): when Data >= 1 the value is negated and packed as a
    word; when Data < 1 it is emitted as a single byte, which only works
    for Data == 0 (array('B') rejects negatives) — confirm Data's range.
    """
    if (self.Data < 1):
        e = _array('B', [0xF1, self.Data])
        return e
    else:
        e = _array('B', [0xF1])
        e.extend(_struct.pack(_encoding.ENC_WORD, -self.Data))
        return e
def __init__(self, string, unit=DEFAULT_UNIT, encoding=DEFAULT_ENCODING, noLCPs=False):
    """Build the suffix array of ``string``.

    ``unit`` selects tokenization granularity (word / character / byte),
    ``encoding`` is used when decoding input, and ``noLCPs`` skips the
    LCP feature computation.

    NOTE(review): Python 2 only (``print >>`` syntax).
    """
    if unit == UNIT_WORD:
        self.tokSep = " "
    elif unit in (UNIT_CHARACTER, UNIT_BYTE):
        self.tokSep = ""
    else:
        raise Exception("Unknown unit type identifier:", unit)
    start = _time()
    self.unit = unit
    self.encoding = encoding
    if _trace: print >> _stderr, "Tokenization ...\r",
    string = self.tokenize(string)
    if _trace: print >> _stderr, "Tokenization done"
    if _trace: print >> _stderr, "Renaming tokens ...\r",
    # Map each distinct token to a positive integer id; index 0 (None)
    # is reserved as a sentinel.
    self.voc = [None] + sorted(set(string))
    self.tokId = dict((char, iChar) for iChar, char in enumerate(self.voc))
    self.string = [self.tokId[c] for c in string]
    if _trace: print >> _stderr, "Renaming tokens done"
    self.vocSize = len(self.voc)
    self.length = len(string)
    # The skew construction needs three trailing sentinel zeros.
    self.SA = _array("i", [0] * (self.length + 3))
    self.string = _array("i", self.string + [0] * 3)
    _suffixArray(self.string, self.SA, self.length, self.vocSize)
    del self.SA[self.length:]
    del self.string[self.length:]
    self.nbSentences = self.string.count(self.tokId.get("\n", 0))
    self.length = len(string)
    self.vocSize = len(self.voc) - 1  # decrement because of the None token
    if "\n" in self.tokId:
        self.vocSize -= 1  # decrement because of the EOL token
    self.features = []
    if not noLCPs:
        self.addFeatureSA(LCP)
    self.constructionTime = _time() - start
    if _trace:
        print >> _stderr, "construction time %.3fs" % self.constructionTime
def Export(self):
    """Serialize the add-length command, splitting runs longer than 256
    clocks into a chain of 256-clock commands.

    NOTE(review): unlike Rest.Export there is no guard for Clocks < 1;
    Clocks == 0 would make array('B') reject -1 — confirm callers never
    pass it.
    """
    if (self.Clocks > 256):
        clocks = self.Clocks
        cmds = _array('B')
        while (clocks > 256):
            cmds.extend(Ext_17_AddLenght(256).Export())
            clocks -= 256
        cmds.extend(Ext_17_AddLenght(clocks).Export())
        return cmds
    else:
        # Command: extension marker, opcode 0x05, length - 1.
        return _array('B', [_CMD_EXT, 0x05, self.Clocks - 1])
def extend(self, n):
    """Grow the sieve to cover all primes <= n (a real number).

    Examples
    ========

    >>> from sympy import sieve
    >>> sieve._reset() # this line for doctest only
    >>> sieve.extend(30)
    >>> sieve[10] == 29
    True
    """
    limit = int(n)
    if limit <= self._list[-1]:
        return
    # Make sure every base prime up to sqrt(limit) is already present;
    # the recursive call is a no-op when there are enough known bases.
    root = int(limit**0.5) + 1
    self.extend(root)
    # Sieve the fresh range (last known prime, limit].
    start = self._list[-1] + 1
    candidates = _arange(start, limit + 1)
    for p in self.primerange(2, root):
        # Index of the first multiple of p inside the new range.
        offset = (-start) % p
        for idx in range(offset, len(candidates), p):
            candidates[idx] = 0
    # Non-zero survivors are the new primes; merge them in.
    self._list += _array('l', [c for c in candidates if c])
def _add_median_age(self):
    """Estimate the 2011 median age from 5-year population buckets and
    store it under d['median-age'] (0 when there is no population)."""
    d = self.statistics.data
    if '2011' not in d:
        return
    d = d['2011']
    if 'population' not in d:
        return
    d = d['population']
    if 'by-age' not in d or 'total' not in d['by-age']:
        return
    # Simple, brute-force median algorithm
    median_array = _array('f')
    for i, bucket_count in enumerate(d['by-age']['total']):
        if bucket_count <= 0:
            continue
        # We'll spread points out evenly, like this:
        # given: 0 [-----------------------------] 4 (i.e., <5)
        # and a count of three:
        # 1. Divide into three
        #    0 [---------|---------|---------] 5
        # 2. Put the counts halfway
        #    0 [----x----|----x----|----x----] 5
        # Ages: 5/6, 15/6, 25/6
        gap = 5.0 / bucket_count
        bottom = i * 5.0
        nextval = bottom + gap / 2
        # NOTE: reuses the name ``i``; harmless since the outer ``i`` is
        # rebound by enumerate on the next iteration.
        for i in range(0, bucket_count):
            median_array.append(nextval)
            nextval += gap
    if len(median_array) == 0:
        d['median-age'] = 0  # This is what StatsCan does
    else:
        # Upper median for even counts.
        d['median-age'] = median_array[int(len(median_array) / 2)]
def extend(self, N):
    """Grow the sieve to cover all numbers <= N.

    Examples
    ========

    >>> from sympy.ntheory import sieve
    >>> sieve.extend(30)
    >>> sieve[10] == 29
    True
    """
    if N <= self._list[-1]:
        return
    # We need to sieve against all bases up to sqrt(n). If there
    # are too few, extend the list recursively.
    maxbase = int(N**0.5) + 1
    self.extend(maxbase)
    # Create a new sieve starting from N**0.5
    begin = self._list[-1] + 1
    newsieve = _arange(begin, N + 1)
    # Now eliminate all multiples of primes in [2, N**0.5]
    for p in self.primerange(2, maxbase):
        # Start counting at a multiple of p, offsetting
        # the index to account for the new sieve's base index
        startindex = (-begin) % p
        for i in xrange(startindex, len(newsieve), p):
            newsieve[i] = 0
    # Merge the sieves
    self._list += _array('l', [x for x in newsieve if x])
def process(self):
    """Compute the estimated median age and male percentage from the
    per-bucket (5-year) male/female age counts.

    NOTE(review): Python 2 only (xrange, integer division of the index).
    """
    sums = [ self.agem[i] + self.agef[i] for i in xrange(0, len(self.agem)) ]
    count = sum(sums)
    if count == 0:
        # No population at all: leave both statistics undefined.
        self.median = None
        self.male_percentage = None
        return
    median_array = _array('f')
    for i, bucket_count in enumerate(sums):
        if bucket_count <= 0:
            continue
        # We'll spread points out evenly, like this:
        # given: 0 [-----------------------------] 4 (i.e., <5)
        # and a count of three:
        # 1. Divide into three
        #    0 [---------|---------|---------] 5
        # 2. Put the counts halfway
        #    0 [----x----|----x----|----x----] 5
        # Ages: 5/6, 15/6, 25/6
        gap = 5.0 / bucket_count
        bottom = i * 5.0
        nextval = bottom + gap / 2
        for i in xrange(0, bucket_count):
            median_array.append(nextval)
            nextval += gap
    self.median = median_array[len(median_array)/2]
    count_male = sum(self.agem)
    self.male_percentage = 100.0 * float(count_male) / count
def process(self):
    """Compute the estimated median age and male percentage from the
    per-bucket (5-year) male/female age counts.

    NOTE(review): Python 2 only (xrange, integer division of the index).
    """
    sums = [self.agem[i] + self.agef[i] for i in xrange(0, len(self.agem))]
    count = sum(sums)
    if count == 0:
        # No population at all: leave both statistics undefined.
        self.median = None
        self.male_percentage = None
        return
    median_array = _array('f')
    for i, bucket_count in enumerate(sums):
        if bucket_count <= 0:
            continue
        # We'll spread points out evenly, like this:
        # given: 0 [-----------------------------] 4 (i.e., <5)
        # and a count of three:
        # 1. Divide into three
        #    0 [---------|---------|---------] 5
        # 2. Put the counts halfway
        #    0 [----x----|----x----|----x----] 5
        # Ages: 5/6, 15/6, 25/6
        gap = 5.0 / bucket_count
        bottom = i * 5.0
        nextval = bottom + gap / 2
        for i in xrange(0, bucket_count):
            median_array.append(nextval)
            nextval += gap
    self.median = median_array[len(median_array) / 2]
    count_male = sum(self.agem)
    self.male_percentage = 100.0 * float(count_male) / count
def extend(self, N):
    """Grow the sieve to cover all numbers <= N.

    Examples
    ========

    >>> from sympy.ntheory import sieve
    >>> sieve.extend(30)
    >>> sieve[10] == 29
    True
    """
    if N <= self._list[-1]:
        return
    # We need to sieve against all bases up to sqrt(n). If there
    # are too few, extend the list recursively.
    maxbase = int(N**0.5)+1
    self.extend(maxbase)
    # Create a new sieve starting from N**0.5
    begin = self._list[-1] + 1
    newsieve = _arange(begin, N+1)
    # Now eliminate all multiples of primes in [2, N**0.5]
    for p in self.primerange(2, maxbase):
        # Start counting at a multiple of p, offsetting
        # the index to account for the new sieve's base index
        startindex = (-begin) % p
        for i in xrange(startindex, len(newsieve), p):
            newsieve[i] = 0
    # Merge the sieves
    self._list += _array('l', [x for x in newsieve if x])
def _add_median_age(self):
    """Estimate the 2011 median age from 5-year population buckets and
    store it under d['median-age'] (0 when there is no population)."""
    d = self.statistics.data
    if '2011' not in d:
        return
    d = d['2011']
    if 'population' not in d:
        return
    d = d['population']
    if 'by-age' not in d or 'total' not in d['by-age']:
        return
    # Simple, brute-force median algorithm
    median_array = _array('f')
    for i, bucket_count in enumerate(d['by-age']['total']):
        if bucket_count <= 0:
            continue
        # We'll spread points out evenly, like this:
        # given: 0 [-----------------------------] 4 (i.e., <5)
        # and a count of three:
        # 1. Divide into three
        #    0 [---------|---------|---------] 5
        # 2. Put the counts halfway
        #    0 [----x----|----x----|----x----] 5
        # Ages: 5/6, 15/6, 25/6
        gap = 5.0 / bucket_count
        bottom = i * 5.0
        nextval = bottom + gap / 2
        # NOTE: reuses the name ``i``; harmless since the outer ``i`` is
        # rebound by enumerate on the next iteration.
        for i in range(0, bucket_count):
            median_array.append(nextval)
            nextval += gap
    if len(median_array) == 0:
        d['median-age'] = 0  # This is what StatsCan does
    else:
        # Upper median for even counts.
        d['median-age'] = median_array[int(len(median_array)/2)]
def __init__(self, string, unit=DEFAULT_UNIT, encoding=DEFAULT_ENCODING, noLCPs=False):
    """Build the suffix array of ``string``.

    ``unit`` selects tokenization granularity (word / character / byte),
    ``encoding`` is used when decoding input, and ``noLCPs`` skips the
    LCP feature computation.

    NOTE(review): Python 2 only (``print >>`` syntax).
    """
    if unit==UNIT_WORD:
        self.tokSep=" "
    elif unit in (UNIT_CHARACTER, UNIT_BYTE):
        self.tokSep=""
    else:
        raise Exception("Unknown unit type identifier:", unit)
    start=_time()
    self.unit = unit
    self.encoding = encoding
    if _trace: print >> _stderr, "Tokenization ...\r",
    string = self.tokenize(string)
    if _trace: print >> _stderr, "Tokenization done"
    if _trace: print >> _stderr, "Renaming tokens ...\r",
    # Map each distinct token to a positive integer id; index 0 (None)
    # is reserved as a sentinel.
    self.voc = [None]+sorted(set(string))
    self.tokId = dict((char, iChar) for iChar,char in enumerate(self.voc))
    self.string = [self.tokId[c] for c in string]
    if _trace: print >> _stderr, "Renaming tokens done"
    self.vocSize= len(self.voc)
    self.length = len(string)
    # The skew construction needs three trailing sentinel zeros.
    self.SA = _array("i", [0]*(self.length+3))
    self.string = _array("i", self.string+[0]*3)
    _suffixArray(self.string, self.SA, self.length, self.vocSize)
    del self.SA[self.length:]
    del self.string[self.length:]
    self.nbSentences = self.string.count(self.tokId.get("\n", 0))
    self.length = len(string)
    self.vocSize= len(self.voc) - 1  # decrement because of the None token
    if "\n" in self.tokId:
        self.vocSize-=1  # decrement because of the EOL token
    self.features=[]
    if not noLCPs:
        self.addFeatureSA(LCP)
    self.constructionTime=_time()-start
    if _trace:
        print >> _stderr, "construction time %.3fs"%self.constructionTime
def _ioctl (self, op, fmt, *args):
    """Issue ioctl ``op`` on this device, packing/unpacking via ``fmt``.

    ``fmt`` is padded out to IFREQ_SIZE; when no args are given a zeroed
    request is unpacked to produce defaults.  Returns the full unpacked
    result tuple (interface name first).

    NOTE(review): Python 2 only (basestring, raise-with-comma).
    """
    if struct.calcsize(fmt) < self.IFREQ_SIZE:
        # Pad the format so the buffer is always a full ifreq struct.
        fmt += ('%dx' % (self.IFREQ_SIZE - struct.calcsize(fmt)))
    if args and isinstance(args[0], basestring):
        # A leading string arg is the interface name slot; self.name is
        # packed explicitly below, so drop it.
        args = args[1:]
    if not args:
        args = struct.unpack(fmt,_array('B', '\0' * self.IFREQ_SIZE))[1:]
    data = _array('B',struct.pack(fmt,self.name,*args))
    r = _ioctl(self.sock, op, data, True)
    if r != 0:
        raise IOError, 'ioctl(%d) failed: %d' % (op,r)
    return struct.unpack(fmt,data.tostring())
def _ioctl(self, op, fmt, *args):
    """Issue ioctl ``op`` on this device, packing/unpacking via ``fmt``.

    ``fmt`` is padded out to IFREQ_SIZE; when no args are given a zeroed
    request is unpacked to produce defaults.  Returns the full unpacked
    result tuple (interface name first).

    NOTE(review): Python 2 only (basestring, raise-with-comma).
    """
    if struct.calcsize(fmt) < self.IFREQ_SIZE:
        # Pad the format so the buffer is always a full ifreq struct.
        fmt += ('%dx' % (self.IFREQ_SIZE - struct.calcsize(fmt)))
    if args and isinstance(args[0], basestring):
        # A leading string arg is the interface name slot; self.name is
        # packed explicitly below, so drop it.
        args = args[1:]
    if not args:
        args = struct.unpack(fmt, _array('B', '\0' * self.IFREQ_SIZE))[1:]
    data = _array('B', struct.pack(fmt, self.name, *args))
    r = _ioctl(self.sock, op, data, True)
    if r != 0:
        raise IOError, 'ioctl(%d) failed: %d' % (op, r)
    return struct.unpack(fmt, data.tostring())
def binary_search_insert(n, array):
    """Count the inversions among the first ``n`` elements of ``array``
    by maintaining a sorted prefix and bisecting each new element in."""
    inversions = 0
    sorted_prefix = _array('I', [array[0]])
    for pos in range(1, n):
        value = array[pos]
        # Elements already placed that are strictly greater than ``value``
        # each form one inversion with it.
        slot = bisect.bisect_right(sorted_prefix, value)
        inversions += pos - slot
        sorted_prefix.insert(slot, value)
    return inversions
def Export(self):
    """Serialize a rest, splitting rests longer than 128 clocks into a
    chain of 128-clock rests; zero/negative lengths produce no bytes."""
    if (self.Clocks > 128):
        clocks = self.Clocks
        cmds = _array('B')
        while (clocks > 128):
            cmds.extend(Rest(128).Export())
            clocks -= 128
        cmds.extend(Rest(clocks).Export())
        return cmds
    elif (self.Clocks < 1):
        # Nothing to emit for an empty rest.
        return _array('B')
    else:
        # Encoded as length - 1.
        return _array('B', [self.Clocks - 1])
def s9_append(arr, buf, force_clear=False):
    """
    encode as much numbers from array obj buf as possible and append
    them to array obj arr. returns count of encoded and appended
    numbers.

    force_clear: encode everything from buf and fill the rest of last
    4-byte word with zeros if needed.
    """
    buf_start = 0
    # Simple-9 bit widths; each 32-bit word packs need_count values of
    # max_size bits (looked up via s9_by_size).
    sizes_init = [1, 2, 3, 4, 5, 7, 9, 14, 28]
    maxsize_idx = 0
    count = 0
    words = _array('I')
    while(True):
        sizes = sizes_init
        maxsize_idx = 0
        count = 0
        # Scan forward until enough numbers fit one word at the current
        # maximum width; the for/else fires when buf is exhausted.
        for n in buf[buf_start:]:
            count += 1
            for j, size in enumerate(sizes[maxsize_idx:]):
                # measuring size of number
                # (and max number size in current portion):
                if (n >> size) == 0:
                    # n has size of "size"
                    maxsize_idx += j
                    break
            max_size = sizes[maxsize_idx]
            selector, need_count, waste = s9_by_size[max_size]
            if count >= need_count:
                word = s9_encode_word(buf, buf_start, selector, max_size, need_count, waste)
                words.append(word)
                buf_start += need_count
                break
            # in worst case we will measure 13 last measured numbers again
        else:
            if force_clear and buf_start < len(buf):
                # Pad the tail with zeros so the final word is complete.
                word = s9_encode_word(buf[buf_start:] + _array('I', [0] * (need_count - count)), 0, selector, max_size, need_count, waste)
                words.append(word)
                read_ = len(buf)
            else:
                read_ = buf_start
            arr.extend(words)
            return read_
def readu_str(self, size=None):
    """Read ``size`` bytes and return the number embedded in them as an
    int (0 when only NUL bytes are present).

    NOTE(review): decodes byte-by-byte, which assumes single-byte
    (ASCII-range) digit characters; multi-byte UTF-8 input would raise —
    confirm the on-disk format.
    """
    a = _array('B', self.read(size))
    b = ''
    for i in a:
        # NUL bytes act as padding and are skipped.
        if not (i == 0):
            b += bytes([i]).decode()
    if b == '':
        b = 0
    return int(b)
def next(self):
    """Return the next global counter value: add 1 to this rank's slot in
    the shared window, read all slots, and return their sum."""
    group = self.win.Get_group()
    size = group.Get_size()
    rank = group.Get_rank()
    group.Free()
    #
    incr = _array('i', [1])
    vals = _array('i', [0]) * size
    self.win.Lock(MPI.LOCK_EXCLUSIVE, 0, 0)
    self.win.Accumulate([incr, 1, MPI.INT], 0, [rank, 1, MPI.INT], MPI.SUM)
    self.win.Get([vals, 1, self.dt_get], 0, [0, 1, self.dt_get])
    self.win.Unlock(0)
    # Our own slot still holds the pre-increment value; substitute the
    # locally tracked value and advance it.
    vals[rank] = self.myval
    self.myval += 1
    nxtval = sum(vals)
    #
    return nxtval
def _radixPass(a, b, r, n, K):
    """ Stable sort of the sequence a according to the keys given in r.

    >>> a=range(5)
    >>> b=[0]*5
    >>> r=[2,1,3,0,4]
    >>> _radixPass(a, b, r, 5, 5)
    >>> b
    [3, 1, 0, 2, 4]

    When n is less than the length of a, the end of b must be left
    unaltered.
    >>> b=[5]*5
    >>> _radixPass(a, b, r, 2, 2)
    >>> b
    [1, 0, 5, 5, 5]

    >>> _a=a=[1, 0]
    >>> b= [0]*2
    >>> r=[0, 1]
    >>> _radixPass(a, b, r, 2, 2)
    >>> a=_a
    >>> b
    [0, 1]

    >>> a=[1, 1]
    >>> _radixPass(a, b, r, 2, 2)
    >>> b
    [1, 1]

    >>> a=[0, 1, 1, 0]
    >>> b= [0]*4
    >>> r=[0, 1]
    >>> _radixPass(a, b, r, 4, 2)
    >>> a=_a
    >>> b
    [0, 0, 1, 1]
    """
    c = _array("i", [0]*(K+1))  # counter array
    for i in xrange(n):  # count occurrences
        c[r[a[i]]]+=1
    # NOTE: ``sum`` shadows the builtin; local to this counting-sort pass.
    sum=0
    for i in xrange(K+1):  # exclusive prefix sums
        t = c[i]
        c[i] = sum
        sum += t
    for a_i in a[:n]:  # sort
        b[c[r[a_i]]] = a_i
        c[r[a_i]]+=1
def Export(self):
    """Serialize a note; notes longer than 256 clocks emit a 256-clock
    note followed by an add-length extension for the remainder."""
    if self.Clocks > 256:
        cmds = _array('B')
        cmds.extend(Note(self.Data, 256).Export())
        # Ext_17_AddLenght itself chains if the remainder exceeds 256.
        cmds.extend(Ext_17_AddLenght(self.Clocks - 256).Export())
        return cmds
    else:
        return Note(self.Data, self.Clocks).Export()
def _radixPass(a, b, r, n, K):
    """ Stable sort of the sequence a according to the keys given in r.

    >>> a=range(5)
    >>> b=[0]*5
    >>> r=[2,1,3,0,4]
    >>> _radixPass(a, b, r, 5, 5)
    >>> b
    [3, 1, 0, 2, 4]

    When n is less than the length of a, the end of b must be left
    unaltered.
    >>> b=[5]*5
    >>> _radixPass(a, b, r, 2, 2)
    >>> b
    [1, 0, 5, 5, 5]

    >>> _a=a=[1, 0]
    >>> b= [0]*2
    >>> r=[0, 1]
    >>> _radixPass(a, b, r, 2, 2)
    >>> a=_a
    >>> b
    [0, 1]

    >>> a=[1, 1]
    >>> _radixPass(a, b, r, 2, 2)
    >>> b
    [1, 1]

    >>> a=[0, 1, 1, 0]
    >>> b= [0]*4
    >>> r=[0, 1]
    >>> _radixPass(a, b, r, 4, 2)
    >>> a=_a
    >>> b
    [0, 0, 1, 1]
    """
    c = _array("i", [0] * (K + 1))  # counter array
    for i in xrange(n):  # count occurrences
        c[r[a[i]]] += 1
    # NOTE: ``sum`` shadows the builtin; local to this counting-sort pass.
    sum = 0
    for i in xrange(K + 1):  # exclusive prefix sums
        t = c[i]
        c[i] = sum
        sum += t
    for a_i in a[:n]:  # sort
        b[c[r[a_i]]] = a_i
        c[r[a_i]] += 1
def Export(self):
    """Serialize a (possibly long) note, chaining 256-clock segments with
    legato commands so they play as one continuous note; zero/negative
    lengths produce no bytes."""
    # if (0x80 > self.Data > 0xDF or self.Clocks < 0): raise AnException
    if (self.Clocks > 256):
        clocks = self.Clocks
        cmds = _array('B')
        cmds.extend(Legato().Export())
        while (clocks > 256):
            cmds.extend(Note(self.Data, 256).Export())
            cmds.extend(Legato().Export())
            clocks -= 256
        cmds.extend(Note(self.Data, clocks).Export())
        return cmds
    elif (self.Clocks < 1):
        return _array('B')
    else:
        # Simple case: note byte followed by length - 1.
        return _array('B', [self.Data, self.Clocks - 1])
def next(self):
    """Return the next global counter value: add 1 to this rank's slot in
    the shared window, read all slots, and return their sum."""
    group = self.win.Get_group()
    size = group.Get_size()
    rank = group.Get_rank()
    group.Free()
    #
    incr = _array('i', [1])
    vals = _array('i', [0])*size
    self.win.Lock(0)
    self.win.Accumulate([incr, 1, MPI.INT], 0, [rank, 1, MPI.INT], MPI.SUM)
    self.win.Get([vals, 1, self.dt_get], 0, [ 0, 1, self.dt_get])
    self.win.Unlock(0)
    # Our own slot still holds the pre-increment value; substitute the
    # locally tracked value and advance it.
    vals[rank] = self.myval
    self.myval += 1
    nxtval = sum(vals)
    #
    return nxtval
def LCP(SA): """ Compute the longest common prefix for every adjacent suffixes. The result is a list of same size as SA. Given two suffixes at positions i and i+1, their LCP is stored at position i+1. A zero is stored at position 0 of the output. >>> SA=SuffixArray("abba", unit=UNIT_BYTE) >>> SA._LCP_values array('i', [0, 1, 0, 1]) >>> SA=SuffixArray("", unit=UNIT_BYTE) >>> SA._LCP_values array('i') >>> SA=SuffixArray("", unit=UNIT_CHARACTER) >>> SA._LCP_values array('i') >>> SA=SuffixArray("", unit=UNIT_WORD) >>> SA._LCP_values array('i') >>> SA=SuffixArray("abab", unit=UNIT_BYTE) >>> SA._LCP_values array('i', [0, 2, 0, 1]) """ string = SA.string length = SA.length lcps = _array("i", [0] * length) SA = SA.SA if _trace: delta = max(length // 100, 1) for i, pos in enumerate(SA): if i % delta == 0: percent = float((i + 1) * 100) / length print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % ( percent, i + 1, length), lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos) else: for i, pos in enumerate(SA): lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos) if _trace: print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % (100.0, length, length) if lcps: # Correct the case where string[0] == string[-1] lcps[0] = 0 return lcps
def __init__(self,data,dtype=None,copy=True):
    """Build a vector from ``data``, coercing the elements to a common
    dtype (or to the explicit ``dtype``).  Only copy=True is implemented."""
    self.dim = len(data)
    types = None
    if self.dim>0:
        types = set([type(x) for x in data])
    if dtype is not None:
        # An explicit dtype overrides the inferred element types.
        types = (dtype,)
    tc,self.dtype = coerce_(types)
    data = [self.dtype(x) for x in data]
    if copy is True:
        self.data = _array(tc,data)
    else:
        raise NotImplementedError
def radixpass(a, b, r, s, n, k):
    """Stable counting sort of a[:n] into b, keyed by r[a[i] + s].

    ``k`` is the maximum key value; ``s`` offsets the key lookup.
    """
    counts = _array("i", [0] * (k + 1))
    for item in a[:n]:
        counts[r[item + s]] += 1
    # Turn counts into exclusive prefix sums (starting slot per key).
    total = 0
    for key in range(k + 1):
        counts[key], total = total, total + counts[key]
    # Scatter the items into their slots, preserving input order.
    for item in a[:n]:
        slot = r[item + s]
        b[counts[slot]] = item
        counts[slot] += 1
def __init__(self, data, dtype=None, copy=True):
    """Build a vector from ``data``, coercing the elements to a common
    dtype (or to the explicit ``dtype``).  Only copy=True is implemented."""
    self.dim = len(data)
    types = None
    if self.dim > 0:
        types = set(type(element) for element in data)
    if dtype is not None:
        # An explicit dtype overrides the inferred element types.
        types = (dtype, )
    tc, self.dtype = coerce_(types)
    converted = [self.dtype(element) for element in data]
    if copy is not True:
        raise NotImplementedError
    self.data = _array(tc, converted)
def save(self, path=None): if path: self.path = path f = open(path, 'wb') arr = _array._array('B', self.bmp_header) for i in range(self.height): for j in range(self.width): arr.extend((self.rgb_data[i][j] & 0xff, (self.rgb_data[i][j] >> 8) & 0xff, (self.rgb_data[i][j] >> 16) & 0xff)) arr.extend((*bytes(self.pad_bytes), )) f.write(arr.tobytes()) f.close()
def Export(self):
    """Serialize the 0xED noise-control command.

    NoiseEnabled (True/False) is folded into bit 7 of the data byte; when
    it is None the flag is recovered from bit 7 of Data itself.

    Raises:
        TypeError: when NoiseEnabled is set to a non-bool value.
    """
    if (self.NoiseEnabled is None):
        # Fix: (Data & 0x80) evaluates to 0 or 0x80, never 1, so the
        # original lookup {0: False, 1: True}[...] raised KeyError
        # whenever bit 7 was set.  bool() yields the intended flag for
        # both cases (and is identical for the 0 case).
        self._NoiseEnabled = bool(self.Data & 0x80)
    elif (type(self.NoiseEnabled) is bool):
        self._NoiseEnabled = self.NoiseEnabled
    else:
        raise TypeError()
    e = _array('B', [0xED])
    e.append(self.Data | self._NoiseEnabled << 7)
    return e
def step1_sort_suffix(self):
    """Join all input strings with a chr(2) separator, sort the suffixes
    of the joined text, and record for every position which source
    string it belongs to, its offset there, and where that string ends."""
    char_frontier = chr(2)
    self.global_suffix = char_frontier.join(self.array_str)
    nbChars = len(self.global_suffix)
    init = [-1] * nbChars
    self.idxString = _array("i", init)  # source-string index per position
    self.idxPos = _array("i", init)     # offset within the source string
    self.endAt = _array("i", init)      # end position of that string
    k = idx = 0
    for mot in self.array_str:
        last = k + len(mot)
        for p in range(len(mot)):
            self.idxString[k] = idx
            self.idxPos[k] = p
            self.endAt[k] = last
            k += 1
        idx += 1
        k += 1  # skip the separator position (keeps its -1 markers)
    self.res = direct_kark_sort(self.global_suffix)
def LCP(SA): """ Compute the longest common prefix for every adjacent suffixes. The result is a list of same size as SA. Given two suffixes at positions i and i+1, their LCP is stored at position i+1. A zero is stored at position 0 of the output. >>> SA=SuffixArray("abba", unit=UNIT_BYTE) >>> SA._LCP_values array('i', [0, 1, 0, 1]) >>> SA=SuffixArray("", unit=UNIT_BYTE) >>> SA._LCP_values array('i') >>> SA=SuffixArray("", unit=UNIT_CHARACTER) >>> SA._LCP_values array('i') >>> SA=SuffixArray("", unit=UNIT_WORD) >>> SA._LCP_values array('i') >>> SA=SuffixArray("abab", unit=UNIT_BYTE) >>> SA._LCP_values array('i', [0, 2, 0, 1]) """ string=SA.string length=SA.length lcps=_array("i", [0]*length) SA=SA.SA if _trace: delta=max(length//100,1) for i, pos in enumerate(SA): if i%delta==0: percent=float((i+1)*100)/length print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r"%(percent, i+1, length), lcps[i]=_longestCommonPrefix(string, string, SA[i-1], pos) else: for i, pos in enumerate(SA): lcps[i]=_longestCommonPrefix(string, string, SA[i-1], pos) if _trace: print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r"%(100.0, length, length) if lcps: # Correct the case where string[0] == string[-1] lcps[0] = 0 return lcps
def _findOne(self, subString):
    """Return the suffix-array index of one suffix that starts with
    ``subString``, or False when no suffix matches.

    >>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
    >>> SA._findOne("ippi")
    1
    >>> SA._findOne("missi")
    4
    """
    SA = self.SA
    LCPs = self._LCP_values
    string = self.string
    try:
        subString = _array(
            "i", [self.tokId[c] for c in self.tokenize(subString)])
    except KeyError:
        # if a token of the subString is not in the vocabulary
        # the substring can't be in the string
        return False
    lenSubString = len(subString)
    #################################
    # Dichotomy search of subString #
    #################################
    lower = 0
    upper = self.length
    success = False
    while upper - lower > 0:
        middle = (lower + upper) // 2
        # Compare against the equal-length prefix of the middle suffix.
        middleSubString = string[SA[middle]:min(SA[middle] + lenSubString, self.length)]
        #NOTE: the cmp function is removed in Python 3
        #Strictly speaking we are doing one comparison more now
        if subString < middleSubString:
            upper = middle
        elif subString > middleSubString:
            lower = middle + 1
        else:
            success = True
            break
    if not success:
        return False
    else:
        return middle
def _findOne(self, subString):
    """Return the suffix-array index of one suffix that starts with
    ``subString``, or False when no suffix matches.

    >>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
    >>> SA._findOne("ippi")
    1
    >>> SA._findOne("missi")
    4
    """
    SA=self.SA
    LCPs=self._LCP_values
    string=self.string
    try:
        subString=_array("i", [self.tokId[c] for c in self.tokenize(subString)])
    except KeyError:
        # if a token of the subString is not in the vocabulary
        # the substring can't be in the string
        return False
    lenSubString=len(subString)
    #################################
    # Dichotomy search of subString #
    #################################
    lower=0
    upper=self.length
    success=False
    while upper-lower >0:
        middle=(lower+upper)//2
        # Compare against the equal-length prefix of the middle suffix.
        middleSubString=string[SA[middle]:min(SA[middle]+lenSubString,self.length)]
        #NOTE: the cmp function is removed in Python 3
        #Strictly speaking we are doing one comparison more now
        if subString < middleSubString:
            upper=middle
        elif subString > middleSubString:
            lower=middle+1
        else:
            success=True
            break
    if not success:
        return False
    else:
        return middle
def mkRawimageData(w, h, data, f=RAWIMAGE_FORMAT_RAW_ARGB_ID):
    """Build a MUI RawimageData structure sized for ``data``.

    A new structure type is created per payload size because ri_Data is
    a fixed-length inline byte array.
    """
    size = len(data)
    obj = type('_rawimg_%u' % size, (PyMUICStructureType,), {'_fields_': [
        ('ri_Width', c_ULONG),
        ('ri_Height', c_ULONG),
        ('ri_Format', c_ULONG),
        ('ri_Size', c_ULONG),
        ('ri_Data', c_UBYTE.ArrayType(size))]})()
    obj.ri_Width = w
    obj.ri_Height = h
    obj.ri_Format = f
    obj.ri_Size = size
    # Copy the payload into the inline array.
    obj.ri_Data[:] = _array('B', data)
    return obj
def _findOne(self, subString):
    """Return the suffix-array index of one suffix that starts with
    ``subString``, or False when no suffix matches.

    NOTE(review): Python 2 only (``cmp``).

    >>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
    >>> SA._findOne("ippi")
    1
    >>> SA._findOne("missi")
    4
    """
    SA = self.SA
    LCPs = self._LCP_values
    string = self.string
    try:
        subString = _array(
            "i", [self.tokId[c] for c in self.tokenize(subString)])
    except KeyError:
        # if a token of the subString is not in the vocabulary
        # the substring can't be in the string
        return False
    lenSubString = len(subString)
    #################################
    # Dichotomy search of subString #
    #################################
    lower = 0
    upper = self.length
    success = False
    while upper - lower > 0:
        middle = (lower + upper) // 2
        # Compare against the equal-length prefix of the middle suffix.
        middleSubString = string[SA[middle]:min(SA[middle] + lenSubString, self.length)]
        cmpRes = cmp(subString, middleSubString)
        if cmpRes == -1:
            upper = middle
        elif cmpRes == 1:
            lower = middle + 1
        else:
            success = True
            break
    if not success:
        return False
    else:
        return middle
def _findOne(self, subString):
    """Return the suffix-array index of one suffix that starts with
    ``subString``, or False when no suffix matches.

    NOTE(review): Python 2 only (``cmp``).

    >>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
    >>> SA._findOne("ippi")
    1
    >>> SA._findOne("missi")
    4
    """
    SA=self.SA
    LCPs=self._LCP_values
    string=self.string
    try:
        subString=_array("i", [self.tokId[c] for c in self.tokenize(subString)])
    except KeyError:
        # if a token of the subString is not in the vocabulary
        # the substring can't be in the string
        return False
    lenSubString=len(subString)
    #################################
    # Dichotomy search of subString #
    #################################
    lower=0
    upper=self.length
    success=False
    while upper-lower >0:
        middle=(lower+upper)//2
        # Compare against the equal-length prefix of the middle suffix.
        middleSubString=string[SA[middle]:min(SA[middle]+lenSubString,self.length)]
        cmpRes=cmp(subString, middleSubString)
        if cmpRes == -1:
            upper=middle
        elif cmpRes == 1:
            lower=middle+1
        else:
            success=True
            break
    if not success:
        return False
    else:
        return middle
def __init__(self, dtype, msize, a=None):
    """Create a matrix of shape ``msize`` holding elements of ``dtype``.

    A scalar msize becomes (msize, msize); storage is a zero-filled flat
    array unless an existing array (or nested sequence) is supplied.

    NOTE(review): Python 2 only (operator.isSequenceType, str buffer
    initializer for array()).
    """
    from array import array
    from operator import isSequenceType
    if not isSequenceType(msize):
        msize = (msize, msize)
    elif len(msize) == 1:
        # NOTE(review): a 1-element msize collapses to (1, 1), ignoring
        # its value — confirm this is intended.
        msize = (1, 1)
    if a is None:
        self._a = array(_dtype2array[dtype], '\x00'*(_dsize(dtype)*_prod(msize)))
    elif isinstance(a, array):
        # Adopt the caller's array without copying.
        self._a = a
    else:
        self._a = _array(_dtype2array[dtype], _flatten(a))
    self.msize = msize
    self.dtype = dtype
def __init__(self, dtype, msize, a=None):
    """Create a matrix of shape ``msize`` holding elements of ``dtype``.

    A scalar msize becomes (msize, msize); storage is a zero-filled flat
    array unless an existing array (or nested sequence) is supplied.

    NOTE(review): Python 2 only (operator.isSequenceType, str buffer
    initializer for array()).
    """
    from array import array
    from operator import isSequenceType
    if not isSequenceType(msize):
        msize = (msize, msize)
    elif len(msize) == 1:
        # NOTE(review): a 1-element msize collapses to (1, 1), ignoring
        # its value — confirm this is intended.
        msize = (1, 1)
    if a is None:
        self._a = array(_dtype2array[dtype], '\x00' * (_dsize(dtype) * _prod(msize)))
    elif isinstance(a, array):
        # Adopt the caller's array without copying.
        self._a = a
    else:
        self._a = _array(_dtype2array[dtype], _flatten(a))
    self.msize = msize
    self.dtype = dtype
def Export(self):
    """Serialize a tempo change as a YM2151 Timer B command (0xFF).

    Converts the BPM in self.Data to the chip's timer value for the
    configured OPM clock.

    Fix: the timer value is emitted through array('B'), which only
    accepts 0-255; the original clamped to [0, 256], so a value of 256
    would raise OverflowError instead of exporting.  Clamp to 255.
    """
    global OPM_CLOCK
    timerb = round(256 - 60 * OPM_CLOCK / (self.Data * 48 * 1024))
    # opm_tempo = 256 - 60 * opm_clock / (bpm_tempo * 48 * 1024)
    # bpm_tempo = 60 * opm_clock / (48 * 1024 * (256 - opm_tempo))
    #
    # If opm_clock == 4mHz:
    # opm_tempo = 256 - (78125 / (16 * bpm_tempo))
    # bpm_tempo = 78125 / (16 * (256 - opm_tempo))
    #
    # Thanks to vampirefrog
    timerb = _util.Clamp(timerb, 0, 255)
    return _array('B', [0xFF, timerb])
def _radixPass(a, b, r, n, K): c = _array("i", [0]*(K+1)) # counter array for i in range(n): # count occurrences c[r[a[i]]]+=1 sum=0 for i in range(K+1): # exclusive prefix sums t = c[i] c[i] = sum sum += t for a_i in a[:n]: # sort b[c[r[a_i]]] = a_i c[r[a_i]]+=1
def find(cond): """Return linear index of elements where the condition is True. The index is based on 1.""" res = [] for i, x in enumerate(cond): if bool(x): res.append(i+1) shp = _size(cond) if shp[0] > 1: if shp[1] > 1: shp = (len(res), 1) else: shp = (len(res), 1) elif shp[1] > 1: shp = (1, len(res)) else: shp = (len(res), 1) na = _marray('double', shp, _array(_dtype2array['double'], res)) return na
def extend(self, n):
    """Grow the sieve to cover all primes <= n (a real number).

    Examples
    ========

    >>> from sympy import sieve
    >>> from array import array # this line and next for doctest only
    >>> sieve._list = array('l', [2, 3, 5, 7, 11, 13])
    >>> sieve.extend(30)
    >>> sieve[10] == 29
    True
    """
    n = int(n)
    if n <= self._list[-1]:
        return
    # We need to sieve against all bases up to sqrt(n).
    # This is a recursive call that will do nothing if there are enough
    # known bases already.
    maxbase = int(n**0.5) + 1
    self.extend(maxbase)
    # Create a new sieve starting from sqrt(n)
    begin = self._list[-1] + 1
    newsieve = _arange(begin, n + 1)
    # Now eliminate all multiples of primes in [2, sqrt(n)]
    for p in self.primerange(2, maxbase):
        # Start counting at a multiple of p, offsetting
        # the index to account for the new sieve's base index
        startindex = (-begin) % p
        for i in xrange(startindex, len(newsieve), p):
            newsieve[i] = 0
    # Merge the sieves
    self._list += _array('l', [x for x in newsieve if x])
def _DataObject(dtype, data):
    """Return the flat array storage for ``data`` using the array
    typecode registered for ``dtype``."""
    return _array(_dtype2array[dtype], data)
def coerce(self, dtype):
    """Convert the stored data in place to ``dtype`` (resolved through
    ``coerce_``), updating both the backing array and self.dtype."""
    converted = [dtype(element) for element in self.data]
    tc, resolved = coerce_((dtype,))
    self.data = _array(tc, converted)
    self.dtype = resolved
def array(typecode, initializer=()):
    """Create an array.array, coercing ``typecode`` to str first."""
    code = str(typecode)
    return _array(code, initializer)
def array(typecode, *args, **kwargs):
    """Create array."""
    # Python 2 compatibility shim: the array module rejects unicode
    # typecodes, so encode them to byte strings first.
    if isinstance(typecode, unicode):
        typecode = typecode.encode()
    return _array(typecode, *args, **kwargs)
def _suffixArrayWithTrace(s, SA, n, K, operations, totalOperations):
    """ This function is a rewrite in Python of the C implementation
    proposed in Kärkkäinen and Sanders paper.

    Find the suffix array SA of s[0..n-1] in {1..K}^n
    Require s[n]=s[n+1]=s[n+2]=0, n>=2

    NOTE(review): Python 2 only (xrange); ``operations`` is only advanced
    when _trace is on and exists purely for progress reporting.
    """
    if _trace:
        _traceSuffixArray(operations, totalOperations)
    n0 = (n+2)//3
    n1 = (n+1)//3
    n2 = n//3
    n02 = n0+n2
    SA12 = _array("i", [0]*(n02+3))
    SA0 = _array("i", [0]*n0)
    s0 = _array("i", [0]*n0)
    # s12 : positions of mod 1 and mod 2 suffixes
    s12 = _array("i", [i for i in xrange(n+(n0-n1)) if i%3])  # <- writing i%3 is more efficient than i%3!=0
    s12.extend([0]*3)
    # lsb radix sort the mod 1 and mod 2 triples
    _radixPass(s12, SA12, s[2:], n02, K)
    if _trace:
        operations+=n02
        _traceSuffixArray(operations, totalOperations)
    _radixPass(SA12, s12, s[1:], n02, K)
    if _trace:
        operations+=n02
        _traceSuffixArray(operations, totalOperations)
    _radixPass(s12, SA12, s, n02, K)
    if _trace:
        operations+=n02
        _traceSuffixArray(operations, totalOperations)
    # find lexicographic names of triples
    name = 0
    c = _array("i", [-1]*3)
    for i in xrange(n02):
        cSA12 = s[SA12[i]:SA12[i]+3]
        if cSA12 != c:
            name += 1
            c = cSA12
        if SA12[i] % 3 == 1:
            s12[SA12[i]//3] = name           # left half
        else:
            s12[(SA12[i]//3) + n0] = name    # right half
    if name < n02:  # recurse if names are not yet unique
        operations = _suffixArrayWithTrace(s12, SA12, n02, name+1, operations, totalOperations)
        if _trace:
            _traceSuffixArray(operations, totalOperations)
        # store unique names in s12 using the suffix array
        for i, SA12_i in enumerate(SA12[:n02]):
            s12[SA12_i] = i + 1
    else:  # generate the suffix array of s12 directly
        if _trace:
            operations += _nbOperations(n02)
            _traceSuffixArray(operations, totalOperations)
        for i, s12_i in enumerate(s12[:n02]):
            SA12[s12_i - 1] = i
    # stably sort the mod 0 suffixes from SA12 by their first character
    j = 0
    for SA12_i in SA12[:n02]:
        if (SA12_i < n0):
            s0[j] = 3*SA12_i
            j += 1
    _radixPass(s0, SA0, s, n0, K)
    if _trace:
        operations += n0
        _traceSuffixArray(operations, totalOperations)
    # merge sorted SA0 suffixes and sorted SA12 suffixes
    p = j = k = 0
    t = n0 - n1
    while k < n:
        if SA12[t] < n0:  # pos of current offset 12 suffix
            i = SA12[t] * 3 + 1
        else:
            i = (SA12[t] - n0) * 3 + 2
        j = SA0[p]  # pos of current offset 0 suffix
        # Compare the current mod-1/2 suffix with the current mod-0
        # suffix using one or two characters plus the recursive ranks.
        if SA12[t] < n0:
            bool = (s[i], s12[SA12[t]+n0]) <= (s[j], s12[int(j/3)])
        else:
            bool = (s[i], s[i+1], s12[SA12[t]-n0+1]) <= (s[j], s[j+1], s12[int(j/3)+n0])
        if (bool):
            SA[k] = i
            t += 1
            if t == n02:  # done --- only SA0 suffixes left
                k += 1
                while p < n0:
                    SA[k] = SA0[p]
                    p += 1
                    k += 1
        else:
            SA[k] = j
            p += 1
            if p == n0:  # done --- only SA12 suffixes left
                k += 1
                while t < n02:
                    if SA12[t] < n0:  # pos of current offset 12 suffix
                        SA[k] = (SA12[t] * 3) + 1
                    else:
                        SA[k] = ((SA12[t] - n0) * 3) + 2
                    t += 1
                    k += 1
        k += 1
    return operations
def array(typecode, *args, **kwargs):
    """Create an array.array.

    Python 2 compatibility shim: the array module rejects unicode
    typecodes, so encode them to byte strings first.
    """
    if isinstance(typecode, unicode):
        typecode = typecode.encode()
    return _array(typecode, *args, **kwargs)
def crc(self):
    """Return the CRC of the packed big-endian (product_code,
    major_version, minor_version, revision) record."""
    encoding = _array('b', _pack('!4H', self.product_code, self.major_version, self.minor_version, self.revision))
    return _crc(encoding)
def _arange(a, b): ar = _array('l', [0]*(b - a)) for i, e in enumerate(range(a, b)): ar[i] = e return ar
def _array_new(size, typecode, init=0): return _array(typecode, [init]) * size
def find(self, subString, features=[]):
    """ Dichotomy search of subString in the suffix array.

    As soon as a suffix which starts with subString is found, it uses
    the LCPs in order to find the other matching suffixes.

    The outputs consists in a list of tuple (pos, feature0, feature1,
    ...) where feature0, feature1, ... are the features attached to the
    suffix at position pos.  Features are listed in the same order as
    requested in the input list of features [featureName0,
    featureName1, ...]

    NOTE(review): ``features=[]`` is a mutable default argument; benign
    here because it is only rebound, never mutated.

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("ssi")
    array('i', [5, 2])
    >>> SA.find("mi")
    array('i', [0])

    >>> SA=SuffixArray('miss A and miss B', UNIT_WORD)
    >>> SA.find("miss")
    array('i', [0, 3])

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("iss", ['LCP'])
    [(4, 1), (1, 4)]

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("A")
    array('i')

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("pp")
    array('i', [8])

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("ppp")
    array('i')

    >>> SA=SuffixArray('mississippi', UNIT_BYTE)
    >>> SA.find("im")
    array('i')
    """
    SA=self.SA
    LCPs=self._LCP_values
    string=self.string
    middle=self._findOne(subString)
    if middle is False:
        return _array('i')
    subString=_array("i", [self.tokId[c] for c in self.tokenize(subString)])
    lenSubString=len(subString)
    ###########################################
    # Use LCPS to retrieve the other suffixes #
    ###########################################
    # Matching suffixes are contiguous in SA; walk outward while the LCP
    # with the neighbour covers the whole search pattern.
    lower=middle
    upper=middle+1
    middleLCP=LCPs[middle]
    while lower>0 and LCPs[lower]>=lenSubString:
        lower-=1
    while upper<self.length and LCPs[upper]>=lenSubString:
        upper+=1
    ###############################################
    # When features is empty, outputs a flat list #
    ###############################################
    res=SA[lower:upper]
    if len(features)==0:
        return res
    ##############################################
    # When features is non empty, outputs a list #
    # of tuples (pos, feature_1, feature_2, ...) #
    ##############################################
    else:
        features=[getattr(self, "_%s_values"%featureName) for featureName in features]
        features=[featureValues[lower:upper] for featureValues in features]
        return zip(res, *features)
def random_mic():
    """Derive a random seed from ambient microphone noise.

    Reads 16 frames of 16-bit mono audio and feeds the absolute sample
    sum to _proc_seed.

    Fix: the original leaked the audio stream and the PyAudio instance;
    both are now released in a ``finally`` block so the device is freed
    even if the read fails.
    """
    p = _pyaudio.PyAudio()
    stream = p.open(format=_pyaudio.paInt16, channels=1, rate=44100,
                    input=True, output=True, frames_per_buffer=8)
    try:
        sd = sum(_array('h', stream.read(16)).tolist())
    finally:
        stream.stop_stream()
        stream.close()
        p.terminate()
    return _proc_seed(abs(sd))
def array(typecode, initializer):
    """Create an array.array from a possibly-unicode typecode.

    NOTE(review): encoding the typecode to bytes only works on Python 2;
    Python 3's array() requires a str typecode and would reject the
    encoded value.
    """
    return _array(typecode.encode('ascii'), initializer)