class LRU_Cache(object):
    """Least-recently-used cache backed by an OrderedDict.

    ``get`` refreshes a key's recency and returns -1 for a missing key
    (it does not raise).  ``set`` inserts or updates a key, evicting the
    least recently used entry once ``capacity`` is reached.
    """

    def __init__(self, capacity=5):
        self.queue = OrderedDict()  # key -> value, oldest entry first
        self.capacity = capacity
        self.size = 0  # kept in sync with len(self.queue)

    def _check_capacity(self):
        """Evict the least recently used entry if the cache is full."""
        if self.size < self.capacity:
            return
        # popitem(last=False) removes the oldest (least recently used) entry.
        self.queue.popitem(last=False)
        self.size -= 1

    def get(self, key):
        """Return the cached value for *key* (refreshing it), or -1 if absent."""
        if key not in self.queue:
            return -1
        # Delete and re-insert to mark the key as most recently used.
        value = self.queue[key]
        del self.queue[key]
        self.queue[key] = value
        return value

    def set(self, key, value):
        """Insert or update *key*, evicting the LRU entry when at capacity.

        Bug fix: the original incremented ``size`` on every call, even
        when merely updating an existing key, so ``size`` drifted above
        the real entry count and triggered premature evictions.
        """
        if key in self.queue:
            # Updating in place: remove first so the key moves to the
            # most-recently-used position without inflating the count.
            del self.queue[key]
            self.size -= 1
        self._check_capacity()
        self.queue[key] = value
        self.size += 1

    def __repr__(self):
        return self.queue.__repr__()
class FixedSizeDict:
    """A dict-like container that never grows beyond ``size`` entries.

    Inserting a brand-new key while full evicts the oldest entry first
    (FIFO order, courtesy of the underlying OrderedDict).
    """

    def __init__(self, size: int):
        if size < 1:
            raise ValueError
        self.size = size
        self._d = OrderedDict()

    def __getitem__(self, item):
        return self._d[item]

    def get(self, item):
        """Like ``dict.get``: return the value, or None when absent."""
        return self._d.get(item)

    def __setitem__(self, key, value):
        # Only a previously-unseen key can push us over the limit.
        if key not in self._d and len(self._d) >= self.size:
            self._pop()
        self._d[key] = value

    def _pop(self):
        """Drop and return the oldest (first-inserted) entry."""
        return self._d.popitem(last=False)

    def __len__(self):
        return len(self._d)

    def __repr__(self):
        return repr(self._d)
class FnArg(object):
    """A namespace for function arguments that remembers insertion order.

    Values are mirrored both in ``self.attributes`` (an OrderedDict) and
    as plain instance attributes, so they can be read either way.
    """

    def __init__(self):
        self.attributes = OrderedDict()

    def setattr(self, attr, value):
        """Record *attr* -> *value* and expose it as an instance attribute."""
        self.attributes[attr] = value
        setattr(self, attr, value)

    def __repr__(self):
        return repr(self.attributes)

    def __getitem__(self, i):
        """Positional access: value of the i-th recorded attribute."""
        name = list(self.attributes)[i]
        return self.attributes[name]

    def get_parameters(self, params):
        """Return the values of the named attributes, in the order given."""
        return [getattr(self, name) for name in params]

    def to_array(self):
        """Flatten every stored value and concatenate into one 1-D array."""
        flattened = [value.ravel() for value in self.attributes.values()]
        return np.concatenate(flattened)
class SymbolTable(MutableMapping):
    """An ordered mapping that forbids rebinding an existing key.

    The difference between a symbol table and a dict is that assigning
    to a key that is already present raises ``KeyError`` instead of
    silently overwriting the value.
    """

    def __init__(self):
        self.dict = OrderedDict()

    def __getitem__(self, key):
        return self.dict[key]

    def __setitem__(self, key, value):
        """Bind *key* to *value*; raise KeyError if *key* already exists."""
        if key in self.dict:
            raise KeyError(key)
        self.dict[key] = value

    def __delitem__(self, key):
        del self.dict[key]

    def __iter__(self):
        return self.dict.__iter__()

    def __len__(self):
        return self.dict.__len__()

    def __repr__(self):
        return repr(self.dict)
class Results(object):
    """An ordered, read-only bag of (key, value) pairs.

    Iterates over values, supports integer indexing by position and
    string indexing by key.
    """

    def __init__(self, values):
        # *values* is a sequence of (key, value) pairs.
        self.values = tuple(pair[1] for pair in values)
        self.__keys = tuple(pair[0] for pair in values)
        self.__dict = OrderedDict(values)

    def __iter__(self):
        return iter(self.values)

    def __getitem__(self, key):
        # Integers index by position; anything else is a key lookup.
        if isinstance(key, int):
            return self.values[key]
        return self.__dict[key]

    def __len__(self):
        return len(self.values)

    def __repr__(self):
        # Swap the leading "OrderedDict" (11 characters) for "Results".
        return "Results" + repr(self.__dict)[11:]

    def __str__(self):
        return "Results" + str(self.__dict)[11:]

    def __contains__(self, key):
        return key in self.__dict

    def get_keys(self):
        return self.__keys
class RingCache(object):
    """A bounded, recency-ordered key/value cache.

    When full, inserting a new key discards the stalest entry.  A
    membership test (``in``) is *not* side-effect free: a hit refreshes
    the key's recency, and a miss auto-registers the key when
    ``isAutoAdd`` is enabled.
    """

    def __init__(self, maxEntries=100, isAutoAdd=False):
        self.max = maxEntries
        self.d = OrderedDict()
        self.isAutoAdd = isAutoAdd

    def add(self, k, v=None):
        """Insert/refresh *k*, evicting the oldest entry when full."""
        if len(self.d) >= self.max:
            self.d.popitem(last=False)
        # Drop any stale position so the key is re-inserted as newest.
        self.d.pop(k, None)
        self.d[k] = v

    def get(self, k):
        return self.d[k]

    def remove(self, k):
        del self.d[k]

    def __contains__(self, k):
        if k in self.d:
            # Hit: bump the key to the most recent position.
            self.d[k] = self.d.pop(k)
            return True
        if self.isAutoAdd:
            self.add(k)
        return False

    def __len__(self):
        return len(self.d)

    def __repr__(self):
        return repr(self.d)
def __repr__(self):
    """Render as ``OrderedDefaultDict(<factory>, <items>)``.

    The items part is exactly what ``OrderedDict.__repr__`` produces
    for this mapping.
    """
    od_repr = OrderedDict.__repr__(self)
    return 'OrderedDefaultDict(%s, %s)' % (self.default_factory, od_repr)
class _OrderedMultimap(MutableMapping):
    """
    Ordered dictionary that can hold multiple values for the same key.

    In order not to break existing clients, getting and inserting
    elements with ``[]`` keeps the same behaviour as the built-in dict:
    if multiple elements are already mapped to a key, ``[]`` returns the
    newest one, and assignment replaces all of them.  To map an
    additional element to a key, use ``add_item``.  To retrieve every
    value mapped to a key, use ``get_all_values``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.__store = OrderedDict()
        # Bug fix: ``args`` is always a tuple (never None), so the old
        # ``args is not None and len(args) > 0`` guard was half dead code;
        # emptiness is the only thing worth checking.
        if args:
            for key, value in six.iteritems(args[0]):
                self.__store[key] = MultimapValue(value)

    def __getitem__(self, key):
        # Return only the newest value so plain-dict clients keep working.
        return self.__store[key][len(self.__store[key]) - 1]

    def __delitem__(self, key):
        del self.__store[key]

    def __setitem__(self, key, value):
        self.__store[key] = MultimapValue(value)

    def __len__(self):
        # Count every stored value, not just the number of distinct keys.
        return sum(len(values) for values in six.itervalues(self.__store))

    def __iter__(self):
        for key in six.iterkeys(self.__store):
            yield key

    def add_item(self, key, value):
        """Map an additional *value* to *key*, keeping the existing ones."""
        if key in self.__store:
            self.__store[key].append(value)
        else:
            self.__setitem__(key, value)

    def get_all_values(self, key):
        """Return every value mapped to *key*."""
        return self.__store[key]

    def iteritems(self):
        """Yield one (key, value) pair per stored value."""
        for key in self.__store:
            for value in self.__store[key]:
                yield (key, value)

    def items(self):
        return list(self.iteritems())

    def __repr__(self):
        return self.__store.__repr__()
class ObjectContainer(object):
    '''Tab-completable OrderedDict container.

    Every stored item is reachable both through ``obj[key]`` and as an
    instance attribute ``obj.key`` (which is what makes tab completion
    work in interactive shells).
    '''

    def __init__(self, *args, **kwargs):
        self._container = OrderedDict()

    def _add_item(self, key, val):
        self._container[key] = val
        setattr(self, key, val)

    def _remove_key(self, key):
        self._container.pop(key)
        self.__dict__.pop(key)

    def _remove_val(self, val):
        # Collect matching keys first, then delete, so the container is
        # never mutated while being iterated.
        doomed = [k for k, v in list(self._container.items()) if v == val]
        for key in doomed:
            self._remove_key(key)

    def items(self):
        return self._container.items()

    def __contains__(self, k):
        return k in self._container

    def __iter__(self):
        return iter(list(self._container.values()))

    def __setitem__(self, k, v):
        self._add_item(k, v)

    def __getitem__(self, k):
        # Exact-int subscripts index by insertion position (bool keys are
        # deliberately treated as ordinary mapping keys, as the original
        # ``type(k) == int`` check did).
        if type(k) is int:
            return list(self._container.values())[k]
        return self._container[k]

    def __repr__(self):
        return repr(self._container)

    def __eq__(self, other):
        return self._container == other._container

    def __ne__(self, other):
        return self._container != other._container

    def __len__(self):
        return len(self._container)

    def __clearkeys__(self):
        # + backwards support for objectdict
        for key in list(self._container.keys()):
            self._remove_key(key)
def __repr__(self, _repr_running=None):
    """Render this node exactly as a plain OrderedDict would print itself.

    Bug fixes:
    * ``_repr_running={}`` was a mutable default argument shared between
      every call; it is now created per call.
    * the bare ``except:`` (which would also swallow KeyboardInterrupt
      and SystemExit) is narrowed to ``TypeError``, the only error the
      one-argument fallback is meant to handle.
    """
    if _repr_running is None:
        _repr_running = {}
    temp = self.__class__.__name__
    try:
        # The pure-Python OrderedDict.__repr__ takes an extra bookkeeping
        # argument and prints the name of the object's class.  Temporarily
        # reset the class name so this prints as what it (fundamentally)
        # is: an OrderedDict.  (Extra logic elsewhere keeps the
        # XMLDictNode __repr__ working, per the original comment.)
        self.__class__.__name__ = _OrderedDict.__name__
        rv = _OrderedDict.__repr__(self, _repr_running)
    except TypeError:
        # The C implementation doesn't accept the second argument.
        rv = _OrderedDict.__repr__(self)
    finally:
        self.__class__.__name__ = temp
    return rv
def __repr__(self, _repr_running=None):
    """Render this node exactly as a plain OrderedDict would print itself."""
    _repr_running = {} if _repr_running is None else _repr_running
    saved_name = self.__class__.__name__
    try:
        # The pure-Python OrderedDict.__repr__ accepts an extra
        # bookkeeping argument and embeds the object's class name in its
        # output.  Pose as a plain OrderedDict while rendering (extra
        # logic elsewhere keeps the XMLDictNode __repr__ working), then
        # restore the real class name.
        self.__class__.__name__ = _OrderedDict.__name__
        return _OrderedDict.__repr__(self, _repr_running)
    except TypeError:
        # This implementation didn't understand the second argument;
        # retry with just one.
        return _OrderedDict.__repr__(self)
    finally:
        self.__class__.__name__ = saved_name
class Groups(object):
    """Helper class that partitions items into named, ordered groups.

    Abilities:
      - keeps the list of groups and their elements (each element may be
        in only one group),
      - can answer which group an item belongs to,
      - can list the items of a single group, or of all groups combined.

    Backed by OrderedDict, so both group order and item order are kept.
    """

    def __init__(self, groups):
        """*groups* - dict-like object with ``key: [item1, item2, ...]`` pairs."""
        self.groups = OrderedDict(groups)
        # Reverse index: item -> owning group name.
        self.match = OrderedDict(
            (member, name)
            for name, members in self.groups.items()
            for member in members
        )

    def __repr__(self):
        return repr(self.groups)

    def keys(self, group=None):
        """Iterable of the items of *group*, or of every group when falsy."""
        if group:
            return self.groups[group]
        return chain(*self.groups.values())

    def items(self):
        """Unpack each group and yield (group, key) pairs."""
        for name, members in self.groups.items():
            for member in members:
                yield name, member

    def __contains__(self, item):
        """Checks that *item* is known."""
        return item in self.match

    def __getitem__(self, item):
        """Returns the group of *item*."""
        return self.match[item]

    def hasgroup(self, group):
        return group in self.groups

    def containsall(self, items):
        return all(item in self for item in items)

    def samegroup(self, items):
        return len({self.match[item] for item in items}) == 1

    def group(self, item):
        """All members of the group that *item* belongs to."""
        return self.groups[self[item]]
class StateEntry:
    """Vote tally for one state: per-party counts plus a single total entry.

    Python 3 fixes relative to the original: dict views are not
    subscriptable (``self.total.values()[0]`` / ``keys()[0]`` broke), and
    the bare ``reduce`` builtin no longer exists.
    """

    def __init__(self, state):
        self.state = state
        self.parties = OrderedDict()  # party name -> vote count

    def addParty(self, party, votes):
        self.parties[party] = votes

    def setTotal(self, total_tag, votes):
        """Record the total vote count under the column label *total_tag*."""
        self.total = OrderedDict()
        self.total[total_tag] = votes

    def partyCount(self):
        return len(self.parties)

    def partyList(self):
        return self.parties.keys()

    def totalVotes(self):
        # py3: views are not indexable, so take the first value via iter().
        return next(iter(self.total.values()))

    def winningParty(self):
        """Party with the most votes; a later-added party wins ties.

        Replaces the original ``reduce`` call with an explicit loop that
        keeps the same tie-breaking (the reduce kept the *newer* party
        when counts were equal) without needing functools.
        """
        winner = None
        for party in self.parties:
            if winner is None or self.parties[party] >= self.parties[winner]:
                winner = party
        return winner

    def voteRate(self, party):
        """Fraction of the total vote won by *party*."""
        return float(self.parties[party]) / self.totalVotes()

    def entry(self):
        """Flatten the tally into an OrderedDict suitable for tabulation.

        Negative party counts and a falsy total are rendered as '-'.
        """
        entries = OrderedDict()
        entries['State'] = self.state
        entries['Votes'] = OrderedDict()
        for party, votes in self.parties.items():
            entries['Votes'][party] = votes if votes >= 0 else '-'
        total_tag = next(iter(self.total))
        total_votes = self.total[total_tag]
        entries[total_tag] = total_votes if total_votes else '-'
        return entries

    def __repr__(self):
        return 'state=' + self.state + ' ' + self.parties.__repr__()
class OrderedDefaultDict(object):
    """
    Implements (most of) a defaultdict that also remembers insertion order.
    """

    def __init__(self, factory):
        self._factory = factory  # zero-argument callable producing defaults
        self._dict = OrderedDict()

    def __setitem__(self, key, item):
        self._dict[key] = item

    def __getitem__(self, key):
        # Materialise a default the first time a missing key is read.
        if key not in self._dict:
            self._dict[key] = self._factory()
        return self._dict[key]

    def __repr__(self):
        return repr(self._dict)

    def __iter__(self):
        return iter(self._dict)
class _NodeDict(MutableMapping):
    """A mapping whose values carry a bracketed ``name`` attribute.

    Each stored value is renamed to ``<container name>[<key>]`` on
    insertion, and renaming the container cascades the new prefix to
    every stored value.
    """

    def __init__(self, mapping=None):
        self._name = 'None'
        self._data = OrderedDict()
        if mapping is not None:
            self.update(mapping)

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, val):
        self._name = val
        # Propagate the new prefix to every child value.
        for key, child in self.items():
            child.name = self.name + '[' + str(key) + ']'

    def __delitem__(self, key):
        # Detach the child (clear its name) before dropping it.
        self._data[key].name = None
        del self._data[key]

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def __repr__(self):
        return repr(self._data)

    def __setitem__(self, key, val):
        val.name = self.name + '[' + str(key) + ']'
        self._data[key] = val

    def __str__(self):
        return repr(self)
class FeatureContainer:
    r"""A thin multimap wrapper around OrderedDict.

    Assignment appends: ``c[k] = v`` adds *v* to the list stored under
    *k*, creating the list on first use.
    """

    def __init__(self):
        self._dict = OrderedDict()

    def __setitem__(self, key, val):
        self._dict.setdefault(key, []).append(val)

    def __getitem__(self, key):
        return self._dict[key]

    def __repr__(self):
        return repr(self._dict)

    def items(self):
        return self._dict.items()

    def keys(self):
        return self._dict.keys()

    def values(self):
        return self._dict.values()
class RingCache(object):
    """Fixed-capacity cache that recycles its oldest slot when full.

    Membership tests have side effects: a hit promotes the entry to the
    newest position, while a miss inserts the key (with value ``None``)
    when ``isAutoAdd`` is set.
    """

    def __init__(self, maxEntries=100, isAutoAdd=False):
        self.max = maxEntries
        self.d = OrderedDict()
        self.isAutoAdd = isAutoAdd

    def add(self, k, v=None):
        """Store *k* -> *v* as the newest entry, evicting the oldest if full."""
        if self.max <= len(self.d):
            self.d.popitem(last=False)
        if k in self.d:
            del self.d[k]
        self.d[k] = v

    def get(self, k):
        return self.d[k]

    def remove(self, k):
        del self.d[k]

    def __contains__(self, k):
        try:
            v = self.d.pop(k)
        except KeyError:
            if self.isAutoAdd:
                self.add(k)
            return False
        self.d[k] = v  # re-insert so the entry becomes the newest
        return True

    def __len__(self):
        return len(self.d)

    def __repr__(self):
        return repr(self.d)
def __repr__(self):
    """Render as ``OrderedDefaultDict(<factory>, <items>)``."""
    contents = OrderedDict.__repr__(self)
    return "OrderedDefaultDict({}, {})".format(self.default_factory, contents)
def __repr__(self):
    """Render as ``OrderedDefaultDict(<factory>, <items>)``."""
    return "OrderedDefaultDict(%s, %s)" % (
        self.default_factory,
        OrderedDict.__repr__(self),
    )
def __repr__(self):
    """Render as ``OrderedDefaultDict(<factory>, <items>)``."""
    contents = OrderedDict.__repr__(self)
    return 'OrderedDefaultDict(%s, %s)' % (self._default_factory, contents)
class Config(abc.MutableMapping, dict):
    """
    A `Config` object acts as a proxy to an ordered dict.  The dict
    contains the keys and values that the config consists of.

    For example, consider the following Arma 3 Config class:

        class MyClass {
            string_value = "This is a string";
            array_value[] = {"This", "is", "an", "array"};
        };

    When the above is represented as a dictionary, it would look
    something like this:

        {
            'MyClass': {
                'string_value': 'This is a string',
                'array_value': ['This', 'is', 'an', 'array']
            }
        }

    Keys are case-insensitive (see ``_keytransform``).  A config may
    inherit from another config; inherited keys are visible through
    ``__iter__``/``__getitem__`` but not through ``iter_self``.
    """

    @classmethod
    def from_dict(self, name, dict_, **kwargs):
        # NOTE(review): the first parameter is a classmethod's cls,
        # conventionally named ``cls``; left as-is to keep the code intact.
        conf = Config(name, **kwargs)
        for k, v in dict_.items():
            conf.add(v, k)
        return conf

    def __init__(self, name, inherits=None, parent=None):
        """Create a config node.

        :param name: name of this config section.
        :param inherits: optional name of a sibling config to inherit from
            (resolved through *parent*).
        :param parent: enclosing Config, or None for a root.
        """
        self.name = name
        self.parent = parent
        if inherits:
            self.add_inherits(inherits)
        else:
            self.inherits = None
        self._dict = OrderedDict()

    def to_dict(self):
        """Recursively convert this config (and nested Configs) to a dict."""
        out = {}
        for k in self:
            item = self[k]
            if isinstance(item, Config):
                out[k] = item.to_dict()
            else:
                out[k] = item
        return out

    def add(self, node, name=None):
        """Attach *node* under *name*, wrapping plain values/dicts.

        Raises ValueError when *name* is already defined directly on this
        config (inherited keys do not conflict).
        """
        if isinstance(node, (Config, ValueNode)):
            name = node.name
        else:
            if name is None:
                raise Exception('name cant be none')
        if isinstance(node, dict):
            node = Config.from_dict(name, node, parent=self)
        elif isinstance(node, (Config, ValueNode)):
            node = node  # already wrapped; no conversion needed
        elif isinstance(node, (str, int, float, complex, list)):
            node = ValueNode(name, node)
        else:
            raise TypeError(str(type(node)))
        # Only keys defined directly on this config count as duplicates.
        if name in self.iter_self():
            raise ValueError('%s already defined' % name)
        self[name] = node

    def pop(self, key):
        return self._dict.pop(self._keytransform(key))

    def add_inherits(self, inherits):
        """Resolve and record the parent-config this one inherits from."""
        try:
            self.inherits = self.parent.get_config(inherits)
        except KeyError:
            raise ValueError('Attempted to inherit non-existing config (%s)'
                             % inherits)

    def get_config(self, k):
        """Find a nested Config named *k* here or up the parent chain."""
        k = self._keytransform(k)
        config = self._dict.get(k, None)
        if isinstance(config, Config):
            return config
        elif self.parent is not None:
            return self.parent.get_config(k)
        raise KeyError()

    def iter_self(self):
        """Iterate only the keys defined directly on this config."""
        return iter(self._dict)

    def items_raw(self):
        """Yield (key, raw node) pairs without unwrapping ValueNodes."""
        for key in self.iter_self():
            yield key, self._get_raw(key)

    def values_raw(self):
        """Yield raw nodes without unwrapping ValueNodes."""
        for key in self.iter_self():
            yield self._get_raw(key)

    def _get_raw(self, item):
        """Return the stored node for *item*, falling back to inheritance."""
        item = self._keytransform(item)
        try:
            return self._dict[item]
        except KeyError:
            if self.inherits:
                return self.inherits._get_raw(item)
            raise

    def _keytransform(self, key):
        # Keys are case-insensitive throughout.
        return key.lower()

    def __iter__(self):
        # Inherited keys first, then our own (so ours win on lookup).
        if self.inherits:
            yield from self.inherits
        yield from self.iter_self()

    def __repr__(self):
        return self._dict.__repr__()

    def __getitem__(self, item):
        # ValueNodes are transparently unwrapped to their value.
        raw = self._get_raw(item)
        if isinstance(raw, ValueNode):
            return raw.value
        return raw

    def __setitem__(self, item, value):
        if not isinstance(value, (Config, ValueNode)):
            if isinstance(value, dict):
                # A plain dict becomes a nested Config populated key by key.
                conf = Config(item, None, self)
                self._dict[self._keytransform(item)] = conf
                for k, v in value.items():
                    conf[k] = v
                return
            else:
                value = ValueNode(item, value)
        self._dict[self._keytransform(item)] = value

    def __delitem__(self, item):
        del self._dict[self._keytransform(item)]

    def __len__(self):
        return len(self._dict)
class Enviroment():
    """A Lisp evaluation environment: an ordered symbol table with an
    optional parent (``superEnv``) chain.

    The first environment created without a parent becomes the global
    environment and is pre-populated with the built-in symbols.

    Python 3 fixes relative to the original:
    * the ``print`` statement is now the ``print()`` function,
    * ``self.map.values()`` is materialised with ``list()`` so
      ``map_index`` stays subscriptable (py3 views are not),
    * ``get_super_local_by_index`` / ``get_super_parameter_by_index`` now
      actually climb *env_number* levels up the parent chain (the old
      loop re-read ``self.superEnv`` on every pass, so it could never get
      past the first parent).
    """
    __global_env = None

    def __init__(self, superEnv=None):
        self.map = OrderedDict()   # symbol -> value, in definition order
        self.map_index = []        # positional view of map's values
        self.superEnv = superEnv
        if superEnv is None:
            # No parent: this is the global environment; install builtins.
            Enviroment.__global_env = self
            self.put(new(LispSymbol, "+"), Plus())
            self.put(new(LispSymbol, "-"), Minus())
            self.put(new(LispSymbol, "define"), Define())
            self.put(new(LispSymbol, "if"), If())
            self.put(new(LispSymbol, "eq?"), Eq())
            self.put(new(LispSymbol, ">?"), Grt())
            self.put(new(LispSymbol, "<?"), Lwt())
            self.put(new(LispSymbol, "lambda"), Lambda())
            self.put(new(LispSymbol, "begin"), Begin())
            self.put(new(LispSymbol, "set!"), Set())
            self.put(new(LispSymbol, "quote"), Quote())
            self.put(new(LispSymbol, "write"), Write())
            self.put(new(LispSymbol, "print"), BuildInFunctions.Print())
            self.put(new(LispSymbol, "getParam"), GetParam())
            self.put(new(LispSymbol, "getLocal"), GetLocal())
            self.put(new(LispSymbol, "getSuperParam"), GetSuperParam())
            self.put(new(LispSymbol, "getSuperLocal"), GetSuperLocal())
            self.put(new(LispSymbol, "getGlobal"), GetGlobal())
            self.put(new(LispSymbol, "label$"), LispTKLabel())
            self.put(new(LispSymbol, "tk$"), LispTK())
            self.put(new(LispSymbol, "text$"), LispTKText())
            self.put(new(LispSymbol, "button$"), LispTKButton())
            self.put(new(LispSymbol, "frame$"), LispTKFrame())
            self.put(new(LispSymbol, "toplevel$"), LispTKToplevel())
            self.put(new(LispSymbol, "scrollbar$"), LispTKScrollbar())
            self.put(new(LispSymbol, "eval"), Eval())
            self.put(new(LispSymbol, "str_concat"), Str_Concat())
            self.put(new(LispSymbol, "load"), Load())
            self.put(new(LispSymbol, "save"), Save())
            self.put(new(LispSymbol, "type"), Type())
            self.put(new(LispSymbol, "call"), Call())

    def set(self, key, value):
        """Rebind an already-defined symbol; raise SymbolNotFound otherwise."""
        if key in self.map:
            self.map[key] = value
            self.map_index = list(self.map.values())
        else:
            raise SymbolNotFound("Key '%s' is not defined, yet" % key.value)

    def put(self, key, value):
        """Bind (or overwrite) *key* unconditionally in this environment."""
        self.map[key] = value
        self.map_index = list(self.map.values())

    def get(self, key):
        """Look *key* up here, then up the parent chain."""
        env = self
        while env is not None:
            try:
                return env.map[key]
            except KeyError:
                env = env.superEnv
        print("UNDEFINED SYMBOL")
        raise SymbolNotFound("Symbol Not Found: \"%s\"" % key.value)

    def get_local_by_index(self, index):
        return self.map_index[index]

    def get_global_by_index(self, index):
        return Enviroment.__global_env.map_index[index]

    def get_local_index(self, element):
        """Position of *element* among this environment's keys, or -1."""
        for i, key in enumerate(self.map):
            if key == element:
                return i
        return -1

    def get_super_local_by_index(self, index, env_number):
        # Bug fix: climb one parent per iteration instead of re-reading
        # self.superEnv every time.
        _env = self
        for _ in range(env_number):
            _env = _env.superEnv
        return _env.get_local_by_index(index)

    def get_super_parameter_by_index(self, index, env_number):
        # Bug fix: same parent-chain walk as get_super_local_by_index.
        _env = self
        for _ in range(env_number):
            _env = _env.superEnv
        return _env.get_parameter_by_index(index)

    def get_global_index(self, element):
        """Position of *element* among the global environment's keys, or -1."""
        for i, key in enumerate(Enviroment.__global_env.map):
            if key == element:
                return i
        return -1

    def setParameterSymbols(self, symbols):
        self.parameter_symbols = symbols

    def getParameterSymbols(self):
        return self.parameter_symbols

    def setParameterList(self, param):
        self.parameter = param

    def get_parameter_by_index(self, index):
        # Environments without a parameter list answer None.
        if hasattr(self, 'parameter'):
            return self.parameter[index]
        return None

    def __repr__(self):
        return self.map.__repr__()
def __repr__(self, _repr_running=None):
    """Render as ``OrderedDefaultDict(<factory>, <items>)``.

    Bug fix: the original unconditionally passed ``_repr_running`` to
    ``OrderedDict.__repr__``, but the C implementation used by Python 3
    accepts no extra argument, so every call raised TypeError.  The
    extra argument is now tried first and dropped on TypeError (the same
    guard the sibling ``__repr__`` implementations in this codebase use).
    """
    if _repr_running is None:
        _repr_running = {}
    try:
        od_repr = OrderedDict.__repr__(self, _repr_running)
    except TypeError:
        # C implementation: no second argument supported.
        od_repr = OrderedDict.__repr__(self)
    return 'OrderedDefaultDict(%s, %s)' % (self.default_factory, od_repr)
def __repr__(self):
    """Render as ``DefaultOrderedDict(<factory>, <items>)``."""
    contents = OrderedDict.__repr__(self)
    return 'DefaultOrderedDict(%s, %s)' % (self.default_factory, contents)
class FleetStateTracker:
    """
    A representation of a fleet of NuCypher nodes.

    Nodes are stored in an ordered mapping (``_nodes``); each distinct
    fleet composition is checksummed and archived in ``states``.
    """

    # Sentinel-valued defaults until the first checksum/node arrives.
    _checksum = NO_KNOWN_NODES.bool_value(False)
    _nickname = NO_KNOWN_NODES
    _nickname_metadata = NO_KNOWN_NODES
    _tracking = False
    most_recent_node_change = NO_KNOWN_NODES
    snapshot_splitter = BytestringSplitter(32, 4)  # checksum bytes + 4-byte epoch
    log = Logger("Learning")
    state_template = namedtuple("FleetState",
                                ("nickname", "metadata", "icon", "nodes",
                                 "updated"))

    def __init__(self):
        self.additional_nodes_to_track = []
        self.updated = maya.now()
        self._nodes = OrderedDict()   # checksum address -> node
        self.states = OrderedDict()   # fleet-state checksum -> FleetState

    def __setitem__(self, key, value):
        self._nodes[key] = value
        # Only re-record the fleet state once tracking has been started.
        if self._tracking:
            self.log.info(
                "Updating fleet state after saving node {}".format(value))
            self.record_fleet_state()
        else:
            self.log.debug("Not updating fleet state.")

    def __getitem__(self, item):
        return self._nodes[item]

    def __bool__(self):
        return bool(self._nodes)

    def __contains__(self, item):
        # Matches either a key (checksum address) or a stored node.
        return item in self._nodes.keys() or item in self._nodes.values()

    def __iter__(self):
        # Iteration is over nodes, not addresses.
        yield from self._nodes.values()

    def __len__(self):
        return len(self._nodes)

    def __eq__(self, other):
        return self._nodes == other._nodes

    def __repr__(self):
        return self._nodes.__repr__()

    @property
    def checksum(self):
        return self._checksum

    @checksum.setter
    def checksum(self, checksum_value):
        # Setting the checksum also derives the matching nickname pair.
        self._checksum = checksum_value
        self._nickname, self._nickname_metadata = nickname_from_seed(
            checksum_value, number_of_pairs=1)

    @property
    def nickname(self):
        return self._nickname

    @property
    def nickname_metadata(self):
        return self._nickname_metadata

    @property
    def icon(self) -> str:
        if self.nickname_metadata is NO_KNOWN_NODES:
            return str(NO_KNOWN_NODES)
        return self.nickname_metadata[0][1]

    def addresses(self):
        return self._nodes.keys()

    def icon_html(self):
        return icon_from_checksum(checksum=self.checksum,
                                  number_of_nodes=str(len(self)),
                                  nickname_metadata=self.nickname_metadata)

    def snapshot(self):
        """Serialize the current state: checksum bytes + 4-byte epoch."""
        fleet_state_checksum_bytes = binascii.unhexlify(self.checksum)
        fleet_state_updated_bytes = self.updated.epoch.to_bytes(
            4, byteorder="big")
        return fleet_state_checksum_bytes + fleet_state_updated_bytes

    def record_fleet_state(self, additional_nodes_to_track=None):
        """Checksum the current fleet and archive it if it changed."""
        if additional_nodes_to_track:
            self.additional_nodes_to_track.extend(additional_nodes_to_track)
        if not self._nodes:
            # No news here.
            return
        sorted_nodes = self.sorted()
        sorted_nodes_joined = b"".join(bytes(n) for n in sorted_nodes)
        checksum = keccak_digest(sorted_nodes_joined).hex()
        if checksum not in self.states:
            self.checksum = keccak_digest(b"".join(
                bytes(n) for n in self.sorted())).hex()
            self.updated = maya.now()
            # For now we store the sorted node list. Someday we probably
            # spin this out into its own class, FleetState, and use it as
            # the basis for partial updates.
            new_state = self.state_template(
                nickname=self.nickname,
                metadata=self.nickname_metadata,
                nodes=sorted_nodes,
                icon=self.icon,
                updated=self.updated,
            )
            self.states[checksum] = new_state
            return checksum, new_state

    def start_tracking_state(self, additional_nodes_to_track=None):
        if additional_nodes_to_track is None:
            additional_nodes_to_track = list()
        self.additional_nodes_to_track.extend(additional_nodes_to_track)
        self._tracking = True
        # NOTE(review): update_fleet_state is not defined on this class
        # here -- presumably provided elsewhere; verify against callers.
        self.update_fleet_state()

    def sorted(self):
        """Known + additional nodes, ordered by checksum address."""
        nodes_to_consider = list(
            self._nodes.values()) + self.additional_nodes_to_track
        return sorted(nodes_to_consider,
                      key=lambda n: n.checksum_public_address)

    def shuffled(self):
        """Known nodes in random order (additional nodes excluded)."""
        nodes_we_know_about = list(self._nodes.values())
        random.shuffle(nodes_we_know_about)
        return nodes_we_know_about

    def abridged_states_dict(self):
        abridged_states = {}
        for k, v in self.states.items():
            abridged_states[k] = self.abridged_state_details(v)
        return abridged_states

    def abridged_nodes_dict(self):
        abridged_nodes = {}
        for checksum_address, node in self._nodes.items():
            abridged_nodes[checksum_address] = self.abridged_node_details(node)
        return abridged_nodes

    @staticmethod
    def abridged_state_details(state):
        """Flatten a FleetState into a JSON-friendly dict."""
        return {"nickname": state.nickname,
                "symbol": state.metadata[0][1],
                "color_hex": state.metadata[0][0]['hex'],
                "color_name": state.metadata[0][0]['color'],
                "updated": state.updated.rfc2822()}

    @staticmethod
    def abridged_node_details(node):
        """Flatten a node into a JSON-friendly dict."""
        try:
            last_seen = node.last_seen.iso8601()
        except AttributeError:
            # TODO: This logic belongs somewhere - anywhere - else.
            last_seen = str(node.last_seen)  # In case it's the constant NEVER_SEEN
        return {
            "icon_details": node.nickname_icon_details(),  # TODO: Mix this in better.
            "rest_url": node.rest_url(),
            "nickname": node.nickname,
            "checksum_address": node.checksum_public_address,
            "timestamp": node.timestamp.iso8601(),
            "last_seen": last_seen,
            "fleet_state_icon": node.fleet_state_icon,
        }
def __repr__(self, *args, **kwargs):
    """Render as ``OrderedDefaultDict(<factory>, <items>)``.

    Extra positional/keyword arguments are accepted for call
    compatibility but ignored.
    """
    contents = OrderedDict.__repr__(self)
    return 'OrderedDefaultDict(%s, %s)' % (self.default_factory, contents)
def __repr__(self):
    """BodyData.__repr__: the OrderedDict repr tagged with the type name."""
    return "(BodyData) %s" % OrderedDict.__repr__(self)
def __repr__(self):
    """Render as ``OrderedDefaultDict(<factory>, <items>)``.

    Bug fix: the original mixed a %-style template (``%s``) with
    ``str.format()``, so the placeholders were never substituted and the
    literal template string was returned unchanged.
    """
    return 'OrderedDefaultDict({}, {})'.format(
        self.default_factory, OrderedDict.__repr__(self))
def __repr__(self):
    # Make the repr pretty.  Not functional
    # (i.e. calling eval(config_object) won't work).
    #
    # Bug fix: ``str.translate(None, '()')`` was the Python 2 deletion
    # API and raises TypeError on Python 3; the equivalent is a deletion
    # table built with str.maketrans.
    string = OrderedDict.__repr__(self).strip('Config([])')
    string = string.replace("', ", ":")
    string = string.translate(str.maketrans('', '', '()'))
    return "Config({" + string + "})"
class Headers:
    '''Utility for managing HTTP headers for both clients and servers.

    It has a dictionary like interface with few extra functions to
    facilitate the insertion of multiple header values.  Header fields
    are **case insensitive**, therefore doing::

        >>> h = Headers()
        >>> h['Content-Length'] = '1050'

    is equivalent to

        >>> h['content-length'] = '1050'

    :param headers: optional iterable over header field/value pairs.
    :param kind: optional headers type, one of ``server``, ``client``
        or ``both``.
    :param strict: if ``True`` only valid headers field will be included.

    This :class:`Headers` container maintains an ordering as suggested
    by http://www.w3.org/Protocols/rfc2616/rfc2616.html:

    .. epigraph::

        The order in which header fields with differing field names are
        received is not significant.  However, it is "good practice" to
        send general-header fields first, followed by request-header or
        response-header fields, and ending with the entity-header
        fields.

        -- rfc2616 section 4.2

    The strict parameter is rarely used and it forces the omission on
    non-standard header fields.
    '''

    @classmethod
    def make(cls, headers):
        '''Coerce *headers* into a :class:`Headers` instance.'''
        if not isinstance(headers, cls):
            headers = cls(headers)
        return headers

    def __init__(self, *args, **kwargs):
        self._headers = OrderedDict()  # normalized field -> list of values
        if args or kwargs:
            self.update(*args, **kwargs)

    def __repr__(self):
        return self._headers.__repr__()

    def __str__(self):
        return '\r\n'.join(self._ordered())

    def __bytes__(self):
        return str(self).encode(DEFAULT_CHARSET)

    def __len__(self):
        return len(self._headers)

    def update(self, *args, **kwargs):
        """Extend the headers with an ``iterable``.

        :param iterable: a dictionary or an iterable over keys, values
            tuples.
        """
        if len(args) == 1:
            for key, value in mapping_iterator(args[0]):
                self.add_header(key, value)
        elif args:
            raise TypeError('update expected at most 1 arguments, got %d' %
                            len(args))
        for key, value in kwargs.items():
            self.add_header(key, value)

    def override(self, iterable):
        '''Extend headers by overriding fields from *iterable*.

        The first occurrence of a field replaces any existing values;
        subsequent occurrences within the same *iterable* append.

        :param iterable: a dictionary or an iterable over keys, values
            tuples.
        '''
        seen = set()
        for key, value in mapping_iterator(iterable):
            key = key.lower()
            if key in seen:
                self.add_header(key, value)
            else:
                seen.add(key)
                self[key] = value

    def copy(self):
        return self.__class__(self)

    def __contains__(self, key):
        return header_field(key) in self._headers

    def __getitem__(self, key):
        # Join the stored values with the field-specific joiner
        # (default ', '; a None entry means '; ').
        key = header_field(key)
        values = self._headers[key]
        joiner = HEADER_FIELDS_JOINER.get(key, ', ')
        if joiner is None:
            joiner = '; '
        return joiner.join(values)

    def __delitem__(self, key):
        self._headers.__delitem__(header_field(key))

    def __setitem__(self, key, value):
        # Falsy keys or values are silently ignored.
        key = header_field(key)
        if key and value:
            if not isinstance(value, list):
                value = header_values(key, value)
            self._headers[key] = value

    def get(self, key, default=None):
        '''Get the field value at ``key`` as comma separated values.

        For example::

            >>> from pulsar.utils.httpurl import Headers
            >>> h = Headers(kind='client')
            >>> h.add_header('accept-encoding', 'gzip')
            >>> h.add_header('accept-encoding', 'deflate')
            >>> h.get('accept-encoding')

        results in::

            'gzip, deflate'
        '''
        if key in self:
            return self.__getitem__(key)
        else:
            return default

    def get_all(self, key, default=None):
        '''Get the values at header ``key`` as a list rather than a
        string separated by comma (which is returned by the :meth:`get`
        method).

        For example::

            >>> from pulsar.utils.httpurl import Headers
            >>> h = Headers(kind='client')
            >>> h.add_header('accept-encoding', 'gzip')
            >>> h.add_header('accept-encoding', 'deflate')
            >>> h.get_all('accept-encoding')

        results in::

            ['gzip', 'deflate']
        '''
        return self._headers.get(header_field(key), default)

    def has(self, field, value):
        '''Check if ``value`` is available in header ``field``.'''
        value = value.lower()
        for c in self.get_all(field, ()):
            if c.lower() == value:
                return True
        return False

    def pop(self, key, *args):
        return self._headers.pop(header_field(key), *args)

    def clear(self):
        '''Same as :meth:`dict.clear`, it removes all headers.'''
        self._headers.clear()

    def getheaders(self, key):  # pragma    nocover
        '''Required by cookielib in python 2.

        If the key is not available, it returns an empty list.
        '''
        return self._headers.get(header_field(key), [])

    def add_header(self, key, values):
        '''Add ``values`` to ``key`` header.

        If the header is already available, append the value to the
        list.  Duplicate values are skipped.

        :param key: header name
        :param values: a string value or a list/tuple of strings values
            for header ``key``
        '''
        key = header_field(key)
        if key and values:
            if not isinstance(values, (tuple, list)):
                values = header_values(key, values)
            current = self._headers.get(key, [])
            for value in values:
                if value and value not in current:
                    current.append(value)
            self._headers[key] = current

    def remove_header(self, key, value=None):
        '''Remove the header at ``key``.

        If ``value`` is provided, it removes only that value if found.
        Returns the removed value (or the full list when no *value* was
        given), or None.
        '''
        key = header_field(key)
        if key:
            if value:
                value = value.lower()
                values = self._headers.get(key, [])
                removed = None
                for v in values:
                    if v.lower() == value:
                        removed = v
                        values.remove(v)
                self._headers[key] = values
                return removed
            else:
                return self._headers.pop(key, None)

    def flat(self, version, status):
        '''Full headers bytes representation'''
        vs = version + (status, self)
        return ('HTTP/%s.%s %s\r\n%s' % vs).encode(DEFAULT_CHARSET)

    def __iter__(self):
        # Yield (field, joined-values); fields whose joiner is falsy are
        # yielded once per value instead.
        dj = ', '
        for k, values in self._headers.items():
            joiner = HEADER_FIELDS_JOINER.get(k, dj)
            if joiner:
                yield k, joiner.join(values)
            else:
                for value in values:
                    yield k, value

    def _ordered(self):
        # Header lines followed by the blank line terminating the block.
        for key, header in self:
            yield "%s: %s" % (key, header)
        yield ''
        yield ''
def __repr__(self):
    """Render as ``OrderedDefaultDict(<default_callable>, <items>)``."""
    contents = OrderedDict.__repr__(self)
    return 'OrderedDefaultDict(%s, %s)' % (self.default_callable, contents)
def __repr__(self):
    """Render as ``<type>(<factory>, <items>)`` using the concrete type."""
    cls = type(self)
    contents = OrderedDict.__repr__(self)
    return "%s(%s, %s)" % (cls, self.default_factory, contents)
class UniversalOrderedStruct(UniversalCollection):
    """
    Mostly like an OrderedDict, but it behaves like a list, in that
    (for x in struct) and (x in struct) looks over values, not keys.
    """

    def __init__(self, *initializer):
        # A single plain-dict initializer is normalized into a key-sorted
        # OrderedDict so construction order is deterministic.
        if len(initializer) == 1 and type(initializer[0]) is dict:
            d = initializer[0]
            initializer = [OrderedDict((k, d[k]) for k in sorted(d.keys()))]
        self._heart = OrderedDict(*initializer)

    def __contains__(self, item):
        # Membership is over values (list-like), not keys.
        return item in self._heart.values()

    def __setitem__(self, key, value):
        # Integer keys (and the `next` sentinel) are reserved for sequences.
        if key is next or isinstance(key, int):
            raise InvalidKeyError(
                'This sequence is an {}, and cannot be given {} key: {}'.format(
                    self.__class__.__name__, key.__class__.__name__, key))
        self._heart.__setitem__(key, value)

    def __getitem__(self, selector):
        # list/ndarray/slice selectors return a sub-struct; slices index by key.
        if isinstance(selector, (list, slice, np.ndarray)):
            if isinstance(selector, slice):
                all_keys = list(self.keys())
                start_index = all_keys.index(selector.start) if selector.start is not None else None
                stop_index = all_keys.index(selector.stop) if selector.stop is not None else None
                keys = all_keys[start_index:stop_index:selector.step]
            else:
                keys = selector
            return UniversalOrderedStruct((k, self[k]) for k in keys)
        else:
            return self._heart.__getitem__(selector)

    def __repr__(self):
        # BUGFIX: original stripped len(OrderedDict.__class__.__name__)
        # characters, i.e. len('type') == 4, leaving a mangled prefix.
        # The intended prefix to replace is 'OrderedDict' itself.
        rep = self._heart.__repr__()
        return self.__class__.__name__ + rep[len(OrderedDict.__name__):]

    def __iter__(self):
        # Iterate values, not keys (list-like behaviour).
        return iter(self._heart.values())

    def __len__(self):
        return self._heart.__len__()

    def has_key(self, key):
        return key in self._heart

    def keys(self):
        return self._heart.keys()

    def values(self):
        return self._heart.values()

    def to_struct(self):
        return self._heart.copy()

    @classmethod
    def from_struct(cls, struct):
        return cls(struct)

    @classmethod
    def key_in_filter(cls, key, key_filter):
        """Return True when `key` passes `key_filter` (value, list or slice)."""
        # BUGFIX: original was declared with `cls` but lacked @classmethod,
        # and tested `key in list` (the builtin type) instead of the filter.
        if isinstance(key_filter, list):
            return key in key_filter
        elif isinstance(key_filter, slice):
            # Only the全-open slice [:] is supported so far.
            if key_filter.start is None and key_filter.stop is None and key_filter.step is None:
                return True
            else:
                raise NotImplementedError(
                    'Have not yet implemented key filter for slice: {}'.format(key_filter))
        else:
            return key == key_filter
class ListofEntities(list):
    """Ordered collection of entities keyed by their `.id`, with change tracking.

    Subclasses list for interface compatibility, but the real storage is an
    OrderedDict keyed by entity id plus a hash -> entity index used for
    duplicate detection. Mutators record touched entities in `self.changed`
    when track=True.
    """

    def __init__(self, typ):
        super(ListofEntities, self).__init__()
        from collections import OrderedDict
        self._dict_entities = OrderedDict()
        self._dict_entities_hash = {}
        self.typeItems = typ
        # NOTE(review): requires `typ` to be constructible with no arguments.
        self.type_instance = typ()
        self.changed = []

    def _update_hashes(self):
        # Rebuild the hash index from scratch after a bulk update.
        self._dict_entities_hash = {hash(v): v for v in self._dict_entities.values()}

    def __hash__(self):
        return hash(frozenset(self._dict_entities))

    def __str__(self):
        return self._dict_entities.__str__()

    def __repr__(self):
        return self._dict_entities.__repr__()

    def __unicode__(self):
        return self.__str__()

    def __getitem__(self, item):
        # BUGFIX: dict_values has no __getitem__ in Python 3; materialize the
        # values before indexing (supports both int indices and slices).
        return list(self._dict_entities.values())[item]

    def update_from_changed_entities(self, changed_entities):
        """Merge a batch of changed entities; tombstoned entities are skipped."""
        if changed_entities is None:
            return
        for entity in changed_entities:
            if hasattr(entity, 'is_tombstone') and entity.is_tombstone:
                continue
            try:
                self._dict_entities[entity.id].update_from_changed_entities(entity)
            except KeyError:
                self._dict_entities[entity.id] = entity
        self._update_hashes()

    def get(self, entity_id):
        return self._dict_entities.get(entity_id)

    def extend(self, objects, track=True):
        if not all(isinstance(x, self.typeItems) for x in objects):
            raise ValueError('this ListofEntities can only contain %s' % self.typeItems.__name__)
        for o in objects:
            self._dict_entities[o.id] = o
            self._dict_entities_hash[hash(o)] = o
        if track:
            self.changed.extend(objects)

    def append(self, o, track=True):
        if not isinstance(o, self.typeItems):
            raise ValueError('this ListofEntities can only contain %s' % self.typeItems.__name__)
        self._dict_entities[o.id] = o
        self._dict_entities_hash[hash(o)] = o
        if track:
            self.changed.append(o)

    def delete(self, o, track=True):
        if not isinstance(o, self.typeItems):
            raise ValueError('this ListofEntities can only contain %s' % self.typeItems.__name__)
        if o.id in self._dict_entities:
            del self._dict_entities[o.id]
        if hash(o) in self._dict_entities_hash:
            del self._dict_entities_hash[hash(o)]
        if track:
            # Deletions are tracked as tombstones.
            o.is_tombstone = True
            self.changed.append(o)

    def modify(self, o, track=True):
        if not isinstance(o, self.typeItems):
            raise ValueError('this ListofEntities can only contain %s' % self.typeItems.__name__)
        if o.id in self._dict_entities:
            # Drop the stale hash entry of the entity being replaced.
            h = hash(self._dict_entities[o.id])
            if h in self._dict_entities_hash:
                del self._dict_entities_hash[h]
        self._dict_entities[o.id] = o
        self._dict_entities_hash[hash(o)] = o
        if track:
            self.changed.append(o)

    def __iter__(self):
        return self._dict_entities.values().__iter__()

    def __len__(self):
        return len(self._dict_entities)

    def containsduplicate(self, item):
        if not isinstance(item, self.typeItems):
            return False
        # BUGFIX: the index is keyed by hash(o) on insertion, but the lookup
        # used item._hash() — a helper plain entities do not define. Use the
        # same hash() the index was built with.
        return hash(item) in self._dict_entities_hash

    def __contains__(self, item):
        if not isinstance(item, self.typeItems):
            return False
        return item.id in self._dict_entities

    def get_changed_entities(self):
        return self.changed
def __repr__(self):
    """Angle-bracket repr wrapping the underlying OrderedDict contents."""
    inner = OrderedDict.__repr__(self)
    return "<ConnectionServer(%s)>" % inner
class Dictionary():
    """Thin wrapper around an OrderedDict with a fluent set_item API.

    NOTE(review): several method names mirror dunder methods (ne, ge, lt, ...)
    and simply delegate to the wrapped dict; their (sometimes misspelled)
    public names are kept for interface compatibility.
    """

    def __init__(self):
        self.__dictionary = OrderedDict()

    def get_json(self):
        """Serialize the mapping to a JSON object string."""
        return json.dumps(self.__dictionary)

    def set_item(self, key, value):
        """Insert/replace key -> value; returns self for chaining."""
        self.__dictionary.__setitem__(key, value)
        return self

    def values(self):
        return list(self.__dictionary.values())

    def items(self):
        return self.__dictionary.items()

    def copy(self):
        # BUGFIX: the original evaluated self.__dictionary and discarded it,
        # always returning None; return a shallow copy of the mapping.
        return self.__dictionary.copy()

    def constain(self, key):
        # (sic) public name kept for compatibility; key-membership test.
        return key in self.get_list_of_key()

    def get(self, key):
        return self.__dictionary.get(key)

    def clear(self, key, default):
        # NOTE: the original also defined a zero-argument clear() earlier in
        # the class body, but this later definition shadowed it, so only this
        # pop-like form was ever callable; the dead definition was removed.
        self.__dictionary.pop(key, default)

    def format(self, *args, **kwargs):
        return self.__dictionary.__format__(*args, **kwargs)

    def ne(self, *args, **kwargs):
        return self.__dictionary.__ne__(*args, **kwargs)

    def repr(self, *args, **kwargs):
        return self.__dictionary.__repr__(*args, **kwargs)

    def ge(self, *args, **kwargs):
        # BUGFIX: original read `self.dictionary__ge__` (missing attribute
        # dereference), which always raised AttributeError.
        return self.__dictionary.__ge__(*args, **kwargs)

    def __sizeof__(self):
        return self.__dictionary.__sizeof__()

    def setattr(self, *args, **kwargs):
        return self.__dictionary.__setattr__(*args, **kwargs)

    def dir(self):
        return self.__dictionary.__dir__()

    def le(self, *args, **kwargs):
        return self.__dictionary.__le__(*args, **kwargs)

    def delattr(self, *args, **kwargs):
        return self.__dictionary.__delattr__(*args, **kwargs)

    def hash(self, *args, **kwargs):
        # NOTE(review): dict.__hash__ is None, so calling this raises
        # TypeError; kept as-is to preserve the original contract.
        return self.__dictionary.__hash__(*args, **kwargs)

    def gt(self, *args, **kwargs):
        return self.__dictionary.__gt__(*args, **kwargs)

    def eq(self, *args, **kwargs):
        return self.__dictionary.__eq__(*args, **kwargs)

    def getattribute(self, *args, **kwargs):
        return self.__dictionary.__getattribute__(*args, **kwargs)

    def str(self, *args, **kwargs):
        return self.__dictionary.__str__(*args, **kwargs)

    def reduce(self, *args, **kwargs):
        return self.__dictionary.__reduce__(*args, **kwargs)

    def reduce_ex(self, *args, **kwargs):
        return self.__dictionary.__reduce_ex__(*args, **kwargs)

    def lt(self, *args, **kwargs):
        return self.__dictionary.__lt__(*args, **kwargs)

    def keys(self):
        return self.get_list_of_key()

    def get_list_of_key(self):
        return list(self.__dictionary.keys())
class UniversalOrderedStruct(UniversalCollection):
    """
    Mostly like an OrderedDict, but it behaves like a list, in that
    (for x in struct) and (x in struct) looks over values, not keys.
    """

    def __init__(self, *initializer):
        # Normalize a single plain-dict initializer into a key-sorted
        # OrderedDict for deterministic ordering.
        if len(initializer) == 1 and type(initializer[0]) is dict:
            d = initializer[0]
            initializer = [OrderedDict((k, d[k]) for k in sorted(d.keys()))]
        self._heart = OrderedDict(*initializer)

    def __contains__(self, item):
        # Membership is over values, not keys.
        return item in self._heart.values()

    def __setitem__(self, key, value):
        # Integer keys (and the `next` sentinel) are reserved for sequences.
        if key is next or isinstance(key, int):
            raise InvalidKeyError(
                'This sequence is an {}, and cannot be given {} key: {}'.
                format(self.__class__.__name__, key.__class__.__name__, key))
        self._heart.__setitem__(key, value)

    def __getitem__(self, selector):
        # list/ndarray/slice selectors produce a sub-struct; slices are by key.
        if isinstance(selector, (list, slice, np.ndarray)):
            if isinstance(selector, slice):
                all_keys = list(self.keys())
                start_index = all_keys.index(
                    selector.start) if selector.start is not None else None
                stop_index = all_keys.index(
                    selector.stop) if selector.stop is not None else None
                keys = all_keys[start_index:stop_index:selector.step]
            else:
                keys = selector
            return UniversalOrderedStruct((k, self[k]) for k in keys)
        else:
            return self._heart.__getitem__(selector)

    def __repr__(self):
        # BUGFIX: original stripped len(OrderedDict.__class__.__name__)
        # characters (len('type') == 4) instead of the 'OrderedDict' prefix.
        rep = self._heart.__repr__()
        return self.__class__.__name__ + rep[len(OrderedDict.__name__):]

    def __iter__(self):
        # Iterate values (list-like behaviour).
        return iter(self._heart.values())

    def __len__(self):
        return self._heart.__len__()

    def has_key(self, key):
        return key in self._heart

    def keys(self):
        return self._heart.keys()

    def values(self):
        return self._heart.values()

    def to_struct(self):
        return self._heart.copy()

    @classmethod
    def from_struct(cls, struct):
        return cls(struct)

    @classmethod
    def key_in_filter(cls, key, key_filter):
        """Return True when `key` passes `key_filter` (value, list or slice)."""
        # BUGFIX: original lacked @classmethod and tested `key in list`
        # (the builtin type) rather than membership in the filter.
        if isinstance(key_filter, list):
            return key in key_filter
        elif isinstance(key_filter, slice):
            # Only the fully-open slice [:] is supported so far.
            if key_filter.start is None and key_filter.stop is None and key_filter.step is None:
                return True
            else:
                raise NotImplementedError(
                    'Have not yet implemented key filter for slice: {}'.format(
                        key_filter))
        else:
            return key == key_filter
def __repr__(self):
    """Repr via a temporary OrderedDict snapshot of this mapping."""
    snapshot = OrderedDict((key, self[key]) for key in self.keys())
    return repr(snapshot)
class FeatureExtractor():
    '''
    Extracts windowed time- and frequency-domain features from EEG recordings.

    ML: I am not sure if using file buffers is the best way of going about it:
    on the one hand it abstracts away the file access, on the other hand it
    will probably always be relatively slow... maybe not a major issue since
    it will only be ran once (or a small number of times) for each project.
    Preliminarily just thinking about single channel data. Include
    multichannel features later
    '''

    def __init__(self, settings_dict=None):
        # settings_dict: optional mapping of extraction settings; when None,
        # the default settings below are used.
        self.settings = settings_dict
        if self.settings is None:
            self.settings = OrderedDict(
                window_length=5,  # length in seconds for the segments on which to compute features
                overlap=.5,  # overlap ratio between windows
                window='rectangular',
                # power_bands = [(1, 4), (4, 8), (8, 12), (12, 30), (30, 50), (50, 70), (70, 120)],
                # number_of_features = 15,
                feature_labels=[
                    'min', 'max', 'mean', 'log std', 'kurtosis', 'skewness',
                    'log coastline (log sum of abs diff)',
                    'log power in band (1, 4) Hz',
                    'log power in band (4, 8) Hz',
                    'log power in band (8, 12) Hz',
                    'log power in band (12, 30) Hz',
                    'log power in band (30, 50) Hz',
                    'log power in band (50, 70) Hz',
                    'log power in band (70, 120) Hz', 'Spectrum entropy'
                ],
                # Feature functions are stored as source strings and eval'd in
                # update_from_settings against the module aliases declared in
                # function_module_dependencies.
                feature_time_functions=[
                    'np.min', 'np.max', 'np.mean',
                    'lambda x:np.log(np.std(x))', 'stats.kurtosis',
                    'stats.skew',
                    'lambda d:np.log(np.mean(np.abs(np.diff(d,axis=0))))'
                ],
                feature_freq_functions=[
                    'fe.powerf(1, 4)', 'fe.powerf(4, 8)', 'fe.powerf(8, 12)',
                    'fe.powerf(12, 30)', 'fe.powerf(30, 50)',
                    'fe.powerf(50, 70)', 'fe.powerf(70, 120)',
                    'fe.reg_entropy'
                ],
                function_module_dependencies=[
                    ('numpy', 'np'), ('pyecog2.feature_extractor', 'fe'),
                    ('scipy.stats', 'stats')
                ])
        self.update_from_settings()

    def update_from_settings(self, settings=None):
        # Re-evaluate the feature function strings against freshly imported
        # modules. NOTE(review): uses eval on settings content — settings
        # files must be trusted input.
        if settings is not None:
            self.settings = settings
        module_dict = {}
        for module, alias in self.settings['function_module_dependencies']:
            if alias is None or alias == '':
                alias = module
            module_dict[alias] = import_module(module)
        self.feature_time_functions = [
            eval(f, module_dict) for f in self.settings['feature_time_functions']
        ]
        self.feature_freq_functions = [
            eval(f, module_dict) for f in self.settings['feature_freq_functions']
        ]
        my_worker_flist_init(
            self.feature_time_functions,
            self.feature_freq_functions)  # Workaround for multiprocessing

    def load_settings(self, fname):
        # Load a JSON settings file and rebuild the feature functions.
        with open(fname) as f:
            settings = json.load(f)
        self.update_from_settings(settings)

    def save_settings(self, fname):
        # Persist the current settings as pretty-printed JSON.
        with open(fname, 'w') as json_file:
            json.dump(self.settings, json_file, indent=2, sort_keys=True)

    @property
    def number_of_features(self):
        # Total = number of time-domain + frequency-domain feature functions.
        return len(self.settings['feature_time_functions']) + len(
            self.settings['feature_freq_functions'])

    def extract_features_from_animal(self, animal, re_write=False, n_cores=-1,
                                     progress_bar=None):
        # Create feature files for each eeg file
        if n_cores == -1:
            n_cores = multiprocessing.cpu_count()
        Nfiles = len(animal.eeg_files)
        tuples = [(animal, i, re_write) for i in range(Nfiles)]
        # The following is not working yet...
        # with multiprocessing.Pool(processes=n_cores,initializer=my_worker_flist_init,
        #                           initargs = (self.feature_time_functions,self.feature_freq_functions)) as pool:
        #     for i, _ in enumerate(pool.imap(self.extract_features_from_file, tuples)):
        #         if progress_bar is not None:
        #             progress_bar.setValue(i//Nfiles)
        # Sequential fallback; progress reported as a 0-100 percentage.
        for i, _ in enumerate(map(self.extract_features_from_file, tuples)):
            if progress_bar is not None:
                progress_bar.setValue((100 * (i + 1)) // Nfiles)
        # file_buffer = FileBuffer(animal,verbose=False)
        # Identify the time intervals and filenames to extract features
        # for i,eeg_fname in enumerate(animal.eeg_files):
        #     feature_fname = '.'.join(eeg_fname.split('.')[:-1] + ['features'])
        #     feature_metafname = '.'.join(eeg_fname.split('.')[:-1] + ['fmeta'])
        #     time_range = [animal.eeg_init_time[i], animal.eeg_init_time[i]+animal.eeg_duration[i]]
        #     if re_write or not os.path.isfile(feature_fname):
        #         print('Extracting features for file',i+1,'of',len(animal.eeg_files),':',eeg_fname, end='\r')
        #         self.extract_features_from_time_range(file_buffer, time_range, feature_fname, feature_metafname)
        #     else:
        #         # print(feature_fname,'already exists')
        #         pass

    def extract_features_from_file(self, animal_fileIndex_rewrite_tuple):
        # Work item is a single tuple so it can be mapped (multiprocessing).
        animal, i, re_write = animal_fileIndex_rewrite_tuple
        eeg_fname = animal.eeg_files[i]
        # Feature/metadata filenames are derived by swapping the extension.
        feature_fname = '.'.join(eeg_fname.split('.')[:-1] + ['features'])
        feature_metafname = '.'.join(eeg_fname.split('.')[:-1] + ['fmeta'])
        time_range = [
            animal.eeg_init_time[i],
            animal.eeg_init_time[i] + animal.eeg_duration[i]
        ]
        if re_write or not os.path.isfile(feature_fname):
            print('Extracting features for file', i + 1, 'of',
                  len(animal.eeg_files), ':', eeg_fname, end='\r')
            file_buffer = FileBuffer(animal, verbose=False)
            self.extract_features_from_time_range(file_buffer, time_range,
                                                  feature_fname,
                                                  feature_metafname)
        else:
            # print(feature_fname,'already exists')
            pass

    def extract_features_from_time_range(self, file_buffer, time_range,
                                         feature_fname, feature_metafname):
        # print('time_range:',time_range,feature_fname,feature_metafname)
        # Window start times spaced by window_length * (1 - overlap).
        window_step = self.settings['window_length'] * (
            1 - self.settings['overlap'])
        window_starts = np.arange(time_range[0], time_range[1], window_step)
        # print('window_starts:',window_starts)
        features = np.zeros((len(window_starts), self.number_of_features),
                            dtype='double')
        window = get_window(self.settings['window'], 1)
        for i, window_init in enumerate(window_starts):
            data, time = file_buffer.get_data_from_range([
                window_init, window_init + self.settings['window_length']
            ])  # get all data from time window
            data += np.random.randn(*data.shape).astype(data.dtype) * 2**(
                -16
            )  # add a bit of regularizing noise, bellow 24 bit noise floors
            # Re-generate the taper if the data length changed.
            if len(data) != len(window):
                window = get_window(self.settings['window'], len(data))
                window.shape = (data.shape[0], 1)
            data *= window
            fs = 1 / (time[1] - time[0])
            dataf = np.fft.rfft(data, axis=0) / len(data)
            # Time-domain features fill columns 0..n; frequency-domain
            # features continue from column n+1 (n is the last time index).
            # for j,func in enumerate(self.feature_time_functions):
            for j, func in enumerate(_time_flist):
                features[i, j] = func(data)
            n = j
            # for j,func in enumerate(self.feature_freq_functions):
            for j, func in enumerate(_freq_flist):
                features[i, j + n + 1] = func(dataf, fs)
        # Metadata mirrors the EEG file metadata schema; features are treated
        # as channels sampled at the window rate.
        metadata = OrderedDict(fs=1 / window_step,
                               no_channels=int(self.number_of_features),
                               data_format=str(features.dtype),
                               volts_per_bit=0,
                               transmitter_id=str(file_buffer.animal.id),
                               start_timestamp_unix=(time_range[0]),
                               duration=(time_range[1] - time_range[0]),
                               channel_labels=self.settings['feature_labels'])
        # print(metadata)
        with open(feature_metafname, 'w') as json_file:
            json.dump(metadata, json_file, indent=2, sort_keys=True)
        features.tofile(feature_fname)

    def __repr__(self):
        return 'FeatureExtractor with settings: ' + self.settings.__repr__()
def __repr__(self):
    """Rewrap the OrderedDict repr in plain-dict braces.

    NOTE(review): the [8:-2] slice assumes a specific OrderedDict repr
    prefix/suffix length — verify against the targeted Python version.
    """
    body = OrderedDict.__repr__(self)[8:-2]
    return '{' + body + '}'
class TypeSystem:
    """ TypeSystem keeps track of the distances of each variable. The distance of
    each variable is internally represented by c_ast node, and gets simplified and
    casted to strings when get_distance method is called"""

    # c_ast node classes that represent expressions.
    _EXPR_NODES = (c_ast.BinaryOp, c_ast.TernaryOp, c_ast.UnaryOp, c_ast.ID,
                   c_ast.Constant, c_ast.ArrayRef)

    def __init__(self, types=None):
        # Internal table: name -> [aligned distance, shadow distance], where
        # each entry is a c_ast node or the wildcard string '*'.
        if types:
            self._types = types
        else:
            self._types = OrderedDict()

    def __str__(self):
        # convert AST representation to code representation for better human-readability
        return '{{{}}}'.format(', '.join(
            '{}: [{}, {}]'.format(
                name, aligned if aligned == '*' else _generator.visit(aligned),
                shadow if shadow == '*' else _generator.visit(shadow))
            for name, (aligned, shadow) in self._types.items()))

    def __len__(self):
        return len(self._types)

    def __repr__(self):
        return self._types.__repr__()

    def __eq__(self, other):
        # NOTE(review): equality is by repr comparison of the underlying
        # tables, which makes it order-sensitive.
        if isinstance(other, TypeSystem):
            return self._types.__repr__() == other.__repr__()
        else:
            return False

    def __contains__(self, item):
        return self._types.__contains__(item)

    def copy(self):
        # Deep copy so simplifications on the copy don't mutate this table.
        return TypeSystem(copy.deepcopy(self._types))

    def clear(self):
        self._types.clear()

    def variables(self):
        # Yield (name, (aligned str, shadow str)) for every tracked variable.
        for name in self._types.keys():
            yield name, self.get_distance(name)

    def apply(self, condition, is_true):
        # Simplify every non-wildcard distance under the branch condition.
        simplifier = _DistanceSimplifier(condition, is_true)
        for name in self._types.keys():
            self._types[name] = \
                [simplifier.simplify(distance) if distance != '*' else distance
                 for distance in self._types[name]]

    def diff(self, other):
        """Yield (name, is_aligned) for each distance that differs from other."""
        assert isinstance(other, TypeSystem)
        for name, *_ in other.variables():
            if name not in self._types:
                # Variable unknown here: both distances count as differing.
                yield (name, True)
                yield (name, False)
            else:
                aligned, shadow = self._types[name]
                other_aligned, other_shadow = other.get_raw_distance(name)
                if not is_node_equal(aligned, other_aligned):
                    yield (name, True)
                if not is_node_equal(shadow, other_shadow):
                    yield (name, False)

    def merge(self, other):
        """Merge other into this table; conflicting distances become '*'."""
        assert isinstance(other, TypeSystem)
        for name, *_ in other.variables():
            if name not in self._types:
                self._types[name] = other.get_raw_distance(name)
            else:
                cur_align, cur_shadow = self._types[name]
                other_align, other_shadow = other.get_raw_distance(name)
                if not (cur_align == other_align == '*'
                        or is_node_equal(cur_align, other_align)):
                    self._types[name][0] = '*'
                if not (cur_shadow == other_shadow == '*'
                        or is_node_equal(cur_shadow, other_shadow)):
                    self._types[name][1] = '*'

    def get_raw_distance(self, name):
        """ return the raw distance, in AST node representation.
        :param name: The name of the variable.
        :return: (Aligned raw distance, Shadow raw distance), both of ast node type.
        """
        return self._types[name]

    def get_distance(self, name):
        """ get the distance(align, shadow) of a variable, in str representation.
        :param name: The name of the variable.
        :return: (Aligned distance, Shadow distance) of the variable.
        """
        return tuple('*' if distance == '*' else _generator.visit(distance)
                     for distance in self._types[name])

    def update_distance(self, name, align, shadow):
        # try simplify
        from sympy import simplify
        # Square brackets confuse sympy's parser, so encode them temporarily.
        align = str(align).replace('[', '__LEFTBRACE__').replace(
            ']', '__RIGHTBRACE__')
        shadow = str(shadow).replace('[', '__LEFTBRACE__').replace(
            ']', '__RIGHTBRACE__')
        try:
            align = simplify(align)
        except Exception:
            # Simplification is best-effort; fall back to the raw expression.
            pass
        try:
            shadow = simplify(shadow)
        except Exception:
            pass
        align = str(align).replace('__LEFTBRACE__',
                                   '[').replace('__RIGHTBRACE__', ']')
        shadow = str(shadow).replace('__LEFTBRACE__',
                                     '[').replace('__RIGHTBRACE__', ']')
        # convert to internal AST representation
        align = convert_to_ast(align) if align != '*' else '*'
        shadow = convert_to_ast(shadow) if shadow != '*' else '*'
        if name not in self._types:
            self._types[name] = [align, shadow]
        else:
            # Only overwrite components that actually changed.
            cur_aligned, cur_shadow = self._types[name]
            if not is_node_equal(cur_aligned, align):
                self._types[name][0] = align
            if not is_node_equal(cur_shadow, shadow):
                self._types[name][1] = shadow
def __repr__(self):
    """Repr as 'ClassName(default_factory, mapping-repr)'."""
    cls_name = type(self).__name__
    return '%s(%s, %s)' % (cls_name, self.default_factory,
                           OrderedDict.__repr__(self))
def __repr__(self):
    """Repr in 'OrderedDefaultDict(factory, contents)' form."""
    factory = self.default_factory
    contents = OrderedDict.__repr__(self)
    return 'OrderedDefaultDict(%s, %s)' % (factory, contents)
def __repr__(self, _repr_running={}):  # pylint: disable=W0102
    """Repr in 'DefaultOrderedDict(factory, contents)' form.

    _repr_running is kept (unused here) for signature compatibility with
    OrderedDict's recursive-repr protocol.
    """
    inner = OrderedDict.__repr__(self)
    return 'DefaultOrderedDict(%s, %s)' % (self.default_factory, inner)
def __repr__(self):
    """Delegate repr to the OrderedDict base implementation."""
    base_repr = OrderedDict.__repr__(self)
    return base_repr
def __repr__(self):
    """Repr showing the factory plus the items portion of the base repr.

    NOTE(review): the [19:-1] slice assumes a specific base-repr prefix
    length — verify against the targeted Python version.
    """
    items_part = OrderedDict.__repr__(self)[19:-1]
    return 'DefaultOrderedDict(%s, %s)' % (self.default_factory, items_part)