def __new__(cls, *args):
    """Create a new immutable GroupableOrderedDict.

    Accepts at most one positional argument: an existing
    GroupableOrderedDict, a dict (optionally carrying a special
    '__order__' key listing each key once per occurrence), or an iterable
    of (key, value) pairs.  Values are grouped per key into tuples and
    the flattened key ordering is stored under '__order__'.

    :raises TypeError: if more than one positional argument is given.
    :raises Exception: if '__order__' repeats a key whose value is not a
        sequence (order and values disagree).
    """
    new = OrderedDict.__new__(cls)
    OrderedDict.__init__(new)

    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got {}'
                        .format(len(args)))

    ordering = []
    values = args[0]
    if values:
        if isinstance(values, GroupableOrderedDict):
            # Re-ingest one (key, value) pair per occurrence; the
            # ordering is rebuilt below.
            values = values.iteritems(with_order=False, repeated=True)
        elif isinstance(values, dict):
            if '__order__' in values:
                order = values.pop('__order__')
                tmp = []
                c = Counter()
                for key in order:
                    v = values[key]
                    if not isinstance(v, (tuple, list)):
                        if c[key] == 0:
                            tmp.append((key, v))
                        else:
                            # BUG FIX: the two message fragments used to
                            # concatenate without a separating space
                            # ("...matchon key...").
                            raise Exception("Order and values don't match "
                                            "on key {0} at position {1}"
                                            .format(key, c[key]))
                    else:
                        tmp.append((key, v[c[key]]))
                    c[key] += 1
                values = tmp
            else:
                values = iteritems(values)

        for key, value in values:
            if key not in new:
                OrderedDict.__setitem__(new, key, [])
            v = []
            if isinstance(value, (tuple, list)):
                for item in value:
                    v.append(item)
                    ordering.append(key)
            else:
                v.append(value)
                ordering.append(key)
            OrderedDict.__getitem__(new, key).extend(v)

    # Immutable: freeze every group (and the ordering) into tuples.
    for key, value in dict.items(new):
        OrderedDict.__setitem__(new, key, tuple(value))
    OrderedDict.__setitem__(new, '__order__', tuple(ordering))

    return new
def iteritems(self, with_order=True, repeated=False):
    """Iterate over (key, value) pairs, like D.items().

    With ``repeated`` true, each occurrence listed in '__order__' is
    yielded individually; otherwise grouped values are yielded once,
    unwrapping single-element groups of keys that occur exactly once.
    ``with_order`` controls whether the special '__order__' entry is
    emitted first.
    """
    if repeated:
        order = self['__order__']
        if with_order:
            yield '__order__', order
        seen = Counter()
        for name in order:
            group = OrderedDict.__getitem__(self, name)
            yield name, group[seen[name]]
            seen[name] += 1
        return

    if with_order:
        yield '__order__', dict.__getitem__(self, '__order__')
    # How many times each key appears among the (unique) dict keys.
    counts = {
        name: len(list(grouped))
        for name, grouped in itertools.groupby(sorted(self.keys()))
    }
    for name, group in OrderedDict.items(self):
        if name == '__order__':
            continue
        unwrap = (isinstance(group, (list, tuple))
                  and len(group) == 1
                  and counts[name] == 1)
        yield name, group[0] if unwrap else group
class TargetVars(Mapping):
    """Immutable ordered mapping from target variables to their values."""

    # Sentinel meaning "variable declared but not yet assigned".
    EMPTY = EmptyTargetVar()

    def __init__(self, target_vars=tuple(), is_empty=True):
        """
        :param target_vars: iterable of names (when ``is_empty``) or a
            mapping / iterable of (name, value) pairs (when not).
        :param is_empty: when True, every variable starts as EMPTY.
        """
        if is_empty:
            target_vars = [(v, self.EMPTY) for v in target_vars]
        self._od = OrderedDict(target_vars)

    # Mapping protocol delegates to the underlying OrderedDict.
    def __getitem__(self, k):
        return self._od.__getitem__(k)

    def __len__(self):
        return self._od.__len__()

    def __iter__(self):
        return self._od.__iter__()

    def update(self, *args, **kwargs):
        """Return a new instance with the given entries merged in."""
        cpy = self.copy()
        cpy._od.update(*args, **kwargs)
        return cpy

    def copy(self):
        """Return a value-preserving copy of this instance.

        BUG FIX: pass ``is_empty=False`` — relying on the default True
        treated the mapping as a bare name list and reset every value to
        EMPTY, so copy()/update() silently discarded all assignments.
        """
        return self.__class__(self._od, is_empty=False)

    def __str__(self):
        """Format target vars for printing"""
        if len(self) > 1:
            return "({})".format(", ".join(self._od.keys()))
        else:
            return "".join(self._od.keys())

    def defined_items(self):
        """Return copy of instance, omitting entries that are EMPTY"""
        return self.__class__(
            [(k, v) for k, v in self.items() if v is not self.EMPTY],
            is_empty=False)
class InfoRow():
    """Case-insensitive ordered row: values are addressable by column
    name (case folded to lower) or by integer position."""

    def __init__(self):
        self.dict = OrderedDict()

    def _value_at(self, index):
        """Return the value at positional ``index`` (may raise IndexError)."""
        # BUG FIX: dict views are not subscriptable on Python 3;
        # materialise before indexing.
        return list(self.dict.values())[index]

    def __getitem__(self, item):
        if isinstance(item, int):
            return self._value_at(item)
        return self.dict.__getitem__(item.lower())

    def __contains__(self, item):
        return self.dict.__contains__(item.lower())

    def __setitem__(self, key, value):
        if isinstance(key, int):
            # BUG FIX: the original mutated an items() tuple (a no-op at
            # best, a TypeError on Py3) and then fell through to
            # key.lower() on an int.  Rebind the column at this position
            # and stop.
            name = list(self.dict.keys())[key]
            self.dict[name] = value
            return
        self.dict.__setitem__(key.lower(), value)

    def get(self, k, d=None):
        if isinstance(k, int):
            try:
                return self._value_at(k)
            except IndexError:
                # BUG FIX: narrowed from a bare except.
                return d
        return self.dict.get(k.lower(), d)

    def __str__(self, rowSeparator="\n", columnSeparator="\t"):
        return getStr(dict2Str(self.dict))

    def __unicode__(self):
        return str(self)

    def __repr__(self):
        return str(self)
class ConfigOptions(object):
    """Class for holding a collection of config options"""

    def __init__(self):
        self.options = OrderedDict()

    def add_option(self, key, default=NO_VALUE, doc='', parser=str):
        """Adds an option to the group

        :arg key: the key to look up
        :arg default: the default value (if any); must be a string that is
            parseable by the specified parser
        :arg doc: documentation for this config option
        :arg parser: the parser for converting this value to a Python
            object
        """
        self.options[key] = Option(key, default, doc, parser)

    def update(self, new_options):
        """Merge ``new_options`` in, refreshing each option's position."""
        for opt in new_options:
            # Drop any existing entry first so the replacement moves to
            # the end of the ordering.
            if opt.key in self.options:
                del self.options[opt.key]
            self.options[opt.key] = opt

    def __iter__(self):
        return iter(self.options.values())

    def __getitem__(self, key):
        return self.options[key]
def __getitem__(self, key): try: return OrderedDict.__getitem__(self, key) except KeyError: key = key.split(', ') key2 = ', '.join(key[1:]) return self[key[0]].__getitem__(key2)
class Metadata(MutableMapping):
    """Thin mutable-mapping facade over an internal OrderedDict."""

    def __init__(self, seq=None):
        self.dct = OrderedDict(seq) if seq else OrderedDict()

    def __contains__(self, key):
        return key in self.dct

    def __getitem__(self, key):
        return self.dct[key]

    def __setitem__(self, key, value):
        self.dct[key] = value

    def __delitem__(self, key):
        del self.dct[key]

    def __iter__(self):
        return iter(self.dct)

    def __len__(self):
        return len(self.dct)

    def __repr__(self):
        return repr(self.dct)

    def __str__(self):
        return str(self.dct)
def __getitem__(self, key): if key in self: return OrderedDict.__getitem__(self, key) elif key in self.virtual: return self._get_virtual_variable(key) else: raise KeyError(repr(key))
class DebugBox(Window):
    """Window that displays registered watch expressions, redrawing them
    once per second."""

    def __init__(self, *args):
        super(DebugBox, self).__init__(*args)
        self.attr_list = OrderedDict()
        # Next free y coordinate (screen row) for a new watch.
        self.next_free = 1

    def register(self, text, width, win):
        '''
        store details of watch in an ordered dict of tuples
        dict1{"attr"+win} = attr, win_ref, width, index
        index = order on the screen (y coord)
        '''
        key = text + str(win)
        # BUG FIX: dict.has_key() was removed in Python 3; the `in`
        # operator works on Python 2 and 3 alike.
        if key not in self.attr_list:
            self.attr_list[key] = (text, win, width, self.next_free)
            self.next_free += 1
        else:
            pass
            # raise Exception  # nothing should try to register twice

    def run(self):
        # Refresh loop: redraw every registered watch each second.
        while 1:
            for key in self.attr_list.keys():
                text, win, width, index = self.attr_list[key]
                self.addstr_no_cursor(text, index, 1, width, win)
            time.sleep(1)
            self.refresh()
def handle_header(self, header):
    """Parse one raw header line into ``self.headers``.

    A bare CRLF marks the end of the header section; a line beginning
    with whitespace continues the previous header (RFC 2616 folding);
    anything else is a new ``Name: value`` header.
    """
    # FIXME: non-crlf-endings
    if not header.replace('\r\n', ''):
        # Blank line: header section is complete.
        self.headers_complete(True)
        return

    header = header.replace('\r\n', '')

    # multiline headers
    if header.startswith(('\t', ' ')):
        # Continuation line: extend the most recently seen header.
        k = list(self.headers.keys())[-1]

        from collections import OrderedDict
        # NOTE(review): raw storage appears to be a list of (name, value)
        # tuples per key, accessed via OrderedDict.__getitem__ to bypass
        # the container's own __getitem__ — confirm against the headers
        # class.
        raw_v = OrderedDict.__getitem__(self.headers, k)
        k, v = raw_v[-1]

        # section 4.2 says that we MAY reduce whitespace down to a single
        # character, so let's do it.
        v = ''.join((v, header.lstrip()))
        raw_v[-1] = k, v
    else:
        # New header: split on the first colon only, trimming the
        # surrounding whitespace.
        k, v = header.split(':', 1)
        k = k.rstrip()
        v = v.lstrip()
        self.headers[k] = v
def pop(self, key, default=None):
    """Remove ``key`` and return the first element of its stored group,
    or ``default`` when the key is absent."""
    try:
        group = OrderedDict.__getitem__(self, key)
        del self[key]
        return group[0]
    except KeyError:
        return default
class Buckets(object):
    """Proxy for OrderedDict"""

    def __init__(self, *args, **kwargs):
        self._od = OrderedDict(*args, **kwargs)

    def __getattr__(self, name):
        # Anything not defined here is served by the wrapped dict.
        return getattr(self._od, name)

    def __setitem__(self, *args, **kwargs):
        return self._od.__setitem__(*args, **kwargs)

    def __getitem__(self, *args, **kwargs):
        return self._od.__getitem__(*args, **kwargs)

    def __delitem__(self, *args, **kwargs):
        return self._od.__delitem__(*args, **kwargs)

    def __eq__(self, other):
        # Unwrap a fellow proxy so comparison happens dict-to-dict.
        target = other._od if isinstance(other, Buckets) else other
        return self._od.__eq__(target)

    def copy(self, *args, **kwargs):
        clone = Buckets()
        clone._od = self._od.copy()
        return clone
def __getitem__(self, item): """Retourne l'item si présent ou None sinon""" if item not in self: res = None else: res = OrderedDict.__getitem__(self, item) return res
def __getitem__(self, key): """Override to look in our special defaults attribute, if it exists.""" try: return OrderedDict.__getitem__(self, key) except KeyError: if hasattr(self, 'defaults_'): return self.defaults_[key] raise
def __getitem__(self, key): key = key.lower() # all keys should be lower-case to make editing easier try: return OrderedDict.__getitem__(self, key) except KeyError: return self.__missing__(key)
def __getitem__(self, key):
    """
    :return: string representing the concatenation of the values for
        field 'key'.
    """
    stored = OrderedDict.__getitem__(self, key)
    parts = stored if isinstance(stored, (tuple, list)) else [stored]
    # Empty/None parts are dropped before joining with the field's
    # separator.
    return Record.sep(key).join(filter(None, parts))
def __getitem__(self, item):
    """Index by slice, by integer position (negative allowed), or by a
    raw key.

    Integer positions are translated to storage keys through the
    ``_time_of_pos`` position->key mapping; slices are expanded via
    ``__sliceToIndex`` and returned as a list of values.
    """
    if isinstance(item, slice):
        data = []
        start, stop = self.__sliceToIndex(item)
        # NOTE(review): xrange is Python 2 only — confirm the module's
        # target interpreter.
        for point in xrange(start, stop):
            data += [self.__getitem__(point)]
        return data
    if isinstance(item, int):
        if item < 0:
            # Negative index: count back from the end.
            return (OrderedDict.__getitem__(
                self,
                self._time_of_pos[(OrderedDict.__len__(self)) + item]))
        else:
            return OrderedDict.__getitem__(self, self._time_of_pos[item])
    else:
        # Anything else is used directly as a key.
        return OrderedDict.__getitem__(self, item)
def __getitem__(self, k): if not isinstance(k, slice): return OrderedDict.__getitem__(self, k) x = SlicableOrderedDict() for idx, key in enumerate(self.keys()): if k.start <= idx < k.stop: x[key] = self[key] return x
def use(self, key, default=None):
    """Return the value for ``key`` after moving it to the end of the
    ordering (most-recently-used); return ``default`` when absent."""
    if key not in self:
        return default
    value = OrderedDict.__getitem__(self, key)
    # Delete + re-insert pushes the pair to the back of the iteration
    # order.
    OrderedDict.__delitem__(self, key)
    OrderedDict.__setitem__(self, key, value)
    return value
def set_ttl(self, key, seconds):
    """Attach a time-to-live of ``seconds`` to ``key``.

    Returns the remaining TTL (effectively ``seconds``) on success,
    None otherwise.
    """
    # NOTE(review): this raises KeyError when `key` is absent and skips
    # keys whose stored value is falsy (0, '', None).  If this is meant
    # as a presence check, `key in self` would be safer — confirm intent.
    is_have = OrderedDict.__getitem__(self, key)
    if is_have:
        expire_time = time.time() + seconds
        # Bookkeeping entry: creation time, unused max_age (0), and the
        # absolute expiry timestamp.
        self.key_time_map[key] = {"time": time.time(), "max_age": 0,
                                  "expire_time": expire_time}
        key_ttl = expire_time - time.time()
        # key_ttl can only be <= 0 if `seconds` itself was <= 0.
        if key_ttl > 0:
            return key_ttl
    return None
def __getitem__(self, key): """D[key]. It will return a list or an element depending on the quantity present. """ item = OrderedDict.__getitem__(self, key) if len(item) == 1 and key != '__order__': return item[0] return item
def __getLine(self, pos=None, add_time=False): if add_time is False: line = () else: line = (self._time.getTimeFromPos(pos), ) for var in OrderedDict.__iter__(self): line += (OrderedDict.__getitem__(self, var)[pos],) return line
def __contains__(self, key): try: item = OrderedDict.__getitem__(self, key) if time.time() - item[1] < self.max_age: return True else: del self[key] except KeyError: pass return False
def __getitem__(self, key, *args, **kwargs): # Get the key and remove it from the cache, or raise KeyError value = OrderedDict.__getitem__(self, key) del self[key] # Insert the (key, value) pair on the front of the cache OrderedDict.__setitem__(self, key, value) # Return the value from the cache return value
def __getitem__(self, key): try: val = OrderedDict.__getitem__(self, key) except KeyError: self.log.error( "No key named '{}' found in Blob. \n" "Available keys: {}".format(key, ', '.join(self.keys())) ) raise return val
class CouchDB:
    """CouchDB front-end with a small FIFO read cache.

    A single class-level lock serialises cache access across instances.
    """

    lock = Lock()

    def __init__(self):
        self.write_db = Database(Constants.WRITE_URL)
        self.read_db = Database(Constants.READ_URL)
        self.cache = OrderedDict()

    def getDoc(self, id):
        """Return the document for ``id``, serving from cache when
        possible and caching fresh reads."""
        with self.lock:
            if id not in self.cache:
                # Not cached: fetch, then insert, evicting the oldest
                # entry when the cache is full.
                document = self.read_db.get(id)
                if len(self.cache) == Constants.CACHE_SIZE:
                    self.cache.popitem(False)  # drop oldest entry
                self.cache[id] = document
            else:
                document = self.cache[id]
            return document

    def createDoc(self, json):
        return self.write_db.create(json)

    def deleteDoc(self, id):
        """Delete a document, dropping any cached copy."""
        # BUG FIX: fetch the doc BEFORE taking the lock — getDoc()
        # acquires the same non-reentrant Lock, so the original code
        # deadlocked on every call.
        doc = self.getDoc(id)
        with self.lock:
            if id in self.cache:
                self.cache.pop(id)  # clear document from cache
            self.write_db.delete(doc)

    def saveDoc(self, doc):
        # BUG FIX: self.db was never defined (AttributeError); save via
        # the write connection.
        self.write_db.save(doc)

    def updateDoc(self, doc):
        """Update a document, invalidating any cached copy."""
        with self.lock:
            if doc[Constants.DOCUMENT_ID] in self.cache:
                # clear document from cache
                self.cache.pop(doc[Constants.DOCUMENT_ID])
            self.write_db.update(doc)
def __getitem__(self, key): """Extends OrderedDict.__getitem__() to handle path lists as keys""" if isinstance(key, list): value = self keys = deepcopy(key) while len(keys) > 0: value = value[keys.pop(0)] return value else: return OrderedDict.__getitem__(self, key)
class MetafeatureFunctions(object):
    """Registry mapping metafeature names to calculator instances, their
    declared dependencies, and cached computed values."""

    def __init__(self):
        self.functions = OrderedDict()
        self.dependencies = OrderedDict()
        self.values = OrderedDict()

    def clear(self):
        """Forget every cached value (registered functions remain)."""
        self.values = OrderedDict()

    def __iter__(self):
        return iter(self.functions)

    def __getitem__(self, item):
        return self.functions[item]

    def __setitem__(self, key, value):
        return self.functions.__setitem__(key, value)

    def __delitem__(self, key):
        return self.functions.__delitem__(key)

    def __contains__(self, item):
        return item in self.functions

    def get_value(self, key):
        return self.values[key].value

    def set_value(self, key, item):
        self.values[key] = item

    def is_calculated(self, key):
        """Return if a helper function has already been executed.

        Necessary as get_value() can return None if the helper function
        hasn't been executed or if it returned None."""
        return key in self.values

    def get_dependency(self, name):
        """Return the dependency of metafeature "name"."""
        return self.dependencies.get(name)

    def define(self, name, dependency=None):
        """Decorator for adding metafeature functions to a "dictionary"
        of metafeatures.  This behaves like a function decorating a
        function, not a class decorating a function."""
        def wrapper(metafeature_class):
            # Register an *instance* of the decorated class.
            obj = metafeature_class()
            self[name] = obj
            self.dependencies[name] = dependency
            return obj
        return wrapper
def __getitem__(self, key):
    """Index by slice (positional over values), by an existing key, or
    by integer position."""
    # FIXME: how to handle slices more correctly?
    if isinstance(key, slice):
        return list(self.values())[key]
    elif key in self:
        return OrderedDict.__getitem__(self, key)
    # FIXME: is this the fastest way to implement this?
    # if it's an int, iterate the linked list and return the value
    elif isinstance(key, int):
        return self[self._get_key(key)]
    # NOTE(review): a missing non-int key falls through every branch and
    # implicitly returns None instead of raising KeyError — confirm this
    # is intended.
def addData(self, data):
    """
    Adds data to the set. Must match the variable order of the set and
    the number of variables in the set.
    """
    if len(data) != 0:
        # NOTE(review): inferred from the (column[0], column[pos])
        # pairing below — column 0 of each row appears to be the
        # time/index value and column `pos` the sample for the pos-th
        # variable; confirm the expected row layout.
        pos = 1
        for var in OrderedDict.__iter__(self):
            # Forward (index, value) pairs to the variable's own
            # addData().
            (OrderedDict.__getitem__(self, var).
             addData([(column[0], column[pos]) for column in data]))
            pos += 1
def __getitem__(self, key): try: return OrderedDict.__getitem__(self, key) except KeyError: if isinstance(key, int): return list(self.values())[key]
def __new__(cls, *args):
    """Create a new immutable GroupableOrderedDict.

    Accepts at most one positional argument: an existing
    GroupableOrderedDict, a dict (optionally carrying a special
    '__order__' key listing each key once per occurrence), or an
    iterable of (key, value) pairs.  Values are grouped per key into
    tuples and the flattened key ordering is stored under '__order__'.
    """
    new = OrderedDict.__new__(cls)
    OrderedDict.__init__(new)

    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got {}'.format(
            len(args)))

    ordering = []
    values = args[0]
    if values:
        if isinstance(values, GroupableOrderedDict):
            # Re-ingest one (key, value) pair per occurrence; the
            # ordering is rebuilt below.
            values = values.iteritems(with_order=False, repeated=True)
        elif isinstance(values, dict):
            if '__order__' in values:
                # Expand the explicit ordering back into repeated pairs,
                # counting occurrences per key.
                order = values.pop('__order__')
                tmp = []
                c = Counter()
                for key in order:
                    v = values[key]
                    if not isinstance(v, (tuple, list)):
                        if c[key] == 0:
                            tmp.append((key, v))
                        else:
                            # A scalar value cannot satisfy a repeated key.
                            raise Exception(
                                "Order and values don't match "
                                "on key {0} at position {1}".format(
                                    key, c[key]))
                    else:
                        tmp.append((key, v[c[key]]))
                    c[key] += 1
                values = tmp
            else:
                values = iteritems(values)

        for key, value in values:
            if key not in new:
                OrderedDict.__setitem__(new, key, [])
            v = []
            if isinstance(value, (tuple, list)):
                # Sequences contribute one ordering entry per element.
                for item in value:
                    v.append(item)
                    ordering.append(key)
            elif isinstance(value, dict):
                # Nested ordered dicts are wrapped recursively.
                if '__order__' in value:
                    value = GroupableOrderedDict(value)
                v.append(value)
                ordering.append(key)
            else:
                v.append(value)
                ordering.append(key)
            OrderedDict.__getitem__(new, key).extend(v)

    # Immutable: freeze every group (and the ordering) into tuples.
    for key, value in dict.items(new):
        OrderedDict.__setitem__(new, key, tuple(value))
    OrderedDict.__setitem__(new, '__order__', tuple(ordering))

    return new
def __getitem__(self,key): value = OrderedDict.__getitem__(self, key) return value['value']
def __getitem__(self, key): try: return OrderedDict.__getitem__(self, key) except KeyError: return self.__missing__(key)
class Robot(NotebookWriter): """ This is the base class from which all Robot subclasses should derive. A Robot supports the `with` context manager: Usage example: .. code-block:: python with Robot([("label1", "file1"), (label2, "file2")]) as robot: # Do something with robot. files are automatically closed when we exit. for label, abifile in self.items(): print(label) """ # filepaths are relative to `start`. None for asbolute paths. This flag is set in trim_paths start = None # Used in iter_lineopt to generate matplotlib linestyles. _LINE_COLORS = ["b", "r", "g", "m", "y", "k", "c"] _LINE_STYLES = [ "-", ":", "--", "-.", ] _LINE_WIDTHS = [ 2, ] def __init__(self, *args): """ Args: args is a list of tuples (label, filepath) """ self._abifiles, self._do_close = OrderedDict(), OrderedDict() self._exceptions = deque(maxlen=100) for label, abifile in args: self.add_file(label, abifile) @classmethod def get_supported_extensions(self): """List of strings with extensions supported by Robot subclasses.""" # This is needed to have all subclasses. from abipy.abilab import Robot return sorted([cls.EXT for cls in Robot.__subclasses__()]) @classmethod def class_for_ext(cls, ext): """Return the Robot subclass associated to the given extension.""" for subcls in cls.__subclasses__(): if subcls.EXT in (ext, ext.upper()): return subcls # anaddb.nc does not follow the extension rule... if ext.lower() == "anaddb": from abipy.dfpt.anaddbnc import AnaddbNcRobot as subcls return subcls raise ValueError( "Cannot find Robot subclass associated to extension %s\n" % ext + "The list of supported extensions (case insensitive) is:\n%s" % str(cls.get_supported_extensions())) @classmethod def from_dir(cls, top, walk=True, abspath=False): """ This class method builds a robot by scanning all files located within directory `top`. 
This method should be invoked with a concrete robot class, for example: robot = GsrRobot.from_dir(".") Args: top (str): Root directory walk: if True, directories inside `top` are included as well. abspath: True if paths in index should be absolute. Default: Relative to `top`. """ new = cls(*cls._open_files_in_dir(top, walk)) if not abspath: new.trim_paths(start=top) return new @classmethod def from_dirs(cls, dirpaths, walk=True, abspath=False): """ Similar to `from_dir` but accepts a list of directories instead of a single directory. Args: walk: if True, directories inside `top` are included as well. abspath: True if paths in index should be absolute. Default: Relative to `top`. """ items = [] for top in list_strings(dirpaths): items.extend(cls._open_files_in_dir(top, walk)) new = cls(*items) if not abspath: new.trim_paths(start=os.getcwd()) return new @classmethod def from_dir_glob(cls, pattern, walk=True, abspath=False): """ This class method builds a robot by scanning all files located within the directories matching `pattern` as implemented by glob.glob This method should be invoked with a concrete robot class, for example: robot = GsrRobot.from_dir_glob("flow_dir/w*/outdata/") Args: pattern: Pattern string walk: if True, directories inside `top` are included as well. abspath: True if paths in index should be absolute. Default: Relative to getcwd(). """ import glob items = [] for top in filter(os.path.isdir, glob.iglob(pattern)): items += cls._open_files_in_dir(top, walk=walk) new = cls(*items) if not abspath: new.trim_paths(start=os.getcwd()) return new @classmethod def _open_files_in_dir(cls, top, walk): """Open files in directory tree starting from `top`. 
Return list of Abinit files.""" if not os.path.isdir(top): raise ValueError("%s: no such directory" % str(top)) from abipy.abilab import abiopen items = [] if walk: for dirpath, dirnames, filenames in os.walk(top): filenames = [ f for f in filenames if cls.class_handles_filename(f) ] for f in filenames: abifile = abiopen(os.path.join(dirpath, f)) if abifile is not None: items.append((abifile.filepath, abifile)) else: filenames = [ f for f in os.listdir(top) if cls.class_handles_filename(f) ] for f in filenames: abifile = abiopen(os.path.join(top, f)) if abifile is not None: items.append((abifile.filepath, abifile)) return items @classmethod def class_handles_filename(cls, filename): """True if robot class handles filename.""" # Special treatment of AnaddbNcRobot if cls.EXT == "anaddb" and os.path.basename( filename).lower() == "anaddb.nc": return True return (filename.endswith("_" + cls.EXT + ".nc") or filename.endswith("." + cls.EXT)) # This for .abo @classmethod def from_files(cls, filenames, labels=None, abspath=False): """ Build a Robot from a list of `filenames`. if labels is None, labels are automatically generated from absolute paths. Args: abspath: True if paths in index should be absolute. Default: Relative to `top`. """ filenames = list_strings(filenames) from abipy.abilab import abiopen filenames = [f for f in filenames if cls.class_handles_filename(f)] items = [] for i, f in enumerate(filenames): try: abifile = abiopen(f) except Exception as exc: cprint("Exception while opening file: `%s`" % str(f), "red") cprint(exc, "red") abifile = None if abifile is not None: label = abifile.filepath if labels is None else labels[i] items.append((label, abifile)) new = cls(*items) if labels is None and not abspath: new.trim_paths(start=None) return new @classmethod def from_flow(cls, flow, outdirs="all", nids=None, ext=None, task_class=None): """ Build a robot from a |Flow| object. 
Args: flow: |Flow| object outdirs: String used to select/ignore the files in the output directory of flow, works and tasks outdirs="work" selects only the outdir of the Works, outdirs="flow+task" selects the outdir of the Flow and the outdirs of the tasks outdirs="-work" excludes the outdir of the Works. Cannot use ``+`` and ``-`` flags in the same string. Default: `all` that is equivalent to "flow+work+task" nids: List of node identifiers used to select particular nodes. Not used if None ext: File extension associated to the robot. Mainly used if method is invoked with the BaseClass task_class: Task class or string with the class name used to select the tasks in the flow. None implies no filtering. Usage example: .. code-block:: python with abilab.GsrRobot.from_flow(flow) as robot: print(robot) # That is equivalent to: with Robot.from_flow(flow, ext="GSR") as robot: print(robot) Returns: ``Robot`` subclass. """ robot = cls() if ext is None else cls.class_for_ext(ext)() all_opts = ("flow", "work", "task") if outdirs == "all": tokens = all_opts elif "+" in outdirs: assert "-" not in outdirs tokens = outdirs.split("+") elif "-" in outdirs: assert "+" not in outdirs tokens = [s for s in all if s not in outdirs.split("-")] else: tokens = list_strings(outdirs) if not all(t in all_opts for t in tokens): raise ValueError("Wrong outdirs string %s" % outdirs) if "flow" in tokens: robot.add_extfile_of_node(flow, nids=nids, task_class=task_class) if "work" in tokens: for work in flow: robot.add_extfile_of_node(work, nids=nids, task_class=task_class) if "task" in tokens: for task in flow.iflat_tasks(): robot.add_extfile_of_node(task, nids=nids, task_class=task_class) return robot def add_extfile_of_node(self, node, nids=None, task_class=None): """ Add the file produced by this node to the robot. Args: node: |Flow| or |Work| or |Task| object. nids: List of node identifiers used to select particular nodes. 
Not used if None task_class: Task class or string with class name used to select the tasks in the flow. None implies no filtering. """ if nids and node.node_id not in nids: return filepath = node.outdir.has_abiext(self.EXT) if not filepath: # Look in run.abi directory. filepath = node.wdir.has_abiext(self.EXT) # This to ignore DDB.nc files (only text DDB are supported) if filepath and filepath.endswith("_DDB.nc"): return if filepath: try: label = os.path.relpath(filepath) except OSError: # current working directory may not be defined! label = filepath # Filter by task_class (class or string with class name) if task_class is not None and not node.isinstance(task_class): return None self.add_file(label, filepath) def scan_dir(self, top, walk=True): """ Scan directory tree starting from ``top``. Add files to the robot instance. Args: top (str): Root directory walk: if True, directories inside ``top`` are included as well. Return: Number of files found. """ count = 0 for filepath, abifile in self.__class__._open_files_in_dir(top, walk): count += 1 self.add_file(filepath, abifile) return count def add_file(self, label, abifile, filter_abifile=None): """ Add a file to the robot with the given label. Args: label: String used to identify the file (must be unique, ax exceptions is raised if label is already present. abifile: Specify the file to be added. Accepts strings (filepath) or abipy file-like objects. filter_abifile: Function that receives an ``abifile`` object and returns True if the file should be added to the plotter. """ if is_string(abifile): from abipy.abilab import abiopen abifile = abiopen(abifile) if filter_abifile is not None and not filter_abifile(abifile): abifile.close() return # Open file here --> have to close it. self._do_close[abifile.filepath] = True if label in self._abifiles: raise ValueError("label %s is already present!" 
% label) self._abifiles[label] = abifile #def pop_filepath(self, filepath): # """ # Remove the file with the given `filepath` and close it. # """ # if label, abifile in self._abifiles.items(): # if abifile.filepath != filepath: continue # self._abifiles.pop(label) # if self._do_close.pop(abifile.filepath, False): # try: # abifile.close() # except Exception as exc: # print("Exception while closing: ", abifile.filepath) # print(exc) def iter_lineopt(self): """Generates matplotlib linestyles.""" for o in itertools.product(self._LINE_WIDTHS, self._LINE_STYLES, self._LINE_COLORS): yield {"linewidth": o[0], "linestyle": o[1], "color": o[2]} @staticmethod def ordered_intersection(list_1, list_2): """Return ordered intersection of two lists. Items must be hashable.""" set_2 = frozenset(list_2) return [x for x in list_1 if x in set_2] #def _get_ointersection_i(self, iattrname): # if len(self.abifiles) == 0: return [] # values = list(range(getattr(self.abifiles[0], iattrname))) # if len(self.abifiles) == 1: return values # for abifile in self.abifiles[1:]: # values = self.ordered_intersection(values, range(getattr(abifile, iattrname))) # return values @staticmethod def _to_relpaths(paths): """Convert a list of absolute paths to relative paths.""" root = os.getcwd() return [os.path.relpath(p, root) for p in paths] def pop_label(self, label): """ Remove file with the given ``label`` and close it. """ if label in self._abifiles: abifile = self._abifiles.pop(label) if self._do_close.pop(abifile.filepath, False): try: abifile.close() except Exception as exc: print("Exception while closing: ", abifile.filepath) print(exc) def change_labels(self, new_labels, dryrun=False): """ Change labels of the files. Args: new_labels: List of strings (same length as self.abifiles) dryrun: True to activate dryrun mode. Return: mapping new_label --> old_label. 
""" if len(new_labels) != len(self): raise ValueError("Robot has %d files while len(new_labels) = %d" % (len(new_labels), len(self))) old_labels = list(self._abifiles.keys()) if not dryrun: old_abifiles, self._abifiles = self._abifiles, OrderedDict() new2old = OrderedDict() for old, new in zip(old_labels, new_labels): new2old[new] = old if not dryrun: self._abifiles[new] = old_abifiles[old] else: print("old [%s] --> new [%s]" % (old, new)) return new2old def remap_labels(self, function, dryrun=False): """ Change labels of the files by executing ``function`` Args: function: Callable object e.g. lambda function. The output of function(abifile) is used as new label. Note that the function shall not return duplicated labels when applied to self.abifiles. dryrun: True to activate dryrun mode. Return: mapping new_label --> old_label. """ new_labels = [function(afile) for afile in self.abifiles] # Labels must be unique and hashable. if len(set(new_labels)) != len(new_labels): raise ValueError( "Duplicated labels are not allowed. Change input function.\nnew_labels %s" % str(new_labels)) return self.change_labels(new_labels, dryrun=dryrun) def trim_paths(self, start=None): """ Replace absolute filepaths in the robot with relative paths wrt to ``start`` directory. If start is None, os.getcwd() is used. Set ``self.start`` attribute, return ``self.start``. 
""" self.start = os.getcwd() if start is None else start old_paths = list(self._abifiles.keys()) old_new_paths = [(p, os.path.relpath(os.path.abspath(p), start=self.start)) for p in old_paths] old_abifiles = self._abifiles self._abifiles = OrderedDict() for old, new in old_new_paths: self._abifiles[new] = old_abifiles[old] return self.start @property def exceptions(self): """List of exceptions.""" return self._exceptions def __len__(self): return len(self._abifiles) #def __iter__(self): # return iter(self._abifiles) #def __contains__(self, item): # return item in self._abifiles def __getitem__(self, key): # self[key] return self._abifiles.__getitem__(key) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): """Activated at the end of the with statement.""" self.close() def keys(self): return self._abifiles.keys() def items(self): return self._abifiles.items() @property def labels(self): return list(self._abifiles.keys()) def get_label_files_str(self): """Return string with [label, filepath].""" from tabulate import tabulate return tabulate([(label, abifile.relpath) for label, abifile in self.items()], headers=["Label", "Relpath"]) + "\n" def show_files(self, stream=sys.stdout): """Show label --> file path""" stream.write(self.get_label_files_str()) def __repr__(self): """Invoked by repr.""" return self.get_label_files_str() def __str__(self): """Invoked by str.""" return self.to_string() def to_string(self, verbose=0): """String representation.""" lines = [ "%s with %d files in memory:\n" % (self.__class__.__name__, len(self.abifiles)) ] app = lines.append for i, f in enumerate(self.abifiles): app(f.to_string(verbose=verbose)) app("\n") return "\n".join(lines) def _repr_html_(self): """Integration with jupyter_ notebooks.""" return '<ol start="0">\n{}\n</ol>'.format("\n".join( "<li>%s</li>" % label for label, abifile in self.items())) @property def abifiles(self): """List of netcdf files.""" return list(self._abifiles.values()) def 
has_different_structures(self, rtol=1e-05, atol=1e-08): """ Check if structures are equivalent, return string with info about differences (if any). """ if len(self) <= 1: return "" formulas = set( [af.structure.composition.formula for af in self.abifiles]) if len(formulas) != 1: return "Found structures with different full formulas: %s" % str( formulas) lines = [] s0 = self.abifiles[0].structure for abifile in self.abifiles[1:]: s1 = abifile.structure if not np.allclose( s0.lattice.matrix, s1.lattice.matrix, rtol=rtol, atol=atol): lines.append("Structures have different lattice:") if not np.allclose( s0.frac_coords, s1.frac_coords, rtol=rtol, atol=atol): lines.append("Structures have different atomic positions:") return "\n".join(lines) #def apply(self, func_or_string, args=(), **kwargs): # """ # Applies function to all ``abifiles`` available in the robot. # Args: # func_or_string: If callable, the output of func_or_string(abifile, ...) is used. # If string, the output of getattr(abifile, func_or_string)(...) # args (tuple): Positional arguments to pass to function in addition to the array/series # kwargs: Additional keyword arguments will be passed as keywords to the function # Return: List of results # """ # if callable(func_or_string): # return [func_or_string(abifile, *args, *kwargs) for abifile in self.abifiles] # else: # return [getattrd(abifile, func_or_string)(*args, **kwargs) for abifile in self.abifiles] def is_sortable(self, aname, raise_exc=False): """ Return True if ``aname`` is an attribute of the netcdf file If raise_exc is True, AttributeError with an explicit message is raised. """ try: obj = None try: # abiifile.foo.bar? obj = getattrd(self.abifiles[0], aname) except AttributeError: # abifile.params[aname] ? if hasattr(self.abifiles[0], "params") and aname in self.abifiles[0].params: obj = self.abifiles[0].params[aname] # Let's try to convert obj to scalar. 
float(obj) return True except Exception: if not raise_exc: return False attrs = [] for key, obj in inspect.getmembers(self.abifiles[0]): # Ignores anything starting with underscore if key.startswith('_') or callable(obj) or hasattr( obj, "__len__"): continue attrs.append(key) # Add entries in params. if hasattr(self.abifiles[0], "params") and hasattr( self.abifiles[0].params, "keys"): attrs.extend(self.abifiles[0].params.keys()) raise AttributeError("""\ `%s` object has no attribute `%s`. Choose among: %s Note that this list is automatically generated. Not all entries are sortable (Please select number-like quantities)""" % (self.__class__.__name__, aname, str(attrs))) def _sortby_labelfile_list(self, labelfile_list, func_or_string, reverse=False, unpack=False): """ Return: list of (label, abifile, param) tuples where param is obtained via ``func_or_string``. or labels, abifiles, params if ``unpack`` """ if not func_or_string: # Catch None or empty items = [(label, abifile, label) for (label, abifile) in labelfile_list] if not unpack: return items else: return [t[0] for t in items], [t[1] for t in items ], [t[2] for t in items] elif callable(func_or_string): items = [(label, abifile, func_or_string(abifile)) for (label, abifile) in labelfile_list] else: # Assume string and attribute with the same name. # try in abifile.params if not hasattrd(abifile, func_or_string) self.is_sortable(func_or_string, raise_exc=True) if hasattrd(self.abifiles[0], func_or_string): items = [(label, abifile, getattrd(abifile, func_or_string)) for (label, abifile) in labelfile_list] else: items = [(label, abifile, abifile.params[func_or_string]) for (label, abifile) in labelfile_list] items = sorted(items, key=lambda t: t[2], reverse=reverse) if not unpack: return items else: return [t[0] for t in items], [t[1] for t in items ], [t[2] for t in items] def sortby(self, func_or_string, reverse=False, unpack=False): """ Sort files in the robot by ``func_or_string``. 
Args: func_or_string: Either None, string, callable defining the quantity to be used for sorting. If string, it's assumed that the abifile has an attribute with the same name and getattr is invoked. If callable, the output of func_or_string(abifile) is used. If None, no sorting is performed. reverse: If set to True, then the list elements are sorted as if each comparison were reversed. unpack: Return (labels, abifiles, params) if True Return: list of (label, abifile, param) tuples where param is obtained via ``func_or_string``. or labels, abifiles, params if ``unpack`` """ labelfile_list = list(self.items()) return self._sortby_labelfile_list(labelfile_list, func_or_string, reverse=reverse, unpack=unpack) def group_and_sortby(self, hue, func_or_string): """ Group files by ``hue`` and, inside each group` sort items by ``func_or_string``. Args: hue: Variable that define subsets of the data, which will be drawn on separate lines. Accepts callable or string If string, it's assumed that the abifile has an attribute with the same name and getattr is invoked. Dot notation is also supported e.g. hue="structure.formula" --> abifile.structure.formula If callable, the output of hue(abifile) is used. func_or_string: Either None, string, callable defining the quantity to be used for sorting. If string, it's assumed that the abifile has an attribute with the same name and getattr is invoked. If callable, the output of func_or_string(abifile) is used. If None, no sorting is performed. Return: List of :class:`HueGroup` instance. """ # Group by hue. # This is the section in which we support: callable, abifile.attr.name syntax or abifile.params["key"] items = list(self.items()) if callable(hue): key = lambda t: hue(t[1]) else: # Assume string. 
if hasattrd(self.abifiles[0], hue): key = lambda t: getattrd(t[1], hue) else: # Try in abifile.params if hasattr(self.abifiles[0], "params") and hue in self.abifiles[0].params: key = lambda t: t[1].params[hue] else: raise TypeError("""\ Cannot interpret hue argument of type `%s` and value `%s`. Expecting callable or attribute name or key in abifile.params""" % (type(hue), str(hue))) groups = [] for hvalue, labelfile_list in sort_and_groupby(items, key=key): # Use func_or_string to sort each group labels, abifiles, xvalues = self._sortby_labelfile_list( labelfile_list, func_or_string, unpack=True) groups.append(HueGroup(hvalue, xvalues, abifiles, labels)) return groups def close(self): """ Close all files that have been opened by the Robot. """ for abifile in self.abifiles: if self._do_close.pop(abifile.filepath, False): try: abifile.close() except: print("Exception while closing: ", abifile.filepath) print(exc) #@classmethod #def open(cls, obj, nids=None, **kwargs): # """ # Flexible constructor. obj can be a :class:`Flow` or a string with the directory containing the Flow. # `nids` is an optional list of :class:`Node` identifiers used to filter the set of :class:`Task` in the Flow. # """ # has_dirpath = False # if is_string(obj): # try: # from abipy.flowtk import Flow # obj = Flow.pickle_load(obj) # except: # has_dirpath = True # if not has_dirpath: # # We have a Flow. smeth is the name of the Task method used to open the file. # items = [] # smeth = "open_" + cls.EXT.lower() # for task in obj.iflat_tasks(nids=nids): #, status=obj.S_OK): # open_method = getattr(task, smeth, None) # if open_method is None: continue # abifile = open_method() # if abifile is not None: items.append((task.pos_str, abifile)) # return cls(*items) # else: # # directory --> search for files with the appropriate extension and open it with abiopen. 
#        if nids is not None: raise ValueError("nids cannot be used when obj is a directory.")
#        return cls.from_dir(obj)

#def get_attributes(self, attr_name, obj=None, retdict=False):
#    od = OrderedDict()
#    for label, abifile in self.items():
#        obj = abifile if obj is None else getattr(abifile, obj)
#        od[label] = getattr(obj, attr_name)
#
#    if retdict:
#        return od
#    else:
#        return list(od.values())

def _exec_funcs(self, funcs, arg):
    """
    Execute list of callable functions. Each function receives arg as argument.
    Exceptions are recorded in self._exceptions instead of being propagated.
    """
    if not isinstance(funcs, (list, tuple)): funcs = [funcs]
    d = {}
    for func in funcs:
        try:
            key, value = func(arg)
            d[key] = value
        except Exception as exc:
            cprint("Exception: %s" % str(exc), "red")
            self._exceptions.append(str(exc))
    return d

@staticmethod
def sortby_label(sortby, param):
    """Return the label to be used when files are sorted with ``sortby``."""
    return "%s %s" % (sortby, param) if not (callable(sortby) or sortby is None) else str(param)

def get_structure_dataframes(self, abspath=False, filter_abifile=None, **kwargs):
    """
    Wrap dataframes_from_structures function.

    Args:
        abspath: True if paths in index should be absolute. Default: Relative to getcwd().
        filter_abifile: Function that receives an ``abifile`` object and returns
            True if the file should be added to the plotter.
    """
    from abipy.core.structure import dataframes_from_structures
    if "index" not in kwargs:
        index = list(self._abifiles.keys())
        if not abspath: index = self._to_relpaths(index)
        kwargs["index"] = index

    # BUGFIX: the condition was inverted -- when a filter was supplied the
    # unfiltered list was used, and ``filter(None, ...)`` ran otherwise.
    abifiles = self.abifiles if filter_abifile is None else \
        list(filter(filter_abifile, self.abifiles))

    return dataframes_from_structures(struct_objects=abifiles, **kwargs)

def get_lattice_dataframe(self, **kwargs):
    """Return |pandas-DataFrame| with lattice parameters."""
    dfs = self.get_structure_dataframes(**kwargs)
    return dfs.lattice

def get_coords_dataframe(self, **kwargs):
    """Return |pandas-DataFrame| with atomic positions."""
    dfs = self.get_structure_dataframes(**kwargs)
    return dfs.coords

def get_params_dataframe(self, abspath=False):
    """
    Return |pandas-DataFrame| with the most important parameters.
    that are usually subject to convergence studies.

    Args:
        abspath: True if paths in index should be absolute. Default: Relative to `top`.
    """
    rows, row_names = [], []
    for label, abifile in self.items():
        if not hasattr(abifile, "params"):
            import warnings
            warnings.warn("%s does not have `params` attribute" % type(abifile))
            # BUGFIX: was ``break``, which silently dropped all the files
            # following the first one without a ``params`` attribute.
            continue
        rows.append(abifile.params)
        row_names.append(label)

    row_names = row_names if abspath else self._to_relpaths(row_names)
    import pandas as pd
    return pd.DataFrame(rows, index=row_names, columns=list(rows[0].keys()))

##############################################
# Helper functions to plot pandas dataframes #
##############################################

@staticmethod
@wraps(plot_xy_with_hue)
def plot_xy_with_hue(*args, **kwargs):
    # Inside the body the global (module-level) helper is resolved, not this
    # static method, so this is a thin documented passthrough, not recursion.
    return plot_xy_with_hue(*args, **kwargs)

@staticmethod
def _get_label(func_or_string):
    """
    Return label associated to ``func_or_string``.
    If callable, docstring __doc__ is used.
    """
    if func_or_string is None:
        return ""
    elif callable(func_or_string):
        if getattr(func_or_string, "__doc__", ""):
            return func_or_string.__doc__.strip()
        else:
            return func_or_string.__name__
    else:
        return str(func_or_string)

@add_fig_kwargs
def plot_convergence(self, item, sortby=None, hue=None, ax=None, fontsize=12, **kwargs):
    """
    Plot the convergence of ``item`` wrt the ``sortby`` parameter.
    Values can optionally be grouped by ``hue``.

    Args:
        item: Define the quantity to plot. Accepts callable or string
            If string, it's assumed that the abifile has an attribute with the same name and `getattr` is invoked.
            Dot notation is also supported e.g. hue="structure.formula" --> abifile.structure.formula
            If callable, the output of item(abifile) is used.
        sortby: Define the convergence parameter, sort files and produce plot labels.
            Can be None, string or function. If None, no sorting is performed.
            If string and not empty it's assumed that the abifile has an attribute
            with the same name and `getattr` is invoked.
            If callable, the output of sortby(abifile) is used.
        hue: Variable that define subsets of the data, which will be drawn on separate lines.
            Accepts callable or string
            If string, it's assumed that the abifile has an attribute with the same name and getattr is invoked.
            If callable, the output of hue(abifile) is used.
        ax: |matplotlib-Axes| or None if a new figure should be created.
        fontsize: legend and label fontsize.
        kwargs: keyword arguments passed to matplotlib plot method.

    Returns: |matplotlib-Figure|

    Example:

         robot.plot_convergence("energy")
         robot.plot_convergence("energy", sortby="nkpt")
         robot.plot_convergence("pressure", sortby="nkpt", hue="tsmear")
    """
    ax, fig, plt = get_ax_fig_plt(ax=ax)
    if "marker" not in kwargs: kwargs["marker"] = "o"

    def get_yvalues(abifiles):
        # Extract the quantity to plot as a list of floats.
        if callable(item):
            return [float(item(a)) for a in abifiles]
        else:
            return [float(getattr(a, item)) for a in abifiles]

    if hue is None:
        labels, abifiles, params = self.sortby(sortby, unpack=True)
        yvals = get_yvalues(abifiles)
        #print("params", params, "\nyvals", yvals)
        ax.plot(params, yvals, **kwargs)
    else:
        groups = self.group_and_sortby(hue, sortby)
        for g in groups:
            yvals = get_yvalues(g.abifiles)
            label = "%s: %s" % (self._get_label(hue), g.hvalue)
            ax.plot(g.xvalues, yvals, label=label, **kwargs)

    ax.grid(True)
    ax.set_xlabel("%s" % self._get_label(sortby))
    if sortby is None: rotate_ticklabels(ax, 15)
    ax.set_ylabel("%s" % self._get_label(item))
    if hue is not None:
        ax.legend(loc="best", fontsize=fontsize, shadow=True)

    return fig

@add_fig_kwargs
def plot_convergence_items(self, items, sortby=None, hue=None, fontsize=6, **kwargs):
    """
    Plot the convergence of a list of ``items`` wrt to the ``sortby`` parameter.
    Values can optionally be grouped by ``hue``.

    Args:
        items: List of attributes (or callables) to be analyzed.
        sortby: Define the convergence parameter, sort files and produce plot labels.
            Can be None, string or function. If None, no sorting is performed.
            If string and not empty it's assumed that the abifile has an attribute
            with the same name and `getattr` is invoked.
            If callable, the output of sortby(abifile) is used.
        hue: Variable that define subsets of the data, which will be drawn on separate lines.
            Accepts callable or string
            If string, it's assumed that the abifile has an attribute with the same name and getattr is invoked.
            Dot notation is also supported e.g. hue="structure.formula" --> abifile.structure.formula
            If callable, the output of hue(abifile) is used.
        fontsize: legend and label fontsize.
        kwargs: keyword arguments are passed to ax.plot

    Returns: |matplotlib-Figure|
    """
    # Note: in principle one could call plot_convergence inside a loop but
    # this one is faster as sorting is done only once.

    # Build grid plot.
    nrows, ncols = len(items), 1
    ax_list, fig, plt = get_axarray_fig_plt(None, nrows=nrows, ncols=ncols,
                                            sharex=True, sharey=False, squeeze=False)
    ax_list = ax_list.ravel()

    # Sort and group files if hue.
    if hue is None:
        labels, ncfiles, params = self.sortby(sortby, unpack=True)
    else:
        groups = self.group_and_sortby(hue, sortby)

    marker = kwargs.pop("marker", "o")
    for i, (ax, item) in enumerate(zip(ax_list, items)):
        if hue is None:
            # Extract data.
            if callable(item):
                yvals = [float(item(gsr)) for gsr in self.abifiles]
            else:
                yvals = [getattrd(gsr, item) for gsr in self.abifiles]

            if not is_string(params[0]):
                ax.plot(params, yvals, marker=marker, **kwargs)
            else:
                # Must handle list of strings in a different way.
                xn = range(len(params))
                ax.plot(xn, yvals, marker=marker, **kwargs)
                ax.set_xticks(xn)
                ax.set_xticklabels(params, fontsize=fontsize)
        else:
            for g in groups:
                # Extract data.
                if callable(item):
                    yvals = [float(item(gsr)) for gsr in g.abifiles]
                else:
                    yvals = [getattrd(gsr, item) for gsr in g.abifiles]
                label = "%s: %s" % (self._get_label(hue), g.hvalue)
                ax.plot(g.xvalues, yvals, label=label, marker=marker, **kwargs)

        ax.grid(True)
        ax.set_ylabel(self._get_label(item))
        if i == len(items) - 1:
            ax.set_xlabel("%s" % self._get_label(sortby))
            if sortby is None: rotate_ticklabels(ax, 15)
        if i == 0 and hue is not None:
            ax.legend(loc="best", fontsize=fontsize, shadow=True)

    return fig

@add_fig_kwargs
def plot_lattice_convergence(self, what_list=None, sortby=None, hue=None, fontsize=8, **kwargs):
    """
    Plot the convergence of the lattice parameters (a, b, c, alpha, beta, gamma).
    wrt the``sortby`` parameter. Values can optionally be grouped by ``hue``.

    Args:
        what_list: List of strings with the quantities to plot e.g. ["a", "alpha", "beta"].
            None means all.
        item: Define the quantity to plot. Accepts callable or string
            If string, it's assumed that the abifile has an attribute with the same name and `getattr` is invoked.
            If callable, the output of item(abifile) is used.
        sortby: Define the convergence parameter, sort files and produce plot labels.
            Can be None, string or function. If None, no sorting is performed.
            If string and not empty it's assumed that the abifile has an attribute
            with the same name and `getattr` is invoked.
            If callable, the output of sortby(abifile) is used.
        hue: Variable that define subsets of the data, which will be drawn on separate lines.
            Accepts callable or string
            If string, it's assumed that the abifile has an attribute with the same name and getattr is invoked.
            Dot notation is also supported e.g. hue="structure.formula" --> abifile.structure.formula
            If callable, the output of hue(abifile) is used.
        ax: |matplotlib-Axes| or None if a new figure should be created.
        fontsize: legend and label fontsize.

    Returns: |matplotlib-Figure|

    Example:

        robot.plot_lattice_convergence()
        robot.plot_lattice_convergence(sortby="nkpt")
        robot.plot_lattice_convergence(sortby="nkpt", hue="tsmear")
    """
    if not self.abifiles: return None

    # The majority of AbiPy files have a structure object
    # whereas Hist.nc defines final_structure. Use geattr and key to extract structure object.
    key = "structure"
    if not hasattr(self.abifiles[0], "structure"):
        if hasattr(self.abifiles[0], "final_structure"):
            key = "final_structure"
        else:
            raise TypeError("Don't know how to extract structure from %s"
                            % type(self.abifiles[0]))

    # Define callbacks. docstrings will be used as ylabels.
    def a(afile):
        "a (Ang)"
        return getattr(afile, key).lattice.a
    def b(afile):
        "b (Ang)"
        return getattr(afile, key).lattice.b
    def c(afile):
        "c (Ang)"
        return getattr(afile, key).lattice.c
    def volume(afile):
        r"$V$"
        return getattr(afile, key).lattice.volume
    def alpha(afile):
        r"$\alpha$"
        return getattr(afile, key).lattice.alpha
    def beta(afile):
        r"$\beta$"
        return getattr(afile, key).lattice.beta
    def gamma(afile):
        r"$\gamma$"
        return getattr(afile, key).lattice.gamma

    items = [a, b, c, volume, alpha, beta, gamma]
    if what_list is not None:
        locs = locals()
        items = [locs[what] for what in list_strings(what_list)]

    # Build plot grid.
    nrows, ncols = len(items), 1
    ax_list, fig, plt = get_axarray_fig_plt(None, nrows=nrows, ncols=ncols,
                                            sharex=True, sharey=False, squeeze=False)

    marker = kwargs.pop("marker", "o")
    for i, (ax, item) in enumerate(zip(ax_list.ravel(), items)):
        self.plot_convergence(item, sortby=sortby, hue=hue, ax=ax,
                              fontsize=fontsize, marker=marker, show=False)
        if i != 0:
            set_visible(ax, False, "legend")
        if i != len(items) - 1:
            set_visible(ax, False, "xlabel")

    return fig

def get_baserobot_code_cells(self, title=None):
    """
    Return list of jupyter_ cells with calls to methods provided by the base class.
    """
    # Try not pollute namespace with lots of variables.
    nbformat, nbv = self.get_nbformat_nbv()
    title = "## Code to compare multiple Structure objects" if title is None else str(title)
    return [
        nbv.new_markdown_cell(title),
        nbv.new_code_cell("robot.get_lattice_dataframe()"),
        nbv.new_code_cell("""# robot.plot_lattice_convergence(sortby="nkpt", hue="tsmear")"""),
        nbv.new_code_cell("#robot.get_coords_dataframe()"),
    ]
def __getitem__(self, key):
    """Look up *key* locally; fall back to indexing into the first value.

    If ``key`` is not a top-level key, the key is resolved inside the
    first stored value (assumed to be a mapping itself).
    """
    if key in self.keys():
        return OrderedDict.__getitem__(self, key)
    else:
        # BUGFIX: ``self.values()[0]`` is a non-subscriptable view on
        # Python 3; materialize it first (keeps IndexError on empty dict).
        return list(self.values())[0][key]
def lookup(self, key):
    """Thread-safely fetch ``self[key]`` and mark it most-recently-used.

    The entry is removed and re-inserted, which places the key at the
    right end of the OrderedDict (classic LRU refresh).
    """
    with self.lock:
        item = OrderedDict.__getitem__(self, key)
        # Re-insert to move the key to the most-recent position.
        del self[key]
        OrderedDict.__setitem__(self, key, item)
        return item
def __getattr__(self, name): if not hasattr(self, '_init'): return OrderedDict.__getattr__(self, name) return OrderedDict.__getitem__(self, name)
def getandmove(self, key):
    """Return ``self[key]`` after moving the key to the most-recent end."""
    value = OrderedDict.__getitem__(self, key)
    # Refresh recency: the key now sits at the right end of the dict.
    OrderedDict.move_to_end(self, key)
    return value
def __getitem__(self, i):
    # Populate/refresh the underlying storage before every lookup.
    # NOTE(review): relies on self._fetch() (defined elsewhere in this class)
    # being idempotent and cheap, since it runs on every item access — confirm.
    self._fetch()
    return OrderedDict.__getitem__(self, i)
class Scrapbook(collections.abc.MutableMapping):
    """
    A collection of notebooks represented as a dictionary of notebooks
    """

    def __init__(self):
        self._notebooks = OrderedDict()

    def __setitem__(self, key, value):
        # If notebook is a path str then load the notebook.
        if isinstance(value, string_types):
            value = Notebook(value)
        self._notebooks.__setitem__(key, value)

    def __getitem__(self, key):
        return self._notebooks.__getitem__(key)

    def __delitem__(self, key):
        return self._notebooks.__delitem__(key)

    def __iter__(self):
        return self._notebooks.__iter__()

    def __len__(self):
        return self._notebooks.__len__()

    @property
    @deprecated('1.0.0')
    def papermill_dataframe(self):
        """list: a list of data names from a collection of notebooks"""
        # Backwards compatible dataframe interface
        df_list = []
        for key in self._notebooks:
            nb = self._notebooks[key]
            df = nb.papermill_dataframe
            df["key"] = key
            df_list.append(df)
        return pd.concat(df_list).reset_index(drop=True)

    @property
    @deprecated('0.4.0', 'metrics')
    def papermill_metrics(self):
        return self.metrics

    @property
    def metrics(self):
        """list: a list of metrics from a collection of notebooks"""
        df_list = []
        for key in self._notebooks:
            nb = self._notebooks[key]
            df = nb.metrics
            df["key"] = key
            df_list.append(df)
        return pd.concat(df_list).reset_index(drop=True)

    @property
    def notebooks(self):
        """list: a sorted list of associated notebooks."""
        return self.values()

    @property
    def notebook_scraps(self):
        """dict: a dictionary of the notebook scraps by key."""
        return OrderedDict([(key, nb.scraps) for key, nb in self._notebooks.items()])

    @property
    def scraps(self):
        """dict: a dictionary of the merged notebook scraps."""
        return Scraps(merge_dicts(nb.scraps for nb in self.notebooks))

    def scraps_report(self, scrap_names=None, notebook_names=None,
                      include_data=False, headers=True):
        """
        Display scraps as markdown structed outputs.

        Parameters
        ----------
        scrap_names : str or iterable[str] (optional)
            the scraps to display as reported outputs
        notebook_names : str or iterable[str] (optional)
            notebook names to use in filtering on scraps to report
        include_data : bool (default: False)
            indicator that data-only scraps should be reported
        header : bool (default: True)
            indicator for if the scraps should render with a header
        """
        # Keep slow import lazy
        from IPython.display import display as ip_display, Markdown

        def trim_repr(data):
            # Generate a small data representation for display purposes.
            # BUGFIX: the original only assigned ``data_str`` for non-string
            # input, so a string argument hit an unbound local; strings are
            # now trimmed the same way.
            if not isinstance(data, string_types):
                data_str = repr(data)
            else:
                data_str = data
            if len(data_str) > 102:
                data_str = data_str[:100] + "..."
            return data_str

        if isinstance(scrap_names, string_types):
            scrap_names = [scrap_names]
        scrap_names = set(scrap_names or [])

        if notebook_names is None:
            notebook_names = self._notebooks.keys()
        elif isinstance(notebook_names, string_types):
            notebook_names = [notebook_names]

        for i, nb_name in enumerate(notebook_names):
            notebook = self[nb_name]
            if headers:
                if i > 0:
                    ip_display(Markdown("<hr>"))  # tag between outputs
                ip_display(Markdown("### {}".format(nb_name)))

            for name in scrap_names or notebook.scraps.display_scraps.keys():
                if headers:
                    ip_display(Markdown("#### {}".format(name)))
                notebook.reglue(name, raise_on_missing=False, unattached=True)

            if include_data:
                # BUGFIX: the original iterated ``scrap_names`` (a set of name
                # strings) as if it yielded (name, scrap) pairs, which fails
                # whenever explicit names are given; resolve names against the
                # notebook's data scraps instead.
                data_scraps = notebook.scraps.data_scraps
                for name in (scrap_names or data_scraps.keys()):
                    scrap = data_scraps.get(name)
                    if scrap is None:
                        continue
                    if scrap.display is None and scrap.data is not None:
                        if headers:
                            ip_display(Markdown("#### {}".format(name)))
                            ip_display(trim_repr(scrap.data))
                        else:
                            ip_display("{}: {}".format(scrap.name, trim_repr(scrap.data)))
def __getitem__(self, key):
    """Case-insensitive ``[]`` lookup: entries are stored under lower-case keys."""
    lowered = key.lower()
    return OrderedDict.__getitem__(self, lowered)
def __getitem__(self, key):
    """Return the value for *key*, refreshing its LRU position.

    The entry is deleted and re-inserted so the key moves to the
    most-recently-used end of the dict.
    """
    value = OrderedDict.__getitem__(self, key)
    super(LRULimitedSizeDict, self).__delitem__(key)
    super(LRULimitedSizeDict, self).__setitem__(key, value)
    # BUGFIX: the original never returned, so every lookup yielded None.
    return value
def __getitem__(self, item):
    """Look up *item* and refresh its recency (LRU-style re-insertion)."""
    value = OrderedDict.__getitem__(self, item)
    # Delete + re-insert moves the key to the most-recent end.
    del self[item]
    OrderedDict.__setitem__(self, item, value)
    return value
def __getitem__(self, item):
    """Autovivifying lookup: a missing key is created as a fresh, empty
    instance of this same class, stored, and returned."""
    try:
        return OrderedDict.__getitem__(self, item)
    except KeyError:
        node = type(self)()
        self[item] = node
        return node
class BaseAWSObject(object):
    """Base class for template resource objects.

    Subclasses declare their allowed properties in a ``props`` mapping of
    ``name -> (expected_type, required)``.  Attribute access is intercepted
    so that property reads/writes go through the ``properties`` dict with
    type validation, while the special attributes in ``attributes_`` go
    into the top-level ``resource`` dict.
    """

    def __init__(self, title, template=None, **kwargs):
        self.title = title
        self.template = template
        # Cache the keys for validity checks
        self.propnames = list(self.props.keys())
        self.attributes_ = ['DependsOn', 'DeletionPolicy', 'Metadata',
                            'UpdatePolicy', 'Condition', 'CreationPolicy']

        # try to validate the title if its there
        if self.title:
            self.validate_title()

        # Create the list of properties set on this object by the user
        self.properties = OrderedDict()
        dictname = getattr(self, 'dictname', None)
        if dictname:
            self.resource = {
                dictname: self.properties,
            }
        else:
            self.resource = self.properties
        if hasattr(self, 'resource_type') and self.resource_type is not None:
            self.resource['type'] = self.resource_type
        # Sentinel consulted (name-mangled) by __setattr__: before this is
        # set, all attribute writes go straight to the instance __dict__.
        self.__initialized = True

        # Check for properties defined in the class
        for k, (_, required) in list(self.props.items()):
            v = getattr(type(self), k, None)
            if v is not None and k not in kwargs:
                self.__setattr__(k, v)

        # Now that it is initialized, populate it with the kwargs
        for k, v in list(kwargs.items()):
            self.__setattr__(k, v)

        # Bound it to template if we know it
        if self.template is not None:
            self.template.add_resource(self)

    def __getattr__(self, name):
        # Resolve unknown attributes from the resource dict (special
        # attributes) or the validated properties dict.
        try:
            if name in self.attributes_:
                return self.resource[name]
            else:
                return self.properties.__getitem__(name)
        except KeyError:
            # Fall back to the name attribute in the object rather than
            # in the properties dict. This is for non-OpenStack backwards
            # compatibility since OpenStack objects use a "name" property.
            if name == 'name':
                return self.__getattribute__('title')
            raise AttributeError(name)

    def __setattr__(self, name, value):
        # Plain attribute write before initialization finishes, or for
        # attributes that already live in the instance __dict__.
        if name in list(self.__dict__.keys()) \
                or '_BaseAWSObject__initialized' not in self.__dict__:
            return dict.__setattr__(self, name, value)
        elif name in self.attributes_:
            self.resource[name] = value
            return None
        elif name in self.propnames:
            # Check the type of the object and compare against what we were
            # expecting.
            expected_type = self.props[name][0]

            # If the value is a AWSHelperFn we can't do much validation
            # we'll have to leave that to Amazon. Maybe there's another way
            # to deal with this that we'll come up with eventually.
            #
            # We'll do validation below if there are AWSHelperFn in a
            # list context.
            if isinstance(value, AWSHelperFn) and \
                    not isinstance(expected_type, list):
                return self.properties.__setitem__(name, value)

            # If it's a function, call it...
            elif isinstance(expected_type, types.FunctionType):
                try:
                    value = expected_type(value)
                except Exception:
                    # TODO fix this, title is none. ie. output = None.method function validator
                    sys.stderr.write(
                        "%s: %s.%s function validator '%s' threw "
                        "exception:\n" % (self.__class__, self.title, name,
                                          expected_type.__name__))
                    raise
                return self.properties.__setitem__(name, value)

            # If it's a list of types, check against those types...
            elif isinstance(expected_type, list):
                # If we're expecting a list, then make sure it is a list
                if not isinstance(value, list):
                    self._raise_type(name, value, expected_type)

                # Iterate over the list and make sure it matches our
                # type checks (as above accept AWSHelperFn because
                # we can't do the validation ourselves)
                for v in value:
                    if not isinstance(v, tuple(expected_type)) \
                            and not isinstance(v, AWSHelperFn):
                        self._raise_type(name, v, expected_type)
                # Validated so assign it
                return self.properties.__setitem__(name, value)

            # Final validity check, compare the type of value against
            # expected_type which should now be either a single type or
            # a tuple of types.
            elif isinstance(value, expected_type):
                return self.properties.__setitem__(name, value)
            else:
                self._raise_type(name, value, expected_type)

        type_name = getattr(self, 'resource_type', self.__class__.__name__)

        if type_name == 'AWS::CloudFormation::CustomResource' or \
                type_name.startswith('Custom::'):
            # Add custom resource arguments to the dict without any further
            # validation. The properties of a CustomResource is not known.
            return self.properties.__setitem__(name, value)

        raise AttributeError("%s object does not support attribute %s" %
                             (type_name, name))

    def _raise_type(self, name, value, expected_type):
        # Uniform TypeError for all property-type mismatches above.
        raise TypeError('%s: %s.%s is %s, expected %s' %
                        (self.__class__, self.title, name, type(value),
                         expected_type))

    def validate_title(self):
        # Titles must be purely alphanumeric (``valid_names`` regex).
        if not valid_names.match(self.title):
            raise ValueError('Name "%s" not alphanumeric' % self.title)

    def validate(self):
        # Hook for subclasses: extra semantic validation before to_dict().
        pass

    def to_dict(self):
        """Serialize the resource to a plain dict after validation."""
        self._validate_props()
        self.validate()

        if self.properties:
            return encode_to_dict(self.resource)
        elif hasattr(self, 'resource_type'):
            d = {}
            # No user-set properties: emit everything except the (empty)
            # 'Properties' sub-dict.
            for k, v in list(self.resource.items()):
                if k != 'Properties':
                    d[k] = v
            return d
        else:
            return {}

    @classmethod
    def _from_dict(cls, title=None, **kwargs):
        """Build an instance from a plain dict, recursing into nested
        AWS-object property types."""
        props = {}
        for prop_name, value in list(kwargs.items()):
            try:
                prop_attrs = cls.props[prop_name]
            except KeyError:
                raise AttributeError("Object type %s does not have a "
                                     "%s property." % (cls.__name__,
                                                       prop_name))
            prop_type = prop_attrs[0]
            value = kwargs[prop_name]
            is_aws_object = is_aws_object_subclass(prop_type)
            if is_aws_object:
                # NOTE(review): ``collections.Mapping`` was removed in
                # Python 3.10 — should be ``collections.abc.Mapping``.
                if not isinstance(value, collections.Mapping):
                    raise ValueError("Property definition for %s must be "
                                     "a Mapping type" % prop_name)
                value = prop_type._from_dict(**value)

            if isinstance(prop_type, list):
                if not isinstance(value, list):
                    raise TypeError("Attribute %s must be a "
                                    "list." % prop_name)
                new_value = []
                for v in value:
                    new_v = v
                    if is_aws_object_subclass(prop_type[0]):
                        # NOTE(review): same collections.Mapping issue here.
                        if not isinstance(v, collections.Mapping):
                            raise ValueError(
                                "Property definition for %s must be "
                                "a list of Mapping types" % prop_name)
                        new_v = prop_type[0]._from_dict(**v)
                    new_value.append(new_v)
                value = new_value
            props[prop_name] = value
        if title:
            return cls(title, **props)
        return cls(**props)

    @classmethod
    def from_dict(cls, title, d):
        """Public alternate constructor: ``cls.from_dict(title, d)``."""
        return cls._from_dict(title, **d)

    def _validate_props(self):
        # Ensure every property marked required in ``props`` was set.
        for k, (_, required) in list(self.props.items()):
            if required and k not in self.properties:
                rtype = getattr(self, 'resource_type', "<unknown type>")
                title = getattr(self, 'title')
                msg = "Resource %s required in type %s" % (k, rtype)
                if title:
                    msg += " (title: %s)" % title
                raise ValueError(msg)
def __getitem__(self, k):
    """Index like a normal OrderedDict, but also accept slices.

    A slice returns a new SlicableOrderedDict built from the selected
    (key, value) pairs.  Only non-negative start/stop/step are supported
    (itertools.islice semantics).
    """
    if not isinstance(k, slice):
        return OrderedDict.__getitem__(self, k)
    # BUGFIX: forward k.step as well — the original dropped it, so
    # e.g. d[::2] behaved exactly like d[:].
    return SlicableOrderedDict(islice(self.items(), k.start, k.stop, k.step))
class IVCurveGroup(MutableMapping):
    """Mapping of beam indices -> IV curve pairs for one (theta, phi) group.

    Keys may be given as tuples, strings that eval to tuples, or objects
    exposing an ``index`` attribute/method; values are IVCurvePair objects.

    NOTE: the class body continues past this chunk (``datasets`` setter,
    ``load``, ``read_theory``, ``write``, ``rfactor`` are defined below).
    """

    def __init__(self, group_name=None, datasets=(), theta=0., phi=0., group_id=0):
        # NOTE: default changed from the mutable ``[]`` to the equivalent
        # (and safe) immutable ``()`` — the setter only iterates it.
        self.theta = theta
        self.phi = phi
        self._datasets = OrderedDict()
        self.datasets = datasets
        self.id = group_id
        self.name = group_name

    def __repr__(self):
        # Fully-qualified class name unless defined in __main__.
        module = self.__class__.__module__
        module = module if module != '__main__' else None
        if module is None or module == str.__class__.__module__:
            name = self.__class__.__name__
        else:
            name = module + '.' + self.__class__.__name__
        return ("{}(group_name='{}', datasets={}, theta={}, phi={}, "
                "group_id={})".format(name, self.name, self.datasets,
                                      self.theta, self.phi, self.id))

    def __eq__(self, other):
        return self._datasets == other._datasets

    def __ne__(self, other):
        return self._datasets != other._datasets

    # NOTE(review): ``id`` is an int, so ``len(self.id)`` raises TypeError;
    # these orderings look broken but are preserved as-is (fixing them would
    # change observable behavior for any caller relying on the TypeError).
    def __lt__(self, other):
        return len(self.id) < len(other.id)

    def __gt__(self, other):
        return len(self.id) > len(other.id)

    def __le__(self, other):
        return len(self.id) <= len(other.id)

    def __ge__(self, other):
        return len(self.id) >= len(other.id)

    def __contains__(self, key):
        # NOTE(review): ``unicode`` is a Python-2 name; on Python 3 the
        # second operand of the ``or`` raises NameError for non-str keys.
        if isinstance(key, str) or isinstance(key, unicode):
            try:
                # search each keys' beams for matching index
                keys = [index for index in self._datasets.keys()]
                key_str = str(key)
                found = False
                for index in keys:
                    if key_str in str(index):
                        key = index
                        found = True
                        break
                if not found:
                    raise KeyError()
            except:
                key = eval(key)  # probably just a simple tuple
        elif hasattr(key, 'index'):
            if callable(key.index):
                key = key.index()
            else:
                key = key.index
        return self._datasets.__contains__(key)

    def __len__(self):
        return len(self._datasets)

    def __delitem__(self, key):
        if isinstance(key, str) or isinstance(key, unicode):
            key = eval(key)
        elif isinstance(key, int):
            key = self._datasets.keys()[key]
        elif hasattr(key, 'index'):
            try:
                if callable(key.index):
                    key = key.index()
                else:
                    key = key.index
            except TypeError:
                pass
        del self._datasets[key]

    def __iter__(self):
        return iter(self._datasets)

    def __setitem__(self, key, value):
        # Coerce str / index-bearing keys to the canonical tuple form.
        if isinstance(key, str) or isinstance(key, unicode):
            key = eval(key)
        elif hasattr(key, 'index'):
            try:
                if callable(key.index):
                    key = key.index()
                else:
                    key = key.index
            except TypeError:
                pass
        self._datasets[key] = value

    def __getitem__(self, key):
        if isinstance(key, str) or isinstance(key, unicode):
            try:
                # search each keys' beams for matching index
                keys = self._datasets.keys()
                key_str = str(key)
                found = False
                for index in keys:
                    if key_str in str(index):
                        key = index
                        found = True
                        break
                if not found:
                    raise KeyError
            except:
                key = eval(key)  # probably just a simple tuple
        elif isinstance(key, int):
            key = self._datasets.keys()[key]
        elif isinstance(key, tuple):
            for beam_set in self._datasets.keys():
                if key in beam_set:
                    key = beam_set
        elif hasattr(key, 'index'):
            try:
                if callable(key.index):
                    key = key.index()
                else:
                    key = key.index
            except TypeError:
                pass
        return self._datasets.__getitem__(key)

    def __hash__(self):
        # NOTE(review): OrderedDict is unhashable, so this raises TypeError.
        return self._datasets.__hash__()

    def clear(self):
        self._datasets.clear()

    def keys(self):
        # BUGFIX: the original had no ``return`` and always yielded None.
        return self._datasets.keys()

    @property
    def name(self):
        return self._group_name or str(self.id)

    @property
    def id(self):
        return self._group_id

    @property
    def theta(self):
        return self._theta

    @property
    def phi(self):
        return self._phi

    @property
    def datasets(self):
        # NOTE(review): set() makes the returned order nondeterministic.
        return list(set(self.values()))

    @id.setter
    def id(self, group_id):
        self._group_id = int(group_id) if group_id is not None else 0

    @theta.setter
    def theta(self, theta):
        # Normalize angle into [0, 360).
        self._theta = float(theta) % 360.

    @phi.setter
    def phi(self, phi):
        # Normalize angle into [0, 360).
        self._phi = float(phi) % 360.
@datasets.setter def datasets(self, iv_pairs): self.clear() for iv_pair in iv_pairs: if isinstance(iv_pair, IVCurvePair): self.__set_item(str(iv_pair.index), iv_pair) if (isinstance(iv_pair.index, set) or isinstance(iv_pair.index, BeamSet)): for index in iv_pair.index: self.__setitem__(index, iv_pair) elif isinstance(iv_pair, _tuple): self.__setitem__(iv_pair, iv_pairs[iv_pair]) @name.setter def name(self, name): self._group_name = name or "group_{}".format(self._group_id) @classmethod def load(cls, ctr_file, res_file=None, **group_kwargs): lines = [] try: with open(ctr_file, 'r') as f: lines = [line.lstrip() for line in f] except IOError: raise IOError("Failed to read from control file '{}'" "".format(group)) # read beam information from LEED theoretical result file if res_file == None: res_file, ext = os.path.splitext(ctr_file) if not ext[1:].isdigit(): res_file += '.res' else: # handle multiple datasets res_file = os.path.splitext(res_file)[0] + '.res' + ext try: theory_beams = IVCurveGroup.read_theory(res_file) except IOError: theory_beams = {} ivs = IVCurveGroup(**group_kwargs) for line in lines: iv = IVCurvePair.from_control_string(line, ctr_file) if isinstance(iv, IVCurvePair): for beam in iv.index.beams: beam.data = theory_beams[beam.index()] iv.theory = iv.index.get_combined_IV() iv.theory.path = res_file ivs[iv.index] = iv return ivs @classmethod def read_theory(cls, filename): with open(filename, 'r') as f: lines = [line.lstrip() for line in f if line.lstrip() != ''] beam_info = {} data = [] for line in lines: if line.startswith('#bn'): n_beams = int(line.split()[1]) elif line.startswith('#en'): en, ei, ef, es, = [eval(val) for val in line.split()[1:5]] elif line.startswith('#bi'): vars = [eval(var) for var in line.split()[1:]] beam_info[tuple(vars[1:3])] = (vars[0], vars[3]) elif line.startswith('#'): continue # skip general comment else: data.append([eval(var) for var in line.split()]) from numpy import loadtxt data = loadtxt(filename, 
dtype=float).transpose() x, data = data[0], data[1:] if len(data) != n_beams: f = lambda x, y: len(x) == len(y) if not all(f(y[i], y[i + 1]) for i in range(len(data) - 1)): raise ValueError("LEED theory data has different " "numbers of datapoints") for beam in beam_info: i, j = beam_info[beam] beam_info[beam] = (x, data[i]) return beam_info def write(self, ctr_file, res_file): from numpy import transpose with open(ctr_file, 'w') as f: f.write("{comments}\n".format(comments='# write test')) for dataset in self.datasets: for beam in dataset.index: # write individual beam data f.write("{line}\n", str(beam) + '\n') @property def rfactor(self): rf = 0.
def __getitem__(self, key):
    """Return the value for *key*, re-writing it through ``__setitem__``.

    The write-back routes every read through the (sub)class's
    ``__setitem__`` hook -- presumably so reads refresh the entry's
    position/recency; TODO confirm against the subclass's setter.
    """
    fetched = OrderedDict.__getitem__(self, key)
    self.__setitem__(key, fetched)
    return fetched
def __getitem__(self, key):
    """Case-insensitive section lookup that auto-creates missing sections.

    Lookup is done on ``key.lower()``; on a miss a fresh
    ``_config_dict_type()`` is stored (via ``self[key]``, which presumably
    normalizes the key itself -- TODO confirm) and returned.
    """
    lowered = key.lower()
    try:
        return OrderedDict.__getitem__(self, lowered)
    except KeyError:
        fresh = _config_dict_type()
        self[key] = fresh
        return fresh
def __getitem__(self, key):
    """Case-insensitive lookup through ``self.keyMap``.

    The lowercased key is translated to its canonical spelling via
    ``keyMap`` before the real fetch; unknown keys raise ``KeyError``
    carrying the caller's original spelling.
    """
    lowered = key.lower()
    if lowered in self.keyMap:
        return OrderedDict.__getitem__(self, self.keyMap[lowered])
    raise KeyError(key)
class MemCacheUnit(abc.ABC):
    """Memory cache unit: an ``OrderedDict`` used LRU-style with an
    optional total-size budget.

    Subclasses supply ``_get_value_size`` to price each stored value.
    When ``size_limit`` > 0, writing past the budget evicts the
    least-recently-used entries until the running total fits again.
    """

    def __init__(self, *args, **kwargs):
        # size_limit of 0 (the default) means "unlimited".
        self.size_limit = kwargs.pop("size_limit", 0)
        self._size = 0
        self.od = OrderedDict()

    def __setitem__(self, key, value):
        # TODO: thread safe? __setitem__ failure might leave the running
        # size total inconsistent with the stored entries.
        self._adjust_size(key, value)
        self.od.__setitem__(key, value)
        # A freshly written key becomes the most-recently-used entry.
        self.od.move_to_end(key)
        # Evict from the LRU (front) end while over budget.
        while self.limited and self._size > self.size_limit:
            self.popitem(last=False)

    def __getitem__(self, key):
        found = self.od.__getitem__(key)
        # Reads also refresh recency.
        self.od.move_to_end(key)
        return found

    def __contains__(self, key):
        return key in self.od

    def __len__(self):
        return len(self.od)

    def __repr__(self):
        return f"{self.__class__.__name__}<size_limit:{self.size_limit if self.limited else 'no limit'} total_size:{self._size}>\n{self.od.__repr__()}"

    def set_limit_size(self, limit):
        self.size_limit = limit

    @property
    def limited(self):
        """Whether this cache enforces a size budget."""
        return self.size_limit > 0

    @property
    def total_size(self):
        """Current priced total of all stored values."""
        return self._size

    def clear(self):
        self._size = 0
        self.od.clear()

    def popitem(self, last=True):
        # last=False pops the least-recently-used entry.
        key, value = self.od.popitem(last=last)
        self._size -= self._get_value_size(value)
        return key, value

    def pop(self, key):
        value = self.od.pop(key)
        self._size -= self._get_value_size(value)
        return value

    def _adjust_size(self, key, value):
        # Replacing an existing key first releases its old cost, so the
        # total reflects the post-write state.
        if key in self.od:
            self._size -= self._get_value_size(self.od[key])
        self._size += self._get_value_size(value)

    @abc.abstractmethod
    def _get_value_size(self, value):
        raise NotImplementedError
class Enum(object):
    """
    Enumerated type (enum) implementation for Python.

    :type enums: list of str
    :type replace: dict, optional
    :param replace: Dictionary of keys which are replaced by values.

    .. rubric:: Example

    >>> from obspy.core.util import Enum
    >>> units = Enum(["m", "s", "m/s", "m/(s*s)", "m*s", "other"])

    There are different ways to access the correct enum values:

    >>> print(units.get('m/s'))
    m/s
    >>> print(units['S'])
    s
    >>> print(units.OTHER)
    other
    >>> print(units[3])
    m/(s*s)
    >>> units.xxx  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AttributeError: 'xxx'

    Changing enum values will not work:

    >>> units.m = 5  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    NotImplementedError
    >>> units['m'] = 'xxx'  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    NotImplementedError

    Calling with a value will either return the mapped enum value or
    ``None``:

    >>> print(units("M*s"))
    m*s
    >>> units('xxx')
    >>> print(units(5))
    other

    The following enum allows replacing certain entries:

    >>> units2 = Enum(["m", "s", "m/s", "m/(s*s)", "m*s", "other"],
    ...               replace={'meter': 'm'})
    >>> print(units2('m'))
    m
    >>> print(units2('meter'))
    m
    """
    # marker needed for usage within ABC classes
    __isabstractmethod__ = False

    def __init__(self, enums, replace=None):
        # Keys are stored lowercased so every lookup path below is
        # case-insensitive.
        self.__enums = OrderedDict((str(e).lower(), e) for e in enums)
        # FIX: was a mutable default argument (replace={}); None-sentinel
        # avoids sharing one dict across instances.
        self.__replace = replace if replace is not None else {}

    def __call__(self, enum):
        """Return the mapped enum value, or ``None`` if unresolvable."""
        try:
            return self.get(enum)
        # FIX: narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit); matches the sibling Enum class.
        except Exception:
            return None

    def get(self, key):
        """Resolve *key*: an int selects by position; a string is looked
        up case-insensitively, replacement map first, then the enums.

        :raises KeyError: if a string key matches nothing.
        :raises IndexError: if an int key is out of range.
        """
        if isinstance(key, int):
            return list(self.__enums.values())[key]
        # FIX: membership previously tested the raw key while the read
        # used key.lower(); normalize once so mixed-case keys resolve.
        kl = key.lower()
        if kl in self.__replace:
            return self.__replace[kl]
        return self.__enums[kl]

    def __getattr__(self, name):
        try:
            return self.get(name)
        except KeyError:
            raise AttributeError("'%s'" % (name, ))

    def __setattr__(self, name, value):
        # Only the two private attributes may ever be bound; the enum
        # itself is immutable from the outside.
        if name == '_Enum__enums':
            self.__dict__[name] = value
            return
        elif name == '_Enum__replace':
            super(Enum, self).__setattr__(name, value)
            return
        raise NotImplementedError

    __getitem__ = get
    __setitem__ = __setattr__

    def __contains__(self, value):
        return value.lower() in self.__enums

    def values(self):
        return list(self.__enums.values())

    def keys(self):
        return list(self.__enums.keys())

    def items(self):
        return list(self.__enums.items())

    def iteritems(self):
        return iter(self.__enums.items())

    def __str__(self):
        """
        >>> enum = Enum(["c", "a", "b"])
        >>> print(enum)
        Enum(["c", "a", "b"])
        """
        keys = list(self.__enums.keys())
        return "Enum([%s])" % ", ".join(['"%s"' % _i for _i in keys])

    def _repr_pretty_(self, p, cycle):
        p.text(str(self))
def search(self, key, opt=None):
    """ Searches for key using given opt following hierarchy rules

    Parameters
    ----------
    key : str
        The name of the key to search for.

    opt : dict, optional
        Additional search parameters. Default is ``None``.

    Returns
    -------
    tuple
        Two element tuple containing a bool indicating whether `key`
        was found, and its value.

    Raises
    ------
    KeyError
        If `key` cannot be found, but is required (specified in `opt`)
    """
    # NOTE(review): this method uses Python 2 ``print`` statements; the
    # module targets Python 2 -- do not port blindly.
    if miscutils.fwdebug_check(8, 'WCL_DEBUG'):
        miscutils.fwdebug_print("\tBEG")
        miscutils.fwdebug_print("\tinitial key = '%s'" % key)
        miscutils.fwdebug_print("\tinitial opts = '%s'" % opt)

    curvals = None
    found = False
    value = ''
    # Searches are case-insensitive; anything without .lower() (a
    # non-string key) is just echoed for debugging and used as-is.
    if hasattr(key, 'lower'):
        key = key.lower()
    else:
        print "key = %s" % key

    # if key contains period, use it exactly instead of scoping rules
    if isinstance(key, str) and '.' in key:
        if miscutils.fwdebug_check(8, 'WCL_DEBUG'):
            miscutils.fwdebug_print("\t. in key '%s'" % key)
        # Walk the dotted path one component at a time, descending into
        # nested mappings via the OrderedDict base lookup.
        value = self
        found = True
        for k in key.split('.'):
            if miscutils.fwdebug_check(8, 'WCL_DEBUG'):
                miscutils.fwdebug_print("\t\t partial key '%s'" % k)
            if k in value:
                value = OrderedDict.__getitem__(value, k)
                if miscutils.fwdebug_check(8, 'WCL_DEBUG'):
                    miscutils.fwdebug_print("\t\t next val '%s'" % value)
                found = True
            else:
                # A missing component aborts the walk: not found.
                value = ''
                found = False
                break
    else:
        # start with stored current values
        if OrderedDict.__contains__(self, 'current'):
            # deepcopy so the overrides applied below never leak back
            # into the stored 'current' section
            curvals = copy.deepcopy(
                OrderedDict.__getitem__(self, 'current'))
        else:
            curvals = OrderedDict()

        # override with current values passed into function if given
        if opt is not None and 'currentvals' in opt:
            for ckey, cval in opt['currentvals'].items():
                if miscutils.fwdebug_check(8, 'WCL_DEBUG'):
                    miscutils.fwdebug_print(
                        "using specified curval %s = %s" % (ckey, cval))
                curvals[ckey] = cval

        if miscutils.fwdebug_check(6, 'WCL_DEBUG'):
            miscutils.fwdebug_print("curvals = %s" % curvals)

        # Search hierarchy: current values first, then the caller's
        # search object, then each section in search_order, then globals.
        if key in curvals:
            #print "found %s in curvals" % (key)
            found = True
            value = curvals[key]
        elif opt and 'searchobj' in opt and key in opt['searchobj']:
            found = True
            value = opt['searchobj'][key]
        else:
            #print dir(self)
            if hasattr(self, 'search_order'):
                for sect in self.search_order:
                    #print "Searching section %s for key %s" % (sect, key)
                    # 'curr_<sect>' in curvals names which subsection of
                    # <sect> is currently active.
                    if "curr_" + sect in curvals:
                        currkey = curvals['curr_' + sect]
                        #print "\tcurrkey for section %s = %s" % (sect, currkey)
                        if OrderedDict.__contains__(self, sect):
                            sectdict = OrderedDict.__getitem__(self, sect)
                            if currkey in sectdict:
                                if key in sectdict[currkey]:
                                    found = True
                                    value = sectdict[currkey][key]
                                    break

            # lastly check global values
            if not found:
                #print "\t%s not found, checking global values" % (key)
                if OrderedDict.__contains__(self, key):
                    found = True
                    value = OrderedDict.__getitem__(self, key)

    # A required-but-missing key is fatal; dump context for debugging.
    if not found and opt and 'required' in opt and opt['required']:
        print "\n\nError: search for %s failed" % (key)
        print "\tcurrent = ", OrderedDict.__getitem__(self, 'current')
        print "\topt = ", opt
        print "\tcurvals = ", curvals
        print "\n\n"
        raise KeyError("Error: Search failed (%s)" % key)

    if miscutils.fwdebug_check(8, 'WCL_DEBUG'):
        miscutils.fwdebug_print("\tEND: found=%s, value=%s" % (found, value))

    return found, value
def __getitem__(self, key):
    """Auto-vivifying lookup: a missing *key* is first populated with a
    fresh ``ExpandingDict`` (stored through ``self[key]``), then the
    stored value is returned -- akin to ``defaultdict`` behavior.
    """
    try:
        return OrderedDict.__getitem__(self, key)
    except KeyError:
        self[key] = ExpandingDict()
    return OrderedDict.__getitem__(self, key)
class Enum(object):
    """
    Enumerated type (enum) implementation for Python.

    :type enums: list[str]
    :type replace: dict, optional
    :param replace: Dictionary of keys which are replaced by values.

    .. rubric:: Example

    >>> from obspy.core.util import Enum
    >>> units = Enum(["m", "s", "m/s", "m/(s*s)", "m*s", "other"])

    There are different ways to access the correct enum values:

    >>> print(units.get('m/s'))
    m/s
    >>> print(units['S'])
    s
    >>> print(units.OTHER)
    other
    >>> print(units[3])
    m/(s*s)
    >>> units.xxx  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    AttributeError: 'xxx'

    Changing enum values will not work:

    >>> units.m = 5  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    NotImplementedError
    >>> units['m'] = 'xxx'  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    NotImplementedError

    Calling with a value will either return the mapped enum value or
    ``None``:

    >>> print(units("M*s"))
    m*s
    >>> units('xxx')
    >>> print(units(5))
    other

    The following enum allows replacing certain entries:

    >>> units2 = Enum(["m", "s", "m/s", "m/(s*s)", "m*s", "other"],
    ...               replace={'meter': 'm'})
    >>> print(units2('m'))
    m
    >>> print(units2('meter'))
    m
    """
    # marker needed for usage within ABC classes
    __isabstractmethod__ = False

    def __init__(self, enums, replace=None):
        # Keys are stored lowercased so every lookup path below is
        # case-insensitive.
        self.__enums = OrderedDict((str(e).lower(), e) for e in enums)
        # FIX: was a mutable default argument (replace={}); None-sentinel
        # avoids sharing one dict across instances.
        self.__replace = replace if replace is not None else {}

    def __call__(self, enum):
        """Return the mapped enum value, or ``None`` if unresolvable."""
        try:
            return self.get(enum)
        except Exception:
            return None

    def get(self, key):
        """Resolve *key*: an int selects by position; a string is looked
        up case-insensitively, replacement map first, then the enums.

        :raises KeyError: if a string key matches nothing.
        :raises IndexError: if an int key is out of range.
        """
        if isinstance(key, int):
            return list(self.__enums.values())[key]
        # FIX: membership previously tested the raw key while the read
        # used key.lower(); normalize once so mixed-case keys resolve.
        kl = key.lower()
        if kl in self.__replace:
            return self.__replace[kl]
        return self.__enums[kl]

    def __getattr__(self, name):
        try:
            return self.get(name)
        except KeyError:
            raise AttributeError("'%s'" % (name, ))

    def __setattr__(self, name, value):
        # Only the two private attributes may ever be bound; the enum
        # itself is immutable from the outside.
        if name == '_Enum__enums':
            self.__dict__[name] = value
            return
        elif name == '_Enum__replace':
            super(Enum, self).__setattr__(name, value)
            return
        raise NotImplementedError

    __getitem__ = get
    __setitem__ = __setattr__

    def __contains__(self, value):
        return value.lower() in self.__enums

    def values(self):
        return list(self.__enums.values())

    def keys(self):
        return list(self.__enums.keys())

    def items(self):
        return list(self.__enums.items())

    def iteritems(self):
        return iter(self.__enums.items())

    def __str__(self):
        """
        >>> enum = Enum(["c", "a", "b"])
        >>> print(enum)
        Enum(["c", "a", "b"])

        >>> enum = Enum(["not existing",
        ...              "not reported",
        ...              "earthquake",
        ...              "controlled explosion",
        ...              "experimental explosion",
        ...              "industrial explosion"])
        >>> print(enum)  # doctest: +NORMALIZE_WHITESPACE
        Enum(["not existing", "not reported", ...,
              "experimental explosion", "industrial explosion"])
        """
        return self.__repr__()

    def __repr__(self):
        """
        >>> enum = Enum(["c", "a", "b"])
        >>> print(repr(enum))
        Enum(["c", "a", "b"])

        >>> enum = Enum(["not existing",
        ...              "not reported",
        ...              "earthquake",
        ...              "controlled explosion",
        ...              "experimental explosion",
        ...              "industrial explosion"])
        >>> print(repr(enum))  # doctest: +NORMALIZE_WHITESPACE
        Enum(["not existing", "not reported", ...,
              "experimental explosion", "industrial explosion"])
        """
        def _repr_list_of_keys(keys):
            return ", ".join('"{}"'.format(_i) for _i in keys)

        keys = list(self.__enums.keys())
        key_repr = _repr_list_of_keys(keys)
        # Elide middle keys symmetrically until the listing fits in
        # ~100 characters ("..." alone if nothing fits).
        index = len(keys)  # FIX: dropped redundant int() around len()
        while len(key_repr) > 100:
            if index == 0:
                key_repr = "..."
                break
            index -= 1
            key_repr = (_repr_list_of_keys(keys[:index]) +
                        ", ..., " +
                        _repr_list_of_keys(keys[-index:]))
        return "Enum([{}])".format(key_repr)

    def _repr_pretty_(self, p, cycle):
        p.text(str(self))
def __getitem__(self, key):
    """Plain delegation: fetch *key* via the OrderedDict base class."""
    value = OrderedDict.__getitem__(self, key)
    return value
def __getitem__(self, key):
    """Normalized lookup: underscores are stripped from *key* and it is
    lowercased before the base-class fetch (so ``A_B`` and ``ab`` hit
    the same entry)."""
    normalized = key.replace("_", "").lower()
    return OrderedDict.__getitem__(self, normalized)
def __getitem__(self, key, *args, **kwds):
    """Case-normalized lookup: *key* is run through the module's
    ``_lower`` helper, then delegated (with any extra arguments) to the
    OrderedDict base class."""
    lowered = _lower(key)
    return OrderedDict.__getitem__(self, lowered, *args, **kwds)
def __getitem__(self, key):
    """Return the current value of the parameter stored under *key*.

    The dict stores container objects; lookup unwraps them via their
    ``.get()`` method so callers see values, presumably hiding the
    container type -- confirm against the container's API.
    """
    container = OrderedDict.__getitem__(self, key)
    return container.get()