def __init__(self, inifile='', commentchar=None, encoding=None, env=None,
             convertors=None, lazy=False, writable=False, raw=False,
             import_env=True):
    """
    Initialize the Ini container from an optional ini file path.

    lazy is used to parse first but not deal at time, and only when
    the user invoke finish() function, it'll parse the data.
    import_env will import all environment variables
    """
    super(Ini, self).__init__()
    self._inifile = inifile
    # explicit arguments win; otherwise fall back to module-wide defaults
    self._commentchar = commentchar or __default_env__.get(
        'commentchar', '#')
    self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
    self._env = __default_env__.get('env', {}).copy()
    self._env.update(env or {})
    # expose the builtin set() inside evaluated ini expressions
    self._env['set'] = set
    self.update(self._env)
    self._globals = SortedDict()
    self._import_env = import_env
    if self._import_env:
        # make process environment variables visible to value evaluation
        self._globals.update(os.environ)
    self._convertors = __default_env__.get('convertors', {}).copy()
    self._convertors.update(convertors or {})
    self._lazy = lazy
    self._writable = writable
    self._raw = raw
    if lazy:
        # lazy mode evaluates later, so keep a copy of env for that pass
        self._globals.update(self._env.copy())
    if self._inifile:
        self.read(self._inifile)
def __init__(self, inifile='', commentchar=None, encoding=None, env=None,
             convertors=None, lazy=False, writable=False, raw=False,
             import_env=True, basepath='.', pre_variables=None):
    """
    Initialize the Ini container.

    lazy is used to parse first but not deal at time, and only when
    the user invoke finish() function, it'll parse the data.
    import_env will import all environment variables
    if inifile is dict, then automatically add to ini object

    basepath is used to resolve [include:...] references;
    pre_variables are substituted into raw values before evaluation.
    """
    super(Ini, self).__init__()
    if isinstance(inifile, dict):
        # a dict input is loaded directly as section data, not read from disk
        self._inifile = ''
        data = inifile
    else:
        self._inifile = inifile
        data = None
    self._basepath = basepath
    # explicit arguments win; otherwise fall back to module-wide defaults
    self._commentchar = commentchar or __default_env__.get(
        'commentchar', '#')
    self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
    self._env = __default_env__.get('env', {}).copy()
    self._env.update(env or {})
    # expose the builtin set() inside evaluated ini expressions
    self._env['set'] = set
    self.update(self._env)
    self._globals = SortedDict()
    self._pre_variables = pre_variables or {}
    self._import_env = import_env
    if self._import_env:
        # make process environment variables visible to value evaluation
        self._globals.update(os.environ)
    self._convertors = __default_env__.get('convertors', {}).copy()
    self._convertors.update(convertors or {})
    self._lazy = lazy
    self._writable = writable
    self._raw = raw
    if lazy:
        self._globals.update(self._env.copy())
    if self._inifile:
        self.read(self._inifile)
    if data:
        # populate sections from the dict form: {section: {key: value}}
        for k, v in data.items():
            s = self.add(k)
            for _k, _v in v.items():
                s[_k] = _v
def test_keys_are_sorted(keys):
    """SortedDict iterates — and exposes keys/items/values — in key order."""
    pairs = [(k, k + 1) for k in keys]
    expected_keys = sorted(keys)
    expected_items = sorted(pairs)
    d = SortedDict(pairs)
    assert sorted(d) == expected_keys
    assert sorted(d.keys()) == expected_keys
    assert d.items() == expected_items
    assert d.values() == sorted(v for _, v in pairs)
def test_get(self):
    """Indexing returns stored values or raises; .get() supports defaults."""
    d = SortedDict({'a': 1, 'b': 2})
    self.assertEqual(d['a'], 1)
    self.assertEqual(d['b'], 2)
    with self.assertRaises(KeyError):
        d['c']
    self.assertEqual(d.get('a'), 1)
    self.assertEqual(d.get('b'), 2)
    self.assertEqual(d.get('c', 3), 3)
    self.assertIs(d.get('c'), None)
def __init__(self, Ini):
    """Build a Command for every [program:<name>] section of *Ini*."""
    self.commands = SortedDict()
    for section_name, section in Ini.items():
        if not section_name.startswith('program:'):
            continue
        name = section_name[8:]  # drop the 'program:' prefix
        self.commands[name] = Command(
            name=name,
            command=section.command,
            cwd=section.get('directory', None),
            logfile=section.get('logfile', name + '.log'),
            logfile_maxbytes=section.get('logfile_maxbytes', 50 * 1024 * 1024),
            logfile_backups=section.get('logfile_backups', 10),
            startretries=section.get('startretries', 3),
            starting_time=section.get('starting_time', 1),
        )
def __init__(self, inifile='', commentchar=None, encoding=None, env=None,
             convertors=None, lazy=False, writable=False, raw=False,
             import_env=True):
    """
    Initialize the Ini container from an optional ini file path.

    lazy is used to parse first but not deal at time, and only when
    the user invoke finish() function, it'll parse the data.
    import_env will import all environment variables
    """
    super(Ini, self).__init__()
    self._inifile = inifile
    # arguments take precedence over module-level defaults
    self._commentchar = commentchar or __default_env__.get('commentchar', '#')
    self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
    self._env = __default_env__.get('env', {}).copy()
    self._env.update(env or {})
    # make the builtin set() usable inside evaluated ini expressions
    self._env['set'] = set
    self.update(self._env)
    self._globals = SortedDict()
    self._import_env = import_env
    if self._import_env:
        # expose process environment variables to value evaluation
        self._globals.update(os.environ)
    self._convertors = __default_env__.get('convertors', {}).copy()
    self._convertors.update(convertors or {})
    self._lazy = lazy
    self._writable = writable
    self._raw = raw
    if lazy:
        # deferred evaluation needs its own copy of the env mapping
        self._globals.update(self._env.copy())
    if self._inifile:
        self.read(self._inifile)
def freeze(self):
    """
    Process all EvalValue to real value

    Turns lazy mode off and resolves every Lazy value stored in the
    sections, then discards the evaluation globals.
    """
    self._lazy = False
    for k, v in self.items():
        if k in self._env:
            continue  # env entries are injected, not parsed sections
        for _k, _v in v.items():
            if isinstance(_v, Lazy):
                # NOTE(review): this reads self.writable while __init__
                # stores self._writable — confirm a property exposes it.
                if self.writable:
                    _v.get()
                else:
                    # replace the Lazy wrapper with its computed value
                    v.__setitem__(_k, _v.get(), replace=True)
                del _v
    self._globals = SortedDict()
class Posts(object):
    """Collection of blog posts discovered on disk, cached in sorted order."""

    def __init__(self, app, root_dir=None, file_ext=None):
        # explicit arguments win; otherwise read locations from app config
        self.root_dir = root_dir if root_dir is not None else app.config['POSTS_DIRECTORY']
        self.file_ext = file_ext if file_ext is not None else app.config['POSTS_FILE_EXTENSION']
        self._app = app
        # cache is ordered by each post's .step attribute
        self._cache = SortedDict(key=lambda p: p.step)
        self._initialise_cache()

    @property
    def posts(self):
        # debug mode shows drafts too; production filters to published posts
        if self._app.debug:
            return self._cache.values()
        else:
            return [post for post in self._cache.values() if post.published]

    def get_post_or_404(self, path):
        """
        Returns the post object for given path, or raises a NotFound exception
        """
        try:
            return self._cache[path]
        except KeyError:
            abort(404)

    def next_post(self, path):
        # delegate ordering-aware navigation to the sorted cache
        return self._cache.next(path)

    def previous_post(self, path):
        return self._cache.previous(path)

    def _initialise_cache(self):
        """
        Walks the root directory and adds all posts to the cache
        """
        for (root, dirpaths, filepaths) in os.walk(self.root_dir):
            for filepath in filepaths:
                filename, ext = os.path.splitext(filepath)
                if ext == self.file_ext:
                    # store path relative to the posts root
                    path = os.path.join(root, filepath).replace(self.root_dir, '')
                    post = Post(path, root_dir=self.root_dir)
                    self._cache[post.url_path] = post

    def get_posts_by_category(self, category):
        # linear scan; fine for a modest number of posts
        return [post for post in self._cache.values() if post.category == category]
def __init__(self, redis, events, periodics=None, select=None):
    """
    Set up the reactor.

    redis     -- redis client handed to ReactorDB
    events    -- event definitions fed to self.mapper_gen
    periodics -- optional list of periodic tasks (defaults to a fresh empty list)
    select    -- optional list passed to Selector (defaults to a fresh empty list)
    """
    # Bug fix: the original used mutable default arguments (periodics=[],
    # select=[]), which are shared across all instances and can be mutated
    # in place. Use None sentinels and create fresh lists per call.
    if periodics is None:
        periodics = []
    if select is None:
        select = []
    self.logger = logging.getLogger('reactor')
    self.selector = Selector(select)
    self.db = ReactorDB(redis)
    self.mapper = dict(self.mapper_gen(events))
    self.periodics = periodics
    self.timeline = SortedDict()
    self.load()
def test_pop_popitem(self):
    """pop()/popitem() remove entries and raise KeyError once empty."""
    d = SortedDict({'a': 1, 'b': 2})
    self.assertEqual(d.pop('b'), 2)
    self.assertNotIn('b', d)
    self.assertEqual(d.pop('b', 3), 3)
    self.assertEqual(d.pop(), 1)
    self.assertNotIn('a', d)
    with self.assertRaises(KeyError):
        d.pop()
    d = SortedDict({'a': 1})
    self.assertEqual(d.popitem(), ('a', 1))
    self.assertNotIn('a', d)
    self.assertEqual(len(d), 0)
    with self.assertRaises(KeyError):
        d.popitem()
def __init__(self, Ini):
    """Build a Command for every [program:<name>] section of *Ini*."""
    self.commands = SortedDict()
    for section_name, section in Ini.items():
        if not section_name.startswith('program:'):
            continue
        name = section_name[8:]  # drop the 'program:' prefix
        self.commands[name] = Command(
            name=name,
            command=section.command,
            cwd=section.get('directory', None),
            logfile=section.get('logfile', name + '.log'),
            logfile_maxbytes=section.get('logfile_maxbytes', 50 * 1024 * 1024),
            logfile_backups=section.get('logfile_backups', 10),
        )
def __init__(self, name, comments=None, encoding=None, root=None, info=None):
    """
    A named ini section, optionally attached to a root Ini object.

    name     -- section name as written between [brackets]
    comments -- comment lines collected above the section header
    encoding -- text encoding used when the section is dumped
    root     -- owning Ini instance (None for a detached section)
    info     -- positional info about where the section was parsed from
    """
    super(Section, self).__init__()
    self._root = root
    self._name = name
    self.add_comment(comments=comments)
    self._field_comments = {}
    self._field_flag = {}
    self._encoding = encoding
    self._info = info
    #sync
    # NOTE(review): this reads self._lazy, which this __init__ never sets —
    # presumably inherited/looked up elsewhere; confirm it is not meant to
    # be self._root._lazy.
    if self._root and self._lazy:
        # self._root._globals.setdefault(name, SortedDict())
        self._root._globals[name] = SortedDict()
def __init__(self, inifile='', commentchar=None, encoding=None, env=None,
             convertors=None, lazy=False, writable=False, raw=False,
             import_env=True, basepath='.', pre_variables=None):
    """
    Initialize the Ini container.

    lazy is used to parse first but not deal at time, and only when
    the user invoke finish() function, it'll parse the data.
    import_env will import all environment variables
    if inifile is dict, then automatically add to ini object

    basepath resolves [include:...] references; pre_variables are
    substituted into raw values before evaluation.
    """
    super(Ini, self).__init__()
    if isinstance(inifile, dict):
        # dict input is loaded directly as section data, not read from disk
        self._inifile = ''
        data = inifile
    else:
        self._inifile = inifile
        data = None
    self._basepath = basepath
    # arguments take precedence over module-level defaults
    self._commentchar = commentchar or __default_env__.get('commentchar', '#')
    self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
    self._env = __default_env__.get('env', {}).copy()
    self._env.update(env or {})
    # make the builtin set() usable inside evaluated ini expressions
    self._env['set'] = set
    self.update(self._env)
    self._globals = SortedDict()
    self._pre_variables = pre_variables or {}
    self._import_env = import_env
    if self._import_env:
        # expose process environment variables to value evaluation
        self._globals.update(os.environ)
    self._convertors = __default_env__.get('convertors', {}).copy()
    self._convertors.update(convertors or {})
    self._lazy = lazy
    self._writable = writable
    self._raw = raw
    if lazy:
        self._globals.update(self._env.copy())
    if self._inifile:
        self.read(self._inifile)
    if data:
        # populate sections from the dict form: {section: {key: value}}
        for k, v in data.items():
            s = self.add(k)
            for _k, _v in v.items():
                s[_k] = _v
class CommandsManager(object):
    """Registry of Command objects built from [program:<name>] ini sections,
    with start/stop/shutdown/status/check operations over all of them."""

    def __init__(self, Ini):
        # NOTE: the parameter shadows the (likely) Ini class name; it is a
        # parsed ini object iterated section by section.
        self.commands = SortedDict()
        for k, v in Ini.items():
            if k.startswith('program:'):
                kwargs = {}
                kwargs['name'] = name = k[8:]  # strip 'program:' prefix
                kwargs['command'] = v.command
                kwargs['cwd'] = v.get('directory', None)
                kwargs['logfile'] = v.get('logfile', name+'.log')
                kwargs['logfile_maxbytes'] = v.get('logfile_maxbytes', 50*1024*1024)
                kwargs['logfile_backups'] = v.get('logfile_backups', 10)
                kwargs['startretries'] = v.get('startretries', 3)
                kwargs['starting_time'] = v.get('starting_time', 1)
                self.commands[name] = Command(**kwargs)

    def start(self, command=None):
        # Start one named program, or every program when no name is given.
        if not command:
            for k, command in self.commands.items():
                command.do_start()
            # NOTE(review): this branch returns None — confirm callers accept it
        else:
            cmd = self.commands.get(command, '')
            if not cmd:
                msg = "Program %s is not found" % command
            else:
                msg = cmd.do_start()
            return msg

    def stop(self, command):
        # Stop a single named program and return a status message.
        cmd = self.commands.get(command, '')
        if not cmd:
            msg = "Program %s is not found" % command
        else:
            msg = cmd.do_stop()
        return msg

    def shutdown(self):
        # Stop every managed program.
        for k, command in self.commands.items():
            command.do_stop()
        msg = 'shutdown successful'
        return msg

    def status(self):
        # One status line per program, joined with newlines.
        s = []
        for k, command in self.commands.items():
            s.append(command.do_status())
        return '\n'.join(s)

    def check(self):
        # Restart programs that should be running but are not healthy.
        # NOTE(review): reads p.stop — presumably a process-state attribute;
        # confirm it is not meant to be a state/status field.
        for k, p in self.commands.items():
            if not p.stop in (STOPPED, FATAL) and not p.is_ok():
                p.do_start()
def __new__(cls, name, bases, dict):
    """
    Model metaclass: collects Field attributes into Meta.fields, binds a
    DBF backing store, and attaches _meta and an objects Manager.
    The base-model case (bases == (object,)) is created untouched.

    Note: the third parameter shadows the builtin ``dict`` (class attrs).
    """
    if bases != (object, ):
        dict.pop('pk', None)
        meta = dict.pop('Meta', None)
        if not meta:
            # synthesize an empty Meta holder when the model declares none
            meta = type('Meta', (object, ), {})
        def setdefault(attr, default):
            # set a Meta attribute only if the model did not define it
            if not hasattr(meta, attr):
                setattr(meta, attr, default)
        setdefault('lazy', True)
        setdefault('dbname', name + '.dbf')
        if not hasattr(meta, 'fields'):
            # harvest declared Field instances from the class attributes
            meta.fields = SortedDict()
            for key, value in dict.iteritems():
                if isinstance(value, fields.Field):
                    meta.fields[key] = dict[key]
        # remove field attributes from the class body (used for side effect)
        [dict.pop(field) for field in meta.fields]
        if hasattr(meta, 'stream'):
            # an explicit stream takes precedence over a file name
            meta.dbf = DBF(meta.stream, meta.fields)
        else:
            meta.dbf = DBF(meta.dbname, meta.fields)
        if dict.get('__unicode__'):
            # derive __repr__ from the model's __unicode__ (shadows builtin repr)
            repr = (lambda self: u'<%s: %s>' % (name, self.__unicode__()))
            dict['__repr__'] = repr
        Model = type.__new__(cls, name, bases, dict)
        Model._meta = meta
        Model.objects = Manager(Model)
        return Model
    else:
        return type.__new__(cls, name, bases, dict)
class Ini(SortedDict):
    """Ini file container: sections of evaluated key/value settings.
    (Visible portion: constructor, filename property and the parser.)"""

    def __init__(self, inifile='', commentchar=None, encoding=None, env=None,
                 convertors=None, lazy=False, writable=False, raw=False,
                 import_env=True):
        """
        lazy is used to parse first but not deal at time, and only when
        the user invoke finish() function, it'll parse the data.
        import_env will import all environment variables
        """
        super(Ini, self).__init__()
        self._inifile = inifile
        self._commentchar = commentchar or __default_env__.get('commentchar', '#')
        self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
        self._env = __default_env__.get('env', {}).copy()
        self._env.update(env or {})
        # expose the builtin set() inside evaluated ini expressions
        self._env['set'] = set
        self.update(self._env)
        self._globals = SortedDict()
        self._import_env = import_env
        if self._import_env:
            self._globals.update(os.environ)
        self._convertors = __default_env__.get('convertors', {}).copy()
        self._convertors.update(convertors or {})
        self._lazy = lazy
        self._writable = writable
        self._raw = raw
        if lazy:
            self._globals.update(self._env.copy())
        if self._inifile:
            self.read(self._inifile)

    def set_filename(self, filename):
        self._inifile = filename

    def get_filename(self):
        return self._inifile

    filename = property(get_filename, set_filename)

    def read(self, fobj, filename=''):
        """Parse an ini file (path or file-like object) into sections."""
        encoding = None
        if isinstance(fobj, (str, unicode)):
            f = open(fobj, 'rb')
            text = f.read()
            f.close()
        else:
            text = fobj.read()
        text = text + '\n'
        begin = 0
        # detect and skip a BOM, remembering the implied encoding
        if text.startswith(codecs.BOM_UTF8):
            begin = 3
            encoding = 'UTF-8'
        elif text.startswith(codecs.BOM_UTF16):
            begin = 2
            encoding = 'UTF-16'
        if not encoding:
            # no BOM: probe for valid UTF-8, otherwise use the default
            try:
                unicode(text, 'UTF-8')
                encoding = 'UTF-8'
            except:
                encoding = defaultencoding
        self._encoding = encoding
        f = StringIO.StringIO(text)
        f.seek(begin)
        lineno = 0
        comments = []
        status = 'c'  # NOTE(review): never read afterwards — appears unused
        section = None
        while 1:
            lastpos = f.tell()
            line = f.readline()
            lineno += 1
            if not line:
                break
            line = line.strip()
            if line:
                if line.startswith(self._commentchar):
                    if lineno == 1: #first comment line
                        # honor a '#coding=...' declaration on the first line
                        b = r_encoding.search(line[1:])
                        if b:
                            self._encoding = b.groups()[0]
                        continue
                    comments.append(line)
                elif line.startswith('[') and line.endswith(']'):
                    sec_name = line[1:-1].strip()
                    #process include notation
                    if sec_name.startswith('include:'):
                        _filename = sec_name[8:].strip()
                        _filename = os.path.abspath(_filename)
                        if os.path.exists(_filename):
                            # recursively read, preserving this file's encoding
                            old_encoding = self._encoding
                            self.read(_filename)
                            self._encoding = old_encoding
                        else:
                            import warnings
                            warnings.warn(Warning("Can't find the file [%s], so just skip it" % _filename), stacklevel=2)
                        continue
                    info = RawValue(self._inifile, lineno, sec_name)
                    section = self.add(sec_name, comments, info=info)
                    comments = []
                elif '=' in line:
                    if section is None:
                        raise Exception, "No section found, please define it first in %s file" % self.filename
                    #if find <=, then it'll replace the old value for mutable variables
                    #because the default behavior will merge list and dict
                    pos = line.find('<=')
                    if pos != -1:
                        begin, end = pos, pos+2
                        replace_flag = True
                    else:
                        pos = line.find('=')
                        begin, end = pos, pos+1
                        replace_flag = False
                    keyname = line[:begin].strip()
                    #check keyname
                    if keyname in self._env:
                        raise KeyError("Settings key %s is alread defined in env, please change it's name" % keyname)
                    rest = line[end:].strip()
                    #if key= then value will be set ''
                    if rest == '':
                        v = None
                    else:
                        # re-read the value with the tokenizer so a logical
                        # value may span several physical lines
                        f.seek(lastpos+end)
                        try:
                            value, iden_existed = self.__read_line(f)
                        except Exception, e:
                            print_exc()
                            raise Exception, "Parsing ini file error in %s:%d:%s" % (filename or self._inifile, lineno, line)
                        if self._lazy:
                            # defer evaluation until freeze()/finish()
                            if iden_existed:
                                v = EvalValue(value, filename or self._inifile, lineno, line)
                            else:
                                v = value
                        else:
                            if self._raw:
                                v = RawValue(self._inifile, lineno, value, replace_flag)
                            else:
                                try:
                                    v = eval_value(value, self.env(), self[sec_name], self._encoding)
                                except Exception as e:
                                    print_exc()
                                    print dict(self)
                                    raise Exception("Converting value (%s) error in %s:%d:%s" % (value, filename or self._inifile, lineno, line))
                    section.add(keyname, v, comments, replace=replace_flag)
                    comments = []
                else:
                    # bare text between entries is treated as comment material
                    comments.append(line)
def __new__(cls, name, bases, attrs):
    """
    Document metaclass: merges inherited fields, enforces the
    allow_inheritance rule, collects declared BaseField attributes into
    _fields (ordered by creation_counter), attaches per-class DoesNotExist /
    MultipleObjectsReturned exceptions and registers the class globally.
    """
    metaclass = attrs.get("__metaclass__")
    super_new = super(DocumentMetaclass, cls).__new__
    if metaclass and issubclass(metaclass, DocumentMetaclass):
        # building a metaclass itself — no document processing needed
        return super_new(cls, name, bases, attrs)
    doc_fields = SortedDict()
    class_name = [name]
    superclasses = {}
    simple_class = True
    for base in bases:
        # Include all fields present in superclasses
        if hasattr(base, "_fields"):
            doc_fields.update(base._fields)
            class_name.append(base._class_name)
            # Get superclasses from superclass
            superclasses[base._class_name] = base
            superclasses.update(base._superclasses)
        if hasattr(base, "_meta"):
            # Ensure that the Document class may be subclassed -
            # inheritance may be disabled to remove dependency on
            # additional fields _cls and _types
            if base._meta.get("allow_inheritance", True) == False:
                raise ValueError("Document %s may not be subclassed" % base.__name__)
            else:
                simple_class = False
    meta = attrs.get("_meta", attrs.get("meta", {}))
    if "allow_inheritance" not in meta:
        meta["allow_inheritance"] = True
    # Only simple classes - direct subclasses of Document - may set
    # allow_inheritance to False
    if not simple_class and not meta["allow_inheritance"]:
        raise ValueError("Only direct subclasses of Document may set "
                         '"allow_inheritance" to False')
    attrs["_meta"] = meta
    attrs["_class_name"] = ".".join(reversed(class_name))
    attrs["_superclasses"] = superclasses
    # Add the document's fields to the _fields attribute
    declared_fields = {}
    for attr_name, attr_value in attrs.items():
        if hasattr(attr_value, "__class__") and issubclass(attr_value.__class__, BaseField):
            attr_value.name = attr_name
            if not attr_value.db_field:
                # default the storage name to the attribute name
                attr_value.db_field = attr_name
            declared_fields[attr_name] = attr_value
    # sort fields based on creation_counter
    by_value = lambda x, y: cmp(x[1], y[1])
    doc_fields.update(SortedDict(sorted(declared_fields.items(), by_value)))
    attrs["_fields"] = doc_fields
    new_class = super_new(cls, name, bases, attrs)
    for field in new_class._fields.values():
        # let each field know which document class owns it
        field.owner_document = new_class
    module = attrs.get("__module__")
    # per-class exception types that also inherit the bases' versions
    base_excs = tuple(base.DoesNotExist for base in bases if hasattr(base, "DoesNotExist")) or (DoesNotExist,)
    exc = subclass_exception("DoesNotExist", base_excs, module)
    new_class.add_to_class("DoesNotExist", exc)
    base_excs = tuple(base.MultipleObjectsReturned for base in bases if hasattr(base, "MultipleObjectsReturned"))
    base_excs = base_excs or (MultipleObjectsReturned,)
    exc = subclass_exception("MultipleObjectsReturned", base_excs, module)
    new_class.add_to_class("MultipleObjectsReturned", exc)
    global _document_registry
    _document_registry[name] = new_class
    return new_class
class Ini(SortedDict):
    """Ini file container: sections of evaluated key/value settings, with
    include support relative to a base path and #{name} pre-variables."""

    def __init__(self, inifile='', commentchar=None, encoding=None, env=None,
                 convertors=None, lazy=False, writable=False, raw=False,
                 import_env=True, basepath='.', pre_variables=None):
        """
        lazy is used to parse first but not deal at time, and only when
        the user invoke finish() function, it'll parse the data.
        import_env will import all environment variables
        if inifile is dict, then automatically add to ini object
        """
        super(Ini, self).__init__()
        if isinstance(inifile, dict):
            # dict input is loaded directly as section data, not from disk
            self._inifile = ''
            data = inifile
        else:
            self._inifile = inifile
            data = None
        self._basepath = basepath
        self._commentchar = commentchar or __default_env__.get('commentchar', '#')
        self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
        self._env = __default_env__.get('env', {}).copy()
        self._env.update(env or {})
        # expose the builtin set() inside evaluated ini expressions
        self._env['set'] = set
        self.update(self._env)
        self._globals = SortedDict()
        self._pre_variables = pre_variables or {}
        self._import_env = import_env
        if self._import_env:
            self._globals.update(os.environ)
        self._convertors = __default_env__.get('convertors', {}).copy()
        self._convertors.update(convertors or {})
        self._lazy = lazy
        self._writable = writable
        self._raw = raw
        if lazy:
            self._globals.update(self._env.copy())
        if self._inifile:
            self.read(self._inifile)
        if data:
            # populate sections from the dict form: {section: {key: value}}
            for k, v in data.items():
                s = self.add(k)
                for _k, _v in v.items():
                    s[_k] = _v

    def set_filename(self, filename):
        self._inifile = filename

    def get_filename(self):
        return self._inifile

    def set_basepath(self, basepath):
        self._basepath = basepath

    def set_pre_variables(self, v):
        self._pre_variables = v or {}

    filename = property(get_filename, set_filename)

    def _pre_var(self, value):
        """
        replace predefined variables, the format is #{name}
        """
        def sub_(m):
            # unknown names are replaced with the empty string
            return self._pre_variables.get(m.group()[2:-1].strip(), '')
        return r_pre_var.sub(sub_, value)

    def read(self, fobj, filename=''):
        """Parse an ini file (path or file-like object) into sections."""
        encoding = None
        if isinstance(fobj, (str, unicode)):
            f = open(fobj, 'rb')
            text = f.read()
            f.close()
        else:
            text = fobj.read()
        text = text + '\n'
        begin = 0
        # detect and skip a BOM, remembering the implied encoding
        if text.startswith(codecs.BOM_UTF8):
            begin = 3
            encoding = 'UTF-8'
        elif text.startswith(codecs.BOM_UTF16):
            begin = 2
            encoding = 'UTF-16'
        if not encoding:
            # no BOM: probe for valid UTF-8, otherwise use the default
            try:
                unicode(text, 'UTF-8')
                encoding = 'UTF-8'
            except:
                encoding = defaultencoding
        self._encoding = encoding
        f = StringIO.StringIO(text)
        f.seek(begin)
        lineno = 0
        comments = []
        status = 'c'  # NOTE(review): never read afterwards — appears unused
        section = None
        while 1:
            lastpos = f.tell()
            line = f.readline()
            lineno += 1
            if not line:
                break
            line = line.strip()
            if line:
                if line.startswith(self._commentchar):
                    if lineno == 1: #first comment line
                        # honor a '#coding=...' declaration on the first line
                        b = r_encoding.search(line[1:])
                        if b:
                            self._encoding = b.groups()[0]
                        continue
                    comments.append(line)
                elif line.startswith('[') and line.endswith(']'):
                    sec_name = line[1:-1].strip()
                    #process include notation
                    if sec_name.startswith('include:'):
                        # included paths are resolved against the base path
                        _filename = sec_name[8:].strip()
                        _file = os.path.join(self._basepath, _filename)
                        if os.path.exists(_file):
                            old_encoding = self._encoding
                            old_filename = self.filename
                            self.set_filename(_file)
                            self.read(_file, filename=_file)
                            self.set_filename(old_filename)
                            self._encoding = old_encoding
                        else:
                            import warnings
                            warnings.warn(Warning("Can't find the file [%s], so just skip it" % _filename), stacklevel=2)
                        continue
                    info = RawValue(self._inifile, lineno, sec_name)
                    section = self.add(sec_name, comments, info=info)
                    comments = []
                elif '=' in line:
                    if section is None:
                        raise Exception("No section found, please define it first in %s file" % self.filename)
                    #if find <=, then it'll replace the old value for mutable variables
                    #because the default behavior will merge list and dict
                    pos = line.find('<=')
                    if pos != -1:
                        begin, end = pos, pos+2
                        replace_flag = True
                    else:
                        pos = line.find('=')
                        begin, end = pos, pos+1
                        replace_flag = False
                    keyname = line[:begin].strip()
                    #check keyname
                    if keyname in self._env:
                        raise KeyError("Settings key %s is alread defined in env, please change it's name" % keyname)
                    rest = line[end:].strip()
                    #if key= then value will be set ''
                    if rest == '':
                        # 'None' is later evaluated to the None object
                        value = 'None'
                    else:
                        # re-read the value with the tokenizer so a logical
                        # value may span several physical lines
                        f.seek(lastpos+end)
                        try:
                            value, iden_existed = self.__read_line(f)
                            #add pre variables process
                            value = self._pre_var(value)
                        except Exception as e:
                            print_exc()
                            raise Exception("Parsing ini file error in %s:%d:%s" % (filename or self._inifile, lineno, line))
                    # NOTE(review): for an empty value in lazy mode,
                    # iden_existed is unbound here — confirm upstream intent
                    if self._lazy:
                        if iden_existed:
                            v = EvalValue(value, filename or self._inifile, lineno, line)
                        else:
                            v = value
                    else:
                        if self._raw:
                            v = RawValue(self._inifile, lineno, value, replace_flag)
                        else:
                            try:
                                v = eval_value(value, self.env(), self[sec_name], self._encoding, self._import_env)
                            except Exception as e:
                                print_exc()
                                raise Exception("Converting value (%s) error in %s:%d:%s" % (value, filename or self._inifile, lineno, line))
                    section.add(keyname, v, comments, replace=replace_flag)
                    comments = []
                else:
                    # bare text between entries is treated as comment material
                    comments.append(line)

    def save(self, filename=None):
        """Serialize all non-env sections; target may be a path, a
        file-like object, or stdout when nothing is given."""
        if not filename:
            filename = self.filename
        if not filename:
            filename = sys.stdout
        if isinstance(filename, (str, unicode)):
            f = open(filename, 'wb')
            need_close = True
        else:
            f = filename
            need_close = False
        print >> f, '#coding=%s' % self._encoding
        for s in self.keys():
            if s in self._env:
                continue  # injected env entries are not persisted
            section = self[s]
            section.dumps(f, convertors=self._convertors)
        if need_close:
            f.close()

    def __read_line(self, f):
        """
        Get logic line according the syntax not the physical line
        It'll return the line text and if there is identifier existed
        return line, bool
        """
        g = tokenize.generate_tokens(f.readline)
        buf = []
        time = 0
        iden_existed = False
        while 1:
            v = g.next()
            tokentype, t, start, end, line = v
            # NOTE(review): 54 is presumably tokenize.NL on this Python
            # version — confirm; symbolic constant would be safer
            if tokentype == 54:
                continue
            if tokentype in (token.INDENT, token.DEDENT, tokenize.COMMENT):
                continue
            if tokentype == token.NAME:
                iden_existed = True
            if tokentype == token.NEWLINE:
                return ''.join(buf), iden_existed
            else:
                # skip the first '=' (the key/value separator itself)
                if t == '=' and time == 0:
                    time += 1
                    continue
                buf.append(t)

    def __setitem__(self, key, value):
        # first assignment wins: existing sections are never overwritten
        if key not in self:
            super(Ini, self).__setitem__(key, value)

    def update(self, value):
        for k, v in value.items():
            self.set_var(k, v)

    def add(self, sec_name, comments=None, info=None):
        """Return the existing section or create a new one."""
        if sec_name in self:
            section = self[sec_name]
        else:
            section = Section(sec_name, comments, self._encoding, root=self, info=info)
            self[sec_name] = section
        return section

    def __str__(self):
        buf = StringIO.StringIO()
        self.save(buf)
        return buf.getvalue()

    def get_var(self, key, default=None):
        """Look up 'section/key' (or just 'section'), with a default."""
        obj = self
        for i in key.split('/', 1):
            obj = obj.get(i)
            if obj is None:
                break
        if obj is None:
            return default
        return obj

    def set_var(self, key, value):
        """Set 'section/key' (or a top-level key), creating the section."""
        s = key.split('/', 1)
        obj = self
        for i in s[:-1]:
            obj = obj.add(i)
        obj[s[-1]] = value
        return True

    def del_var(self, key):
        """Delete 'section/key'; returns True when something was removed."""
        s = key.split('/', 1)
        obj = self
        for i in s[:-1]:
            obj = obj.get(i)
            if obj is None:
                return False
        if s[-1] in obj:
            del obj[s[-1]]
            flag = True
        else:
            flag = False
        return flag

    def items(self):
        # sections only — injected env entries are filtered out
        return ((k, self[k]) for k in self.keys() if not k in self._env)

    def env(self):
        """Evaluation namespace: os.environ overlaid with this object."""
        if self._import_env:
            d = {}
            d.update(os.environ.copy())
            d.update(dict(self))
            return d
        return self

    def freeze(self):
        """
        Process all EvalValue to real value
        """
        self._lazy = False
        for k, v in self.items():
            if k in self._env:
                continue
            for _k, _v in v.items():
                if isinstance(_v, Lazy):
                    # NOTE(review): reads self.writable while __init__ stores
                    # self._writable — confirm a property exposes it
                    if self.writable:
                        _v.get()
                    else:
                        v.__setitem__(_k, _v.get(), replace=True)
                    del _v
        self._globals = SortedDict()
import sys import re, sys, random from sorteddict import SortedDict # {0x79, 0x56, 0x34, 0x12} def rand_hw(): res = [] for i in range(4): res.append(random.randint(1, 254)) #res.insert(0, random.randint(1, 254)) res.sort(reverse=True) return "{" + ",".join([hex(x) for x in res]) + "}" DATA = SortedDict() DATA["CONFIG_FREQUENCY"] = { "name": "Frequency", "depends": [], "default": 902, "type": "choices", "values": [902, 868, 433], "help": "Radio frequency for the clock" } DATA["OPTION_TIME_DISPLAY"] = { "name": "Time display options", "depends": [], "default": 0,
import time, re from datetime import tzinfo, timedelta, datetime, date, time as time_ from sorteddict import SortedDict __timezone__ = None __local_timezone__ = None __timezones__ = SortedDict() class DateError(Exception): pass class TimeFormatError(Exception): pass DEFAULT_DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.5200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%Y/%m/%d %H:%M:%S', # '2006/10/25 14:30:59' '%Y/%m/%d %H:%M:%S.%f', # '2006/10/25 14:30:59.5200' '%Y/%m/%d %H:%M', # '2006/10/25 14:30' '%Y/%m/%d', # '2006/10/25 ' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.5200' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%Y', # '10/25/2006' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
class Ini(SortedDict):
    """Ini file container: sections of evaluated key/value settings, with
    include support relative to a base path and #{name} pre-variables.
    This variant reports the failing key when freeze() cannot resolve it."""

    def __init__(self, inifile='', commentchar=None, encoding=None, env=None,
                 convertors=None, lazy=False, writable=False, raw=False,
                 import_env=True, basepath='.', pre_variables=None):
        """
        lazy is used to parse first but not deal at time, and only when
        the user invoke finish() function, it'll parse the data.
        import_env will import all environment variables
        if inifile is dict, then automatically add to ini object
        """
        super(Ini, self).__init__()
        if isinstance(inifile, dict):
            # dict input is loaded directly as section data, not from disk
            self._inifile = ''
            data = inifile
        else:
            self._inifile = inifile
            data = None
        self._basepath = basepath
        self._commentchar = commentchar or __default_env__.get(
            'commentchar', '#')
        self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
        self._env = __default_env__.get('env', {}).copy()
        self._env.update(env or {})
        # expose the builtin set() inside evaluated ini expressions
        self._env['set'] = set
        self.update(self._env)
        self._globals = SortedDict()
        self._pre_variables = pre_variables or {}
        self._import_env = import_env
        if self._import_env:
            self._globals.update(os.environ)
        self._convertors = __default_env__.get('convertors', {}).copy()
        self._convertors.update(convertors or {})
        self._lazy = lazy
        self._writable = writable
        self._raw = raw
        if lazy:
            self._globals.update(self._env.copy())
        if self._inifile:
            self.read(self._inifile)
        if data:
            # populate sections from the dict form: {section: {key: value}}
            for k, v in data.items():
                s = self.add(k)
                for _k, _v in v.items():
                    s[_k] = _v

    def set_filename(self, filename):
        self._inifile = filename

    def get_filename(self):
        return self._inifile

    def set_basepath(self, basepath):
        self._basepath = basepath

    def set_pre_variables(self, v):
        self._pre_variables = v or {}

    filename = property(get_filename, set_filename)

    def _pre_var(self, value):
        """
        replace predefined variables, the format is #{name}
        """
        def sub_(m):
            # unknown names are replaced with the empty string
            return self._pre_variables.get(m.group()[2:-1].strip(), '')
        return r_pre_var.sub(sub_, value)

    def read(self, fobj, filename=''):
        """Parse an ini file (path or file-like object) into sections."""
        encoding = None
        if isinstance(fobj, (str, unicode)):
            f = open(fobj, 'rb')
            text = f.read()
            f.close()
        else:
            text = fobj.read()
        text = text + '\n'
        begin = 0
        # detect and skip a BOM, remembering the implied encoding
        if text.startswith(codecs.BOM_UTF8):
            begin = 3
            encoding = 'UTF-8'
        elif text.startswith(codecs.BOM_UTF16):
            begin = 2
            encoding = 'UTF-16'
        if not encoding:
            # no BOM: probe for valid UTF-8, otherwise use the default
            try:
                unicode(text, 'UTF-8')
                encoding = 'UTF-8'
            except:
                encoding = defaultencoding
        self._encoding = encoding
        f = StringIO.StringIO(text)
        f.seek(begin)
        lineno = 0
        comments = []
        status = 'c'  # NOTE(review): never read afterwards — appears unused
        section = None
        while 1:
            lastpos = f.tell()
            line = f.readline()
            lineno += 1
            if not line:
                break
            line = line.strip()
            if line:
                if line.startswith(self._commentchar):
                    if lineno == 1: #first comment line
                        # honor a '#coding=...' declaration on the first line
                        b = r_encoding.search(line[1:])
                        if b:
                            self._encoding = b.groups()[0]
                        continue
                    comments.append(line)
                elif line.startswith('[') and line.endswith(']'):
                    sec_name = line[1:-1].strip()
                    #process include notation
                    if sec_name.startswith('include:'):
                        # included paths are resolved against the base path
                        _filename = sec_name[8:].strip()
                        _file = os.path.join(self._basepath, _filename)
                        if os.path.exists(_file):
                            old_encoding = self._encoding
                            old_filename = self.filename
                            self.set_filename(_file)
                            self.read(_file, filename=_file)
                            self.set_filename(old_filename)
                            self._encoding = old_encoding
                        else:
                            import warnings
                            warnings.warn(Warning(
                                "Can't find the file [%s], so just skip it" % _filename), stacklevel=2)
                        continue
                    info = RawValue(self._inifile, lineno, sec_name)
                    section = self.add(sec_name, comments, info=info)
                    comments = []
                elif '=' in line:
                    if section is None:
                        raise Exception(
                            "No section found, please define it first in %s file" % self.filename)
                    #if find <=, then it'll replace the old value for mutable variables
                    #because the default behavior will merge list and dict
                    pos = line.find('<=')
                    if pos != -1:
                        begin, end = pos, pos + 2
                        replace_flag = True
                    else:
                        pos = line.find('=')
                        begin, end = pos, pos + 1
                        replace_flag = False
                    keyname = line[:begin].strip()
                    #check keyname
                    if keyname in self._env:
                        raise KeyError(
                            "Settings key %s is alread defined in env, please change it's name" % keyname)
                    rest = line[end:].strip()
                    #if key= then value will be set ''
                    if rest == '':
                        # 'None' is later evaluated to the None object
                        value = 'None'
                    else:
                        # re-read the value with the tokenizer so a logical
                        # value may span several physical lines
                        f.seek(lastpos + end)
                        try:
                            value, iden_existed = self.__read_line(f)
                            #add pre variables process
                            value = self._pre_var(value)
                        except Exception as e:
                            print_exc()
                            raise Exception(
                                "Parsing ini file error in %s:%d:%s" % (filename or self._inifile, lineno, line))
                    # NOTE(review): for an empty value in lazy mode,
                    # iden_existed is unbound here — confirm upstream intent
                    if self._lazy:
                        if iden_existed:
                            v = EvalValue(value, filename or self._inifile, lineno, line)
                        else:
                            v = value
                    else:
                        if self._raw:
                            v = RawValue(self._inifile, lineno, value, replace_flag)
                        else:
                            try:
                                v = eval_value(value, self.env(), self[sec_name], self._encoding, self._import_env)
                            except Exception as e:
                                print_exc()
                                raise Exception(
                                    "Converting value (%s) error in %s:%d:%s" % (value, filename or self._inifile, lineno, line))
                    section.add(keyname, v, comments, replace=replace_flag)
                    comments = []
                else:
                    # bare text between entries is treated as comment material
                    comments.append(line)

    def save(self, filename=None):
        """Serialize all non-env sections; target may be a path, a
        file-like object, or stdout when nothing is given."""
        if not filename:
            filename = self.filename
        if not filename:
            filename = sys.stdout
        if isinstance(filename, (str, unicode)):
            f = open(filename, 'wb')
            need_close = True
        else:
            f = filename
            need_close = False
        print >> f, '#coding=%s' % self._encoding
        for s in self.keys():
            if s in self._env:
                continue  # injected env entries are not persisted
            section = self[s]
            section.dumps(f, convertors=self._convertors)
        if need_close:
            f.close()

    def __read_line(self, f):
        """
        Get logic line according the syntax not the physical line
        It'll return the line text and if there is identifier existed
        return line, bool
        """
        g = tokenize.generate_tokens(f.readline)
        buf = []
        time = 0
        iden_existed = False
        while 1:
            v = g.next()
            tokentype, t, start, end, line = v
            # NOTE(review): 54 is presumably tokenize.NL on this Python
            # version — confirm; symbolic constant would be safer
            if tokentype == 54:
                continue
            if tokentype in (token.INDENT, token.DEDENT, tokenize.COMMENT):
                continue
            if tokentype == token.NAME:
                iden_existed = True
            if tokentype == token.NEWLINE:
                return ''.join(buf), iden_existed
            else:
                # skip the first '=' (the key/value separator itself)
                if t == '=' and time == 0:
                    time += 1
                    continue
                buf.append(t)

    def __setitem__(self, key, value):
        # first assignment wins: existing sections are never overwritten
        if key not in self:
            super(Ini, self).__setitem__(key, value)

    def update(self, value):
        for k, v in value.items():
            self.set_var(k, v)

    def add(self, sec_name, comments=None, info=None):
        """Return the existing section or create a new one."""
        if sec_name in self:
            section = self[sec_name]
        else:
            section = Section(sec_name, comments, self._encoding, root=self, info=info)
            self[sec_name] = section
        return section

    def __str__(self):
        buf = StringIO.StringIO()
        self.save(buf)
        return buf.getvalue()

    def get_var(self, key, default=None):
        """Look up 'section/key' (or just 'section'), with a default."""
        obj = self
        for i in key.split('/', 1):
            obj = obj.get(i)
            if obj is None:
                break
        if obj is None:
            return default
        return obj

    def set_var(self, key, value):
        """Set 'section/key' (or a top-level key), creating the section."""
        s = key.split('/', 1)
        obj = self
        for i in s[:-1]:
            obj = obj.add(i)
        obj[s[-1]] = value
        return True

    def del_var(self, key):
        """Delete 'section/key'; returns True when something was removed."""
        s = key.split('/', 1)
        obj = self
        for i in s[:-1]:
            obj = obj.get(i)
            if obj is None:
                return False
        if s[-1] in obj:
            del obj[s[-1]]
            flag = True
        else:
            flag = False
        return flag

    def items(self):
        # sections only — injected env entries are filtered out
        return ((k, self[k]) for k in self.keys() if not k in self._env)

    def env(self):
        """Evaluation namespace: os.environ overlaid with this object."""
        if self._import_env:
            d = {}
            d.update(os.environ.copy())
            d.update(dict(self))
            return d
        return self

    def freeze(self):
        """
        Process all EvalValue to real value
        """
        self._lazy = False
        for k, v in self.items():
            if k in self._env:
                continue
            for _k, _v in v.items():
                if isinstance(_v, Lazy):
                    # NOTE(review): reads self.writable while __init__ stores
                    # self._writable — confirm a property exposes it
                    if self.writable:
                        _v.get()
                    else:
                        try:
                            v.__setitem__(_k, _v.get(), replace=True)
                        except:
                            # surface which key failed before re-raising
                            print "Error ini key:", _k
                            raise
                    del _v
        self._globals = SortedDict()
def __init__(self, db, fieldspecs=None):
    """Open (or create) a dBase-style table.

    db         -- a filename or a seekable binary file object
    fieldspecs -- optional ordered mapping name -> field object (each
                  exposing .type, .size, .deci); when omitted the specs
                  are recovered from the existing file header
    """
    if isinstance(db, basestring):
        # NOTE(review): 'w+b' truncates an existing file when a path is
        # passed -- presumably intended for fresh tables; confirm.
        db = open(db, 'w+b')
    self.db = db
    self.fields = fieldspecs
    # try to read the header infos from the db
    header = db.read(32)
    if header:
        header = struct.unpack(self.header_fmt, header)
    # if the user passed fieldspecs
    if self.fields:
        # obtain dbf meta data from them: 32 bytes per field descriptor
        # + 32-byte header + 1 terminator byte; records start with a
        # 1-byte deletion flag
        self.numfields = len(self.fields)
        self.lenheader = self.numfields * 32 + 33
        self.lenrecord = 1
        self.record_fmt = '1s'
        for field in self.fields.itervalues():
            self.lenrecord += field.size
            self.record_fmt += '%ds' % field.size
        # if we have an header
        if header:
            # check the header's infos with the ones we have obtained
            # from our fieldspecs
            self.numrec, lenheader, lenrecord = header[-3:]
            assert (lenheader == self.lenheader and
                    lenrecord == self.lenrecord), \
                "database's fields doesn't match provided fields"
        # if no header is present, write it
        else:
            self.numrec = 0
            # header (year is stored relative to 1900)
            now = datetime.datetime.now()
            y, m, d = now.year - 1900, now.month, now.day
            header = struct.pack(self.header_fmt, self.version, y, m, d,
                                 self.numrec, self.lenheader,
                                 self.lenrecord)
            self.db.write(header)
            # field specs: name padded to 11 NUL bytes
            for fname, field in self.fields.iteritems():
                fname = fname.ljust(11, '\0')
                field = struct.pack(self.fields_fmt, fname, field.type,
                                    field.size, field.deci)
                self.db.write(field)
            # header terminator (\r) + end-of-file marker (\x1A)
            self.db.write('\r\x1A')
    else:
        # if we have no fieldspecs, but we have an header in our dbf,
        # obtain the fieldspecs from it.
        if header:
            self.numrec, self.lenheader, self.lenrecord = header[-3:]
            self.numfields = (self.lenheader - 33) // 32
            self.fields = SortedDict()
            self.record_fmt = '1s'
            for fieldno in xrange(self.numfields):
                fieldinfo = struct.unpack(self.fields_fmt, db.read(32))
                name, type, size, deci = fieldinfo
                # names are NUL-padded on disk
                name = name.partition('\0')[0]
                self.fields[name] = fields.guessField(type, size, deci)
                self.record_fmt += '%ds' % size
        else:
            # if we have no header and no fieldspecs, we can't help it
            raise TypeError("nor fields or header present")
    # precompute each field's byte offset inside a record (after the
    # 1-byte deletion flag)
    i = 0
    self._fieldpos = []
    for field in self.fields.itervalues():
        self._fieldpos.append(i)
        i += field.size
def test_acts_as_dict(values): items = list(zip(range(len(values)), values)) d = SortedDict(items) for k, v in items: assert d[k] == v
class DBF(object):
    """Minimal dBase-III-style table stored in a seekable binary file.

    Records are addressed by integer index ('pk'); each record starts
    with a one-byte deletion flag followed by fixed-width field data.
    """

    # version byte written into the file header
    version = 3
    # header: version, Y/M/D of last update, record count, header length,
    # record length, 20 pad bytes
    header_fmt = '<BBBBLHH20x'
    # field descriptor: 11-byte name, type char, size, decimal count
    fields_fmt = '<11sc4xBB14x'

    def __init__(self, db, fieldspecs=None):
        """Open (or create) the table; see class docstring.

        db         -- filename or seekable binary file object
        fieldspecs -- optional ordered mapping name -> field object
                      (.type, .size, .deci); read from the header when
                      omitted
        """
        if isinstance(db, basestring):
            # NOTE(review): 'w+b' truncates an existing file when a path
            # is passed -- presumably intended for fresh tables; confirm.
            db = open(db, 'w+b')
        self.db = db
        self.fields = fieldspecs
        # try to read the header infos from the db
        header = db.read(32)
        if header:
            header = struct.unpack(self.header_fmt, header)
        # if the user passed fieldspecs
        if self.fields:
            # obtain dbf meta data from them: 32 bytes per descriptor
            # + 32-byte header + 1 terminator; records start with a
            # 1-byte deletion flag
            self.numfields = len(self.fields)
            self.lenheader = self.numfields * 32 + 33
            self.lenrecord = 1
            self.record_fmt = '1s'
            for field in self.fields.itervalues():
                self.lenrecord += field.size
                self.record_fmt += '%ds' % field.size
            # if we have an header
            if header:
                # check the header's infos with the ones we have
                # obtained from our fieldspecs
                self.numrec, lenheader, lenrecord = header[-3:]
                assert (lenheader == self.lenheader and
                        lenrecord == self.lenrecord), \
                    "database's fields doesn't match provided fields"
            # if no header is present, write it
            else:
                self.numrec = 0
                # header (year is stored relative to 1900)
                now = datetime.datetime.now()
                y, m, d = now.year - 1900, now.month, now.day
                header = struct.pack(self.header_fmt, self.version, y, m,
                                     d, self.numrec, self.lenheader,
                                     self.lenrecord)
                self.db.write(header)
                # field specs: name padded to 11 NUL bytes
                for fname, field in self.fields.iteritems():
                    fname = fname.ljust(11, '\0')
                    field = struct.pack(self.fields_fmt, fname, field.type,
                                        field.size, field.deci)
                    self.db.write(field)
                # header terminator (\r) + end-of-file marker (\x1A)
                self.db.write('\r\x1A')
        else:
            # if we have no fieldspecs, but we have an header in our dbf,
            # obtain the fieldspecs from it.
            if header:
                self.numrec, self.lenheader, self.lenrecord = header[-3:]
                self.numfields = (self.lenheader - 33) // 32
                self.fields = SortedDict()
                self.record_fmt = '1s'
                for fieldno in xrange(self.numfields):
                    fieldinfo = struct.unpack(self.fields_fmt, db.read(32))
                    name, type, size, deci = fieldinfo
                    # names are NUL-padded on disk
                    name = name.partition('\0')[0]
                    self.fields[name] = fields.guessField(type, size, deci)
                    self.record_fmt += '%ds' % size
            else:
                # if we have no header and no fieldspecs, we can't help it
                raise TypeError("nor fields or header present")
        # precompute each field's byte offset within a record (after the
        # 1-byte deletion flag)
        i = 0
        self._fieldpos = []
        for field in self.fields.itervalues():
            self._fieldpos.append(i)
            i += field.size

    def gotoField(self, fname):
        """Seek to column *fname* within the current record."""
        i = self.fields.keyOrder.index(fname)
        self.db.seek(self._currec + 1 + self._fieldpos[i])

    def newID(self):
        """ return a new record ID. """
        # re-read the record count from the header (offset 4) so IDs stay
        # consistent even if another writer touched the file
        self.db.seek(4)
        self.numrec = struct.unpack('<L', self.db.read(4))[0]
        return self.numrec

    def increase_numrec(self):
        # bump the record counter and persist it back into the header
        self.numrec += 1
        self.db.seek(4)
        self.db.write(struct.pack('<L', self.numrec))

    def gotoRecord(self, recIndex):
        """ move before the record specified by the index recIndex """
        if recIndex > self.numrec:
            raise KeyError(recIndex)
        self._currec = self.lenheader + self.lenrecord * (recIndex)
        self.db.seek(self._currec)

    def update(self, record):
        """Rewrite only the fields present in *record* (keyed by 'pk')."""
        recId = record['pk']
        self.gotoRecord(recId)
        # skip the deletion-flag byte
        dflag = self.db.read(1)
        for fname, field in self.fields.iteritems():
            if fname in record:
                self.db.write(field.encode(record[fname]))
                self.db.flush()
            else:
                # untouched field: just advance past it
                self.db.seek(field.size, 1)
        self.db.flush()

    def insert(self, record):
        """Append *record* at the next free index; sets record['pk']."""
        recId = self.newID()
        self.gotoRecord(recId)
        record['pk'] = recId
        # deletion flag: a space means "not deleted"
        self.db.write(' ')
        data = ''
        for fname, field in self.fields.iteritems():
            data += field.encode(record[fname])
        self.db.write(data)
        # rewrite the end-of-file marker after the new record
        self.db.write('\x1A')
        self.increase_numrec()
        self.db.flush()

    def _iterselect(self, fields=None):
        # generator over every record in the table
        for recId in xrange(self.numrec):
            yield self.select(recId, fields)

    def select(self, recId=None, fields=None):
        """Return the record dict at *recId*, restricted to *fields* when
        given; with recId=None, return an iterator over all records."""
        if recId is None:
            return self._iterselect(fields)
        if not recId in self:
            raise KeyError(recId)
        if not fields:
            fields = self.fields.keys()
        self.gotoRecord(recId)
        res = {'pk': recId}
        # skip the deletion-flag byte
        self.db.read(1)
        for fname, field in self.fields.iteritems():
            if fname in fields:
                res[fname] = field.decode(self.db.read(field.size))
            else:
                self.db.seek(field.size, 1)
        return res

    def close(self):
        self.db.close()

    def __contains__(self, recId):
        # valid record indexes are 0 .. numrec-1
        if isinstance(recId, int) and recId < self.numrec:
            return True
        return False

    def __iter__(self):
        for i in xrange(self.numrec):
            yield self.select(i)

    def __len__(self):
        return self.numrec

    def __getitem__(self, recordID):
        return self.select(recordID)

    def __setitem__(self, recordID, dict):
        # NOTE(review): delegates to select(), not update() -- looks like
        # a bug (assignment performs a read); confirm intended behavior.
        self.select(recordID, dict)
def __init__(self, db, fieldspecs=None):
    """Open (or create) a dBase-style table.

    db         -- a filename or a seekable binary file object
    fieldspecs -- optional ordered mapping name -> field object (each
                  exposing .type, .size, .deci); when omitted the specs
                  are recovered from the existing file header
    """
    if isinstance(db, basestring):
        # NOTE(review): 'w+b' truncates an existing file when a path is
        # passed -- presumably intended for fresh tables; confirm.
        db = open(db, 'w+b')
    self.db = db
    self.fields = fieldspecs
    # try to read the header infos from the db
    header = db.read(32)
    if header:
        header = struct.unpack(self.header_fmt, header)
    # if the user passed fieldspecs
    if self.fields:
        # obtain dbf meta data from them: 32 bytes per field descriptor
        # + 32-byte header + 1 terminator byte; records start with a
        # 1-byte deletion flag
        self.numfields = len(self.fields)
        self.lenheader = self.numfields * 32 + 33
        self.lenrecord = 1
        self.record_fmt = '1s'
        for field in self.fields.itervalues():
            self.lenrecord += field.size
            self.record_fmt += '%ds' % field.size
        # if we have an header
        if header:
            # check the header's infos with the ones we have obtained
            # from our fieldspecs
            self.numrec, lenheader, lenrecord = header[-3:]
            assert (lenheader == self.lenheader and
                    lenrecord == self.lenrecord), \
                "database's fields doesn't match provided fields"
        # if no header is present, write it
        else:
            self.numrec = 0
            # header (year is stored relative to 1900)
            now = datetime.datetime.now()
            y, m, d = now.year - 1900, now.month, now.day
            header = struct.pack(self.header_fmt, self.version, y, m, d,
                                 self.numrec, self.lenheader,
                                 self.lenrecord)
            self.db.write(header)
            # field specs: name padded to 11 NUL bytes
            for fname, field in self.fields.iteritems():
                fname = fname.ljust(11, '\0')
                field = struct.pack(self.fields_fmt, fname, field.type,
                                    field.size, field.deci)
                self.db.write(field)
            # header terminator (\r) + end-of-file marker (\x1A)
            self.db.write('\r\x1A')
    else:
        # if we have no fieldspecs, but we have an header in our dbf,
        # obtain the fieldspecs from it.
        if header:
            self.numrec, self.lenheader, self.lenrecord = header[-3:]
            self.numfields = (self.lenheader - 33) // 32
            self.fields = SortedDict()
            self.record_fmt = '1s'
            for fieldno in xrange(self.numfields):
                fieldinfo = struct.unpack(self.fields_fmt, db.read(32))
                name, type, size, deci = fieldinfo
                # names are NUL-padded on disk
                name = name.partition('\0')[0]
                self.fields[name] = fields.guessField(type, size, deci)
                self.record_fmt += '%ds' % size
        else:
            # if we have no header and no fieldspecs, we can't help it
            raise TypeError("nor fields or header present")
    # precompute each field's byte offset inside a record (after the
    # 1-byte deletion flag)
    i = 0
    self._fieldpos = []
    for field in self.fields.itervalues():
        self._fieldpos.append(i)
        i += field.size
class DBF(object):
    """Minimal dBase-III-style table stored in a seekable binary file.

    Records are addressed by integer index ('pk'); each record starts
    with a one-byte deletion flag followed by fixed-width field data.
    """

    # version byte written into the file header
    version = 3
    # header: version, Y/M/D of last update, record count, header length,
    # record length, 20 pad bytes
    header_fmt = '<BBBBLHH20x'
    # field descriptor: 11-byte name, type char, size, decimal count
    fields_fmt = '<11sc4xBB14x'

    def __init__(self, db, fieldspecs=None):
        """Open (or create) the table; see class docstring.

        db         -- filename or seekable binary file object
        fieldspecs -- optional ordered mapping name -> field object
                      (.type, .size, .deci); read from the header when
                      omitted
        """
        if isinstance(db, basestring):
            # NOTE(review): 'w+b' truncates an existing file when a path
            # is passed -- presumably intended for fresh tables; confirm.
            db = open(db, 'w+b')
        self.db = db
        self.fields = fieldspecs
        # try to read the header infos from the db
        header = db.read(32)
        if header:
            header = struct.unpack(self.header_fmt, header)
        # if the user passed fieldspecs
        if self.fields:
            # obtain dbf meta data from them: 32 bytes per descriptor
            # + 32-byte header + 1 terminator; records start with a
            # 1-byte deletion flag
            self.numfields = len(self.fields)
            self.lenheader = self.numfields * 32 + 33
            self.lenrecord = 1
            self.record_fmt = '1s'
            for field in self.fields.itervalues():
                self.lenrecord += field.size
                self.record_fmt += '%ds' % field.size
            # if we have an header
            if header:
                # check the header's infos with the ones we have
                # obtained from our fieldspecs
                self.numrec, lenheader, lenrecord = header[-3:]
                assert (lenheader == self.lenheader and
                        lenrecord == self.lenrecord), \
                    "database's fields doesn't match provided fields"
            # if no header is present, write it
            else:
                self.numrec = 0
                # header (year is stored relative to 1900)
                now = datetime.datetime.now()
                y, m, d = now.year - 1900, now.month, now.day
                header = struct.pack(self.header_fmt, self.version, y, m,
                                     d, self.numrec, self.lenheader,
                                     self.lenrecord)
                self.db.write(header)
                # field specs: name padded to 11 NUL bytes
                for fname, field in self.fields.iteritems():
                    fname = fname.ljust(11, '\0')
                    field = struct.pack(self.fields_fmt, fname, field.type,
                                        field.size, field.deci)
                    self.db.write(field)
                # header terminator (\r) + end-of-file marker (\x1A)
                self.db.write('\r\x1A')
        else:
            # if we have no fieldspecs, but we have an header in our dbf,
            # obtain the fieldspecs from it.
            if header:
                self.numrec, self.lenheader, self.lenrecord = header[-3:]
                self.numfields = (self.lenheader - 33) // 32
                self.fields = SortedDict()
                self.record_fmt = '1s'
                for fieldno in xrange(self.numfields):
                    fieldinfo = struct.unpack(self.fields_fmt, db.read(32))
                    name, type, size, deci = fieldinfo
                    # names are NUL-padded on disk
                    name = name.partition('\0')[0]
                    self.fields[name] = fields.guessField(type, size, deci)
                    self.record_fmt += '%ds' % size
            else:
                # if we have no header and no fieldspecs, we can't help it
                raise TypeError("nor fields or header present")
        # precompute each field's byte offset within a record (after the
        # 1-byte deletion flag)
        i = 0
        self._fieldpos = []
        for field in self.fields.itervalues():
            self._fieldpos.append(i)
            i += field.size

    def gotoField(self, fname):
        """Seek to column *fname* within the current record."""
        i = self.fields.keyOrder.index(fname)
        self.db.seek(self._currec + 1 + self._fieldpos[i])

    def newID(self):
        """ return a new record ID. """
        # re-read the record count from the header (offset 4) so IDs stay
        # consistent even if another writer touched the file
        self.db.seek(4)
        self.numrec = struct.unpack('<L', self.db.read(4))[0]
        return self.numrec

    def increase_numrec(self):
        # bump the record counter and persist it back into the header
        self.numrec += 1
        self.db.seek(4)
        self.db.write(struct.pack('<L', self.numrec))

    def gotoRecord(self, recIndex):
        """ move before the record specified by the index recIndex """
        if recIndex > self.numrec:
            raise KeyError(recIndex)
        self._currec = self.lenheader + self.lenrecord * (recIndex)
        self.db.seek(self._currec)

    def update(self, record):
        """Rewrite only the fields present in *record* (keyed by 'pk')."""
        recId = record['pk']
        self.gotoRecord(recId)
        # skip the deletion-flag byte
        dflag = self.db.read(1)
        for fname, field in self.fields.iteritems():
            if fname in record:
                self.db.write(field.encode(record[fname]))
                self.db.flush()
            else:
                # untouched field: just advance past it
                self.db.seek(field.size, 1)
        self.db.flush()

    def insert(self, record):
        """Append *record* at the next free index; sets record['pk']."""
        recId = self.newID()
        self.gotoRecord(recId)
        record['pk'] = recId
        # deletion flag: a space means "not deleted"
        self.db.write(' ')
        data = ''
        for fname, field in self.fields.iteritems():
            data += field.encode(record[fname])
        self.db.write(data)
        # rewrite the end-of-file marker after the new record
        self.db.write('\x1A')
        self.increase_numrec()
        self.db.flush()

    def _iterselect(self, fields=None):
        # generator over every record in the table
        for recId in xrange(self.numrec):
            yield self.select(recId, fields)

    def select(self, recId=None, fields=None):
        """Return the record dict at *recId*, restricted to *fields* when
        given; with recId=None, return an iterator over all records."""
        if recId is None:
            return self._iterselect(fields)
        if not recId in self:
            raise KeyError(recId)
        if not fields:
            fields = self.fields.keys()
        self.gotoRecord(recId)
        res = {'pk': recId}
        # skip the deletion-flag byte
        self.db.read(1)
        for fname, field in self.fields.iteritems():
            if fname in fields:
                res[fname] = field.decode(self.db.read(field.size))
            else:
                self.db.seek(field.size, 1)
        return res

    def close(self):
        self.db.close()

    def __contains__(self, recId):
        # valid record indexes are 0 .. numrec-1
        if isinstance(recId, int) and recId < self.numrec:
            return True
        return False

    def __iter__(self):
        for i in xrange(self.numrec):
            yield self.select(i)

    def __len__(self):
        return self.numrec

    def __getitem__(self, recordID):
        return self.select(recordID)

    def __setitem__(self, recordID, dict):
        # NOTE(review): delegates to select(), not update() -- looks like
        # a bug (assignment performs a read); confirm intended behavior.
        self.select(recordID, dict)
def __init__(self, app, root_dir=None, file_ext=None): self.root_dir = root_dir if root_dir is not None else app.config['POSTS_DIRECTORY'] self.file_ext = file_ext if file_ext is not None else app.config['POSTS_FILE_EXTENSION'] self._app = app self._cache = SortedDict(key=lambda p: p.step) self._initialise_cache()
class Ini(SortedDict):
    """Ordered INI container: each [section] becomes a Section entry and
    right-hand sides are evaluated as Python expressions (unless raw or
    lazy mode is selected)."""

    def __init__(self, inifile='', commentchar=None, encoding=None,
                 env=None, convertors=None, lazy=False, writable=False,
                 raw=False, import_env=True):
        """
        lazy is used to parse first but not deal at time, and only when
        the user invoke finish() function, it'll parse the data.

        import_env will import all environment variables
        """
        super(Ini, self).__init__()
        self._inifile = inifile
        self._commentchar = commentchar or __default_env__.get(
            'commentchar', '#')
        self._encoding = encoding or __default_env__.get('encoding', 'utf-8')
        # base evaluation environment: defaults overlaid with caller's env
        self._env = __default_env__.get('env', {}).copy()
        self._env.update(env or {})
        self._env['set'] = set
        self.update(self._env)
        self._globals = SortedDict()
        self._import_env = import_env
        if self._import_env:
            self._globals.update(os.environ)
        self._convertors = __default_env__.get('convertors', {}).copy()
        self._convertors.update(convertors or {})
        self._lazy = lazy
        self._writable = writable
        self._raw = raw
        if lazy:
            self._globals.update(self._env.copy())
        if self._inifile:
            self.read(self._inifile)

    def set_filename(self, filename):
        self._inifile = filename

    def get_filename(self):
        return self._inifile

    filename = property(get_filename, set_filename)

    def read(self, fobj, filename=''):
        """Parse INI text from a filename or a file-like object into self.

        Handles BOM/encoding detection, '#coding=' first-line comments,
        [include:path] sections, and '<=' replace-assignment.
        """
        encoding = None
        if isinstance(fobj, (str, unicode)):
            f = open(fobj, 'rb')
            text = f.read()
            f.close()
        else:
            text = fobj.read()
        text = text + '\n'
        begin = 0
        # strip a UTF-8/UTF-16 BOM and remember the implied encoding
        if text.startswith(codecs.BOM_UTF8):
            begin = 3
            encoding = 'UTF-8'
        elif text.startswith(codecs.BOM_UTF16):
            begin = 2
            encoding = 'UTF-16'
        if not encoding:
            # no BOM: assume UTF-8 when the text decodes as such
            try:
                unicode(text, 'UTF-8')
                encoding = 'UTF-8'
            except:
                encoding = defaultencoding
        self._encoding = encoding
        f = StringIO.StringIO(text)
        f.seek(begin)
        lineno = 0
        comments = []
        section = None
        while 1:
            lastpos = f.tell()
            line = f.readline()
            lineno += 1
            if not line:
                break
            line = line.strip()
            if line:
                if line.startswith(self._commentchar):
                    if lineno == 1:  # first comment line
                        b = r_encoding.search(line[1:])
                        if b:
                            # honour an explicit '#coding=...' declaration
                            self._encoding = b.groups()[0]
                        continue
                    comments.append(line)
                elif line.startswith('[') and line.endswith(']'):
                    sec_name = line[1:-1].strip()
                    # process include notation
                    if sec_name.startswith('include:'):
                        _filename = sec_name[8:].strip()
                        _filename = os.path.abspath(_filename)
                        if os.path.exists(_filename):
                            # included file must not clobber our encoding
                            old_encoding = self._encoding
                            self.read(_filename)
                            self._encoding = old_encoding
                        else:
                            import warnings
                            warnings.warn(Warning(
                                "Can't find the file [%s], so just skip it"
                                % _filename), stacklevel=2)
                        continue
                    info = RawValue(self._inifile, lineno, sec_name)
                    section = self.add(sec_name, comments, info=info)
                    comments = []
                elif '=' in line:
                    if section is None:
                        raise Exception(
                            "No section found, please define it first in %s file"
                            % self.filename)
                    # if find <=, then it'll replace the old value for
                    # mutable variables, because the default behavior
                    # will merge list and dict
                    pos = line.find('<=')
                    if pos != -1:
                        begin, end = pos, pos + 2
                        replace_flag = True
                    else:
                        pos = line.find('=')
                        begin, end = pos, pos + 1
                        replace_flag = False
                    keyname = line[:begin].strip()
                    # keys may not shadow entries injected from env
                    if keyname in self._env:
                        raise KeyError(
                            "Settings key %s is already defined in env, please change it's name"
                            % keyname)
                    rest = line[end:].strip()
                    # if key= then value will be set ''
                    if rest == '':
                        value = 'None'
                    else:
                        # re-read the full logical value (may span lines)
                        f.seek(lastpos + end)
                        try:
                            value, iden_existed = self.__read_line(f)
                        except Exception as e:
                            print_exc()
                            raise Exception(
                                "Parsing ini file error in %s:%d:%s" % (
                                    filename or self._inifile, lineno, line))
                    if self._lazy:
                        # lazy mode: defer evaluation when identifiers occur
                        if iden_existed:
                            v = EvalValue(value, filename or self._inifile,
                                          lineno, line)
                        else:
                            v = value
                    else:
                        if self._raw:
                            v = RawValue(self._inifile, lineno, value,
                                         replace_flag)
                        else:
                            try:
                                v = eval_value(value, self.env(),
                                               self[sec_name],
                                               self._encoding,
                                               self._import_env)
                            except Exception as e:
                                print_exc()
                                raise Exception(
                                    "Converting value (%s) error in %s:%d:%s" % (
                                        value, filename or self._inifile,
                                        lineno, line))
                    section.add(keyname, v, comments, replace=replace_flag)
                    comments = []
            else:
                # blank line: carried along with pending comments
                comments.append(line)
def test_update(self): a = SortedDict({'a': 1}) expected = SortedDict({'a': 1, 'b': 2}) a.update({'b': 2}) self.assertEqual(a, expected)
def test_clear(self): a = SortedDict({'a': 1, 'b': 2}) a.clear() self.assertEqual(len(a), 0) self.assertTrue('a' not in a)
def test_keys_values_items(self): a = SortedDict({'a': 1, 'b': 2}) self.assertListEqual(a.keys(), ['a', 'b']) self.assertListEqual(a.values(), [1, 2]) self.assertListEqual(a.items(), [('a', 1), ('b', 2)])
#!/usr/bin/env python2 # encoding: utf-8 # vim: ts=4 noexpandtab import urwid import urwid.raw_display import sys import re, sys, random from sorteddict import SortedDict import modules import field_encodings DATA = SortedDict() WIDMAP = {} # GENERAL CONFIG ############################################################ DATA["TEXT_GENERAL"] = { "name": "General options", "type": "info", } DATA["CONFIG_DEBUG"] = { "name": "Build debug code", "default": False, "help": "Sets CFLAGS and LDFLAGS for debugging", } DATA["USE_LCD_CHARGE_PUMP"] = {
class Reactor(object): def __init__(self, redis, events, periodics=[], select=[]): self.logger = logging.getLogger('reactor') self.selector = Selector(select) self.db = ReactorDB(redis) self.mapper = dict(self.mapper_gen(events)) self.periodics = periodics self.timeline = SortedDict() self.load() def time(self): return itime() def mapper_gen(self, events): for event in events: yield event.type(), event def __getitem__(self, name): return self.selector.get(name) def add_to_queue(self, event, time): self.selector.process(event) self.get(time).append(event) def load(self): for time, event_queue in self.db.event_models: for event_db in event_queue: event = self.mapper.get(event_db.type.get()) if event: self.add_to_queue(event(**event_db.params.get()), time=time) def flush(self): self.timeline.clear() self.selector.clear() def get(self, time): queue = self.timeline.get(time) if queue is None: queue = self.timeline[time] = [] return queue def append(self, event, tdelta=None, time=None): time = time or self.time() + tdelta self.add_to_queue(event, time) self.db.dump(time, event) return time def wait_for_calc(self, time): done = False while self.timeline and not done: smallest_time, events = self.timeline.smallest_item() if smallest_time <= time: yield smallest_time, events else: done = True def remove_events(self, time): if time in self.timeline: del self.timeline[time] self.db.remove_item(time) def execute(self, event, time): try: event.do(self, time) except Exception: self.logger.error('%s executing at %s' % (event, time), exc_info=1) def calc(self, time=None): time = time or self.time() for event in self.periodics: self.execute(event, time) for expected_time, events in self.wait_for_calc(time): for event in events: self.execute(event, time) self.selector.remove(event) self.remove_events(expected_time)