class DiffResults(object):
    """Collects suite/test statuses from multiple output files for diffing.

    Each added output becomes one column; each suite/test long name becomes
    one row. Items absent from an output get a MissingStatus placeholder.
    """

    def __init__(self):
        # Item long name -> list of per-column statuses.
        self._stats = NormalizedDict()
        self.column_names = []

    @property
    def rows(self):
        # Rows sorted by (normalized) item name.
        return (RowStatus(name, statuses)
                for name, statuses in sorted(self._stats.items()))

    def add_output(self, path, column=None):
        """Parse one output file and append its statuses as a new column."""
        self._add_suite(ExecutionResult(path).suite)
        self.column_names.append(column or path)
        # Pad rows missing from this output so all rows have equal length.
        for stats in self._stats.values():
            self._add_missing_statuses(stats)

    def _add_suite(self, suite):
        # Record the suite itself, then recurse into child suites and tests.
        self._add_to_stats(suite)
        for sub_suite in suite.suites:
            self._add_suite(sub_suite)
        for test in suite.tests:
            self._add_to_stats(test)

    def _add_to_stats(self, item):
        stats = self._stats.setdefault(item.longname, [])
        # Item may be new: back-fill columns from earlier outputs first.
        self._add_missing_statuses(stats)
        stats.append(ItemStatus(item))

    def _add_missing_statuses(self, stats):
        while len(stats) < len(self.column_names):
            stats.append(MissingStatus())
def test_iterkeys_and_keys(self):
    """iterkeys() yields a one-shot iterator whose contents match keys()."""
    d = NormalizedDict({'A': 1, 'b': 3, 'C': 2})
    it = d.iterkeys()
    assert_false(isinstance(it, list))
    assert_equal(list(it), ['A', 'b', 'C'])
    # The iterator is exhausted after a single pass.
    assert_equal(list(it), [])
    assert_equal(list(d.iterkeys()), d.keys())
def test_itervalues_and_values(self):
    """itervalues() yields a one-shot iterator matching values()."""
    d = NormalizedDict({'A': 1, 'b': 3, 'C': 2})
    it = d.itervalues()
    assert_false(isinstance(it, list))
    assert_equal(list(it), [1, 3, 2])
    # Exhausted after one pass.
    assert_equal(list(it), [])
    assert_equal(list(d.itervalues()), d.values())
def test_iteritems_and_items(self):
    """iteritems() yields a one-shot iterator matching items()."""
    d = NormalizedDict({'A': 1, 'b': 2, 'C': 3})
    it = d.iteritems()
    assert_false(isinstance(it, list))
    assert_equal(list(it), [('A', 1), ('b', 2), ('C', 3)])
    # Exhausted after one pass.
    assert_equal(list(it), [])
    assert_equal(list(d.iteritems()), d.items())
def test_setdefault(self):
    """setdefault() returns the existing value for normalized-equal keys."""
    d = NormalizedDict({'a': NormalizedDict()})
    d.setdefault('a', 'whatever').setdefault('B', []).append(1)
    d.setdefault('A', 'everwhat').setdefault('b', []).append(2)
    assert_equal(d['a']['b'], [1, 2])
    # Original key spellings are preserved.
    assert_equal(list(d), ['a'])
    assert_equal(list(d['a']), ['B'])
class SetVariables(object):
    """Tracks variables set during a test, with per-keyword scoping."""

    def __init__(self):
        # Underscores are ignored when normalizing variable names.
        self._test = NormalizedDict(ignore='_')
        # Scope stack; index 0 is always the test-level scope.
        self._scopes = [self._test]

    def end_test(self):
        self._test.clear()

    def start_keyword(self):
        # A new keyword scope starts as a copy of the enclosing scope.
        self._scopes.append(self._scopes[-1].copy())

    def end_keyword(self):
        self._scopes.pop()

    def set_test(self, name, value):
        # Test-level variables become visible in every active scope.
        for scope in self._scopes:
            scope[name] = value

    def set_keyword(self, name, value):
        self._scopes[-1][name] = value

    def update_keyword(self, kw):
        # NOTE(review): assumes ``kw`` supports item assignment -- confirm.
        for name, value in self._scopes[-1].items():
            kw[name] = value
def test_popitem(self):
    """popitem() removes items in insertion order until the dict is empty."""
    items = [(str(i), i) for i in range(9)]
    nd = NormalizedDict(items)
    for expected in items:
        assert_equal(nd.popitem(), expected)
    assert_equal(nd._data, {})
    assert_equal(list(nd.keys()), [])
def test_setdefault(self):
    """setdefault() returns the existing value for normalized-equal keys."""
    d = NormalizedDict({"a": NormalizedDict()})
    d.setdefault("a", "whatever").setdefault("B", []).append(1)
    d.setdefault("A", "everwhat").setdefault("b", []).append(2)
    assert_equals(d["a"]["b"], [1, 2])
    # Original key spellings are preserved.
    assert_equals(list(d), ["a"])
    assert_equals(list(d["a"]), ["B"])
def test_iteritems_and_items(self):
    """iteritems() yields a one-shot iterator matching items()."""
    d = NormalizedDict({"A": 1, "b": 2, "C": 3})
    it = d.iteritems()
    assert_false(isinstance(it, list))
    assert_equals(list(it), [("A", 1), ("b", 2), ("C", 3)])
    # Exhausted after one pass.
    assert_equals(list(it), [])
    assert_equals(list(d.iteritems()), d.items())
def test_itervalues_and_values(self):
    """itervalues() yields a one-shot iterator matching values()."""
    d = NormalizedDict({"A": 1, "b": 3, "C": 2})
    it = d.itervalues()
    assert_false(isinstance(it, list))
    assert_equals(list(it), [1, 3, 2])
    # Exhausted after one pass.
    assert_equals(list(it), [])
    assert_equals(list(d.itervalues()), d.values())
def test_iterkeys_and_keys(self):
    """iterkeys() yields a one-shot iterator whose contents match keys()."""
    d = NormalizedDict({"A": 1, "b": 3, "C": 2})
    it = d.iterkeys()
    assert_false(isinstance(it, list))
    assert_equals(list(it), ["A", "b", "C"])
    # Exhausted after one pass.
    assert_equals(list(it), [])
    assert_equals(list(d.iterkeys()), d.keys())
def test_update_with_kwargs(self):
    """Keyword arguments to update() win over the mapping argument."""
    nd = NormalizedDict({'a': 0, 'c': 1})
    nd.update({'b': 2, 'c': 3}, b=4, d=5)
    expected = [('a', 0), ('b', 4), ('c', 3), ('d', 5)]
    for key, value in expected:
        assert_equal(nd[key], value)
        # Lookup and containment are case-insensitive.
        assert_equal(nd[key.upper()], value)
        assert_true(key in nd)
        assert_true(key.upper() in nd)
        assert_true(key in nd.keys())
def test_update_with_kwargs(self):
    """Keyword arguments to update() win over the mapping argument."""
    nd = NormalizedDict({"a": 0, "c": 1})
    nd.update({"b": 2, "c": 3}, b=4, d=5)
    expected = [("a", 0), ("b", 4), ("c", 3), ("d", 5)]
    for key, value in expected:
        assert_equals(nd[key], value)
        # Lookup and containment are case-insensitive.
        assert_equals(nd[key.upper()], value)
        assert_true(key in nd)
        assert_true(key.upper() in nd)
        assert_true(key in nd.keys())
def test_update_using_another_norm_dict(self):
    """Updating from another NormalizedDict merges by normalized key."""
    nd = NormalizedDict({'a': 1, 'b': 1})
    nd.update(NormalizedDict({'B': 2, 'C': 2}))
    for ch in 'bc':
        assert_equal(nd[ch], 2)
        assert_equal(nd[ch.upper()], 2)
    keys = list(nd)
    # Updating an existing key keeps the originally stored spelling ('b');
    # a brand new key keeps the spelling it was added with ('C').
    assert_true('b' in keys)
    assert_true('B' not in keys)
    assert_true('c' not in keys)
    assert_true('C' in keys)
def test_update(self):
    """update() from a plain dict merges by normalized key."""
    nd = NormalizedDict({'a': 1, 'b': 1, 'c': 1})
    nd.update({'b': 2, 'C': 2, 'D': 2})
    for ch in 'bcd':
        assert_equal(nd[ch], 2)
        assert_equal(nd[ch.upper()], 2)
    keys = list(nd)
    # Existing keys keep their stored spelling; new keys keep the given one.
    assert_true('b' in keys)
    assert_true('c' in keys)
    assert_true('C' not in keys)
    assert_true('d' not in keys)
    assert_true('D' in keys)
def test_copy(self):
    """copy() produces an equal but fully independent dict."""
    original = NormalizedDict({'a': 1, 'B': 1})
    clone = original.copy()
    assert_equal(original, clone)
    assert_equal(original._data, clone._data)
    assert_equal(original._keys, clone._keys)
    assert_equal(original._normalize, clone._normalize)
    # Mutations after copying must not leak between the two instances.
    original['C'] = 1
    clone['b'] = 2
    assert_equal(original._keys, {'a': 'a', 'b': 'B', 'c': 'C'})
    assert_equal(original._data, {'a': 1, 'b': 1, 'c': 1})
    assert_equal(clone._keys, {'a': 'a', 'b': 'B'})
    assert_equal(clone._data, {'a': 1, 'b': 2})
def test_copy(self):
    """copy() produces an equal but fully independent dict."""
    original = NormalizedDict({"a": 1, "B": 1})
    clone = original.copy()
    assert_equals(original, clone)
    assert_equals(original.data, clone.data)
    assert_equals(original._keys, clone._keys)
    assert_equals(original._normalize, clone._normalize)
    # Mutations after copying must not leak between the two instances.
    original["C"] = 1
    clone["b"] = 2
    assert_equals(original._keys, {"a": "a", "b": "B", "c": "C"})
    assert_equals(original.data, {"a": 1, "b": 1, "c": 1})
    assert_equals(clone._keys, {"a": "a", "b": "B"})
    assert_equals(clone.data, {"a": 1, "b": 2})
class TagStatistics:
    """Collects per-tag and combined statistics from executed tests."""

    def __init__(self, include=None, exclude=None, combine=None, docs=None,
                 links=None):
        # Tag name -> TagStat; underscores ignored when matching names.
        self.stats = NormalizedDict(ignore=['_'])
        self._include = MultiMatcher(include, ignore=['_'])
        self._exclude = MultiMatcher(exclude, ignore=['_'])
        # List of (pattern, name) pairs defining combined statistics.
        self._combine = combine or []
        info = TagStatInfo(docs or [], links or [])
        self._get_doc = info.get_doc
        self._get_links = info.get_links

    def add_test(self, test, critical):
        self._add_tags_statistics(test, critical)
        self._add_combined_statistics(test)

    def _add_tags_statistics(self, test, critical):
        for tag in test.tags:
            if not self._is_included(tag):
                continue
            if tag not in self.stats:
                self.stats[tag] = TagStat(tag, self._get_doc(tag),
                                          self._get_links(tag),
                                          critical.is_critical(tag),
                                          critical.is_non_critical(tag))
            self.stats[tag].add_test(test)

    def _is_included(self, tag):
        # An empty include matcher means "include everything".
        if self._include and not self._include.match(tag):
            return False
        return not self._exclude.match(tag)

    def _add_combined_statistics(self, test):
        for pattern, name in self._combine:
            # Combined stat falls back to its pattern as the display name.
            name = name or pattern
            if name not in self.stats:
                self.stats[name] = TagStat(name, self._get_doc(name),
                                           self._get_links(name),
                                           combined=pattern)
            if TagPatterns(pattern).match(test.tags):
                self.stats[name].add_test(test)

    def serialize(self, serializer):
        serializer.start_tag_stats(self)
        for stat in sorted(self.stats.values()):
            stat.serialize(serializer)
        serializer.end_tag_stats(self)

    def sort(self):
        for stat in self.stats.values():
            stat.tests.sort()
def convert_to_bool(self, value, *true_false, **options):
    """Convert ``value`` to bool, optionally using custom TRUE/FALSE lists.

    ``true_false`` may define custom item lists: the specifiers TRUE and
    FALSE (normalized) each start a list and the items following them
    belong to that list.  Exactly one TRUE and one FALSE list must result.

    Supported ``options``: ``normalized`` (default True) selects a
    normalizing bool class for the custom lists; ``boolclass``/``booltype``
    names or provides a bool class directly.  With no customization at all
    this falls back to BuiltIn's default boolean conversion.

    Raises ValueError/TypeError on invalid specifiers or bool classes.
    """
    if true_false:
        lists = NormalizedDict({'true': [], 'false': []})
        # choose the first list to fill with items
        # based on given TRUE or FALSE specifier:
        try:
            t_or_f_list = lists[true_false[0]]
        except KeyError:
            raise ValueError("Expected TRUE or FALSE, not: %s"
                             % repr(true_false[0]))
        for item in true_false[1:]:
            if item in lists:
                #==> is new TRUE or FALSE specifier
                #==> switch to corresponding list
                t_or_f_list = lists[item]
                if t_or_f_list:
                    # List already has items ==> specifier given twice.
                    raise ValueError("Multiple %s lists specified."
                                     % normalize(item).upper())
            else:
                t_or_f_list.append(item)
        for key, items in lists.items():
            if not items:
                raise ValueError("No %s list specified." % key.upper())
        if RobotBool(options.get('normalized', True)):
            boolcls = normboolclass(**lists)
        else:
            boolcls = boolclass(**lists)
    else:
        boolcls = options.get('boolclass') or options.get('booltype')
        if not boolcls:
            # fallback to robot's default bool conversion
            return BUILTIN.convert_to_boolean(value)
        if isstring(boolcls):
            try:
                # is a registered bool class name?
                boolcls = BOOL_CLASSES[boolcls]
            except KeyError:
                if '.' not in boolcls:
                    raise ValueError(
                        "No such bool class registered: '%s'" % boolcls)
                modname, clsname = boolcls.rsplit('.', 1)
                try:
                    # is an importable 'module.class' string?
                    # fromlist makes __import__ return the leaf module even
                    # for dotted paths; plain __import__('a.b') returns the
                    # top-level package 'a' and the getattr would fail.
                    module = __import__(modname, fromlist=[clsname])
                    boolcls = getattr(module, clsname)
                except (ImportError, AttributeError):
                    raise ValueError(
                        "Can't import bool class: '%s'" % boolcls)
        elif not isboolclass(boolcls):
            raise TypeError("No bool class: %s" % repr(boolcls))
    BUILTIN._log_types(value)
    return boolcls(value)
class HandlerStore(object):
    """Stores keyword handlers for one library, resource or test case file.

    Normal handlers are looked up by normalized name; handlers with
    embedded arguments are matched against their templates.
    """

    def __init__(self, source):
        self._source = source
        # Normal handlers by name; underscores ignored when matching.
        self._normal = NormalizedDict(ignore='_')
        # Handlers with embedded arguments, matched by template.
        self._embedded = []

    def add(self, handler, embedded=False):
        if embedded:
            self._embedded.append(handler)
        elif handler.name not in self._normal:
            self._normal[handler.name] = handler
        else:
            # Duplicate name: store an error handler that fails when used
            # and also report the error immediately.
            error = 'Keyword with same name defined multiple times.'
            self._normal[handler.name] = UserErrorHandler(handler.name, error)
            raise DataError(error)

    def __iter__(self):
        # Python 2: dict values() returns a list, so '+' concatenates.
        return iter(sorted(self._normal.values() + self._embedded,
                           key=attrgetter('name')))

    def __len__(self):
        return len(self._normal) + len(self._embedded)

    def __contains__(self, name):
        if name in self._normal:
            return True
        return any(template.matches(name) for template in self._embedded)

    def __getitem__(self, name):
        try:
            return self._normal[name]
        except KeyError:
            return self._find_embedded(name)

    def _find_embedded(self, name):
        embedded = [template.create(name) for template in self._embedded
                    if template.matches(name)]
        if len(embedded) == 1:
            return embedded[0]
        # Zero or multiple matches: raises DataError.
        self._raise_no_single_match(name, embedded)

    def _raise_no_single_match(self, name, found):
        if self._source is None:
            where = "Test case file"
        elif self._is_resource(self._source):
            where = "Resource file '%s'" % self._source
        else:
            where = "Test library '%s'" % self._source
        if not found:
            raise DataError("%s contains no keywords matching name '%s'."
                            % (where, name))
        error = ["%s contains multiple keywords matching name '%s':"
                 % (where, name)]
        names = sorted(handler.orig_name for handler in found)
        raise DataError('\n '.join(error + names))

    def _is_resource(self, source):
        extension = splitext(source)[1][1:].lower()
        return extension in RESOURCE_EXTENSIONS
def __init__(self, combined_stats):
    #: Dictionary, where key is the name of the tag as a string and value
    #: is an instance of :class:`~robot.model.stats.TagStat`.
    self.tags = NormalizedDict(ignore=['_'])
    #: Dictionary, where key is the name of the created tag as a string
    #: and value is an instance of :class:`~robot.model.stats.TagStat`.
    self.combined = combined_stats
class TagStatistics(object):
    """Container for tag statistics."""

    def __init__(self, critical_stats, non_critical_stats, combined_stats):
        #: Dictionary, where key is the name of the tag as a string and value
        #: is an instance of :class:`~robot.model.stats.TagStat`.
        self.tags = NormalizedDict(ignore='_')
        #: List of :class:`~robot.model.stats.CriticalTagStat` objects.
        self.critical = critical_stats
        #: List of :class:`~robot.model.stats.CriticalTagStat` objects.
        self.non_critical = non_critical_stats
        #: List of :class:`~robot.model.stats.CombinedTagStat` objects.
        self.combined = combined_stats

    def visit(self, visitor):
        visitor.visit_tag_statistics(self)

    def __iter__(self):
        # Tags that already have a dedicated critical/non-critical stat are
        # not repeated among the plain tag stats.
        crits = self._get_critical_and_non_critical_matcher()
        tags = [t for t in self.tags.values() if t.name not in crits]
        return iter(sorted(chain(self.critical, self.non_critical,
                                 self.combined, tags)))

    def _get_critical_and_non_critical_matcher(self):
        crits = [stat for stat in self.critical + self.non_critical
                 if isinstance(stat.pattern, SingleTagPattern)]
        # NormalizedDict is used only for its normalized membership test.
        return NormalizedDict([(unicode(stat.pattern), None)
                               for stat in crits], ignore='_')
class HandlerStore(object):
    """Stores keyword handlers for one library, resource or test case file.

    Normal handlers are looked up by normalized name; handlers with
    embedded arguments are matched against their templates.
    """

    TEST_LIBRARY_TYPE = 'Test library'
    TEST_CASE_FILE_TYPE = 'Test case file'
    RESOURCE_FILE_TYPE = 'Resource file'

    def __init__(self, source, source_type):
        self.source = source
        self.source_type = source_type
        # Normal handlers by name; underscores ignored when matching.
        self._normal = NormalizedDict(ignore='_')
        # Handlers with embedded arguments, matched by template.
        self._embedded = []

    def add(self, handler, embedded=False):
        if embedded:
            self._embedded.append(handler)
        elif handler.name not in self._normal:
            self._normal[handler.name] = handler
        else:
            # Duplicate name: store an error handler that fails when used
            # and also report the error immediately.
            error = 'Keyword with same name defined multiple times.'
            self._normal[handler.name] = UserErrorHandler(handler.name, error,
                                                          handler.libname)
            raise DataError(error)

    def __iter__(self):
        # Python 2: dict values() returns a list, so '+' concatenates.
        return iter(sorted(self._normal.values() + self._embedded,
                           key=attrgetter('name')))

    def __len__(self):
        return len(self._normal) + len(self._embedded)

    def __contains__(self, name):
        if name in self._normal:
            return True
        return any(template.matches(name) for template in self._embedded)

    def __getitem__(self, name):
        try:
            return self._normal[name]
        except KeyError:
            return self._find_embedded(name)

    def _find_embedded(self, name):
        embedded = [template.create(name) for template in self._embedded
                    if template.matches(name)]
        if len(embedded) == 1:
            return embedded[0]
        # Zero or multiple matches: raises DataError.
        self._raise_no_single_match(name, embedded)

    def _raise_no_single_match(self, name, found):
        if self.source_type == self.TEST_CASE_FILE_TYPE:
            source = self.source_type
        else:
            source = "%s '%s'" % (self.source_type, self.source)
        if not found:
            raise DataError("%s contains no keywords matching name '%s'."
                            % (source, name))
        error = ["%s contains multiple keywords matching name '%s':"
                 % (source, name)]
        names = sorted(handler.orig_name for handler in found)
        raise DataError('\n '.join(error + names))
def test_keys_values_and_items_are_returned_in_same_order(self):
    """keys(), values() and items() must iterate in a consistent order."""
    nd = NormalizedDict()
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!"#%&/()=?'
    for index, char in enumerate(chars):
        nd[char.upper()] = index
        nd[char + str(index)] = 1
    assert_equals(nd.items(), zip(nd.keys(), nd.values()))
    assert_equals(list(nd.iteritems()),
                  zip(nd.iterkeys(), nd.itervalues()))
def __init__(self, include=None, exclude=None, combine=None, docs=None,
             links=None):
    """Initialize tag statistics with optional include/exclude patterns,
    combined-stat definitions and tag documentation/link info."""
    self.stats = NormalizedDict(ignore=['_'])
    self._include = MultiMatcher(include, ignore=['_'])
    self._exclude = MultiMatcher(exclude, ignore=['_'])
    self._combine = combine or []
    stat_info = TagStatInfo(docs or [], links or [])
    self._get_doc = stat_info.get_doc
    self._get_links = stat_info.get_links
class VariableStore(object):
    """Stores variables and resolves delayed (table-defined) values lazily."""

    def __init__(self, variables):
        # Variable name -> value; underscores ignored when matching names.
        self.store = NormalizedDict(ignore='_')
        self._variables = variables

    def resolve_delayed(self):
        # Best effort: resolution errors are reported when the variable is
        # actually used, not here.
        for name, value in self.store.items():
            try:
                self._resolve_delayed(name, value)
            except DataError:
                pass

    def _resolve_delayed(self, name, value):
        if not isinstance(value, DelayedVariable):
            return value
        # Replace the placeholder with its resolved value.
        self.store[name] = value.resolve(name, self._variables)
        return self.store[name]

    def find(self, name):
        return self._resolve_delayed(name, self.store[name])

    def __getitem__(self, name):
        return self.find(name)    # TODO: __getitem__ vs find

    def clear(self):
        self.store.clear()

    def add(self, name, value, overwrite=True):
        if overwrite or name not in self.store:
            self.store[name] = value

    def remove(self, name):
        if name in self.store:
            self.store.pop(name)

    def __len__(self):
        return len(self.store)

    def __iter__(self):
        return iter(self.store)

    def __contains__(self, name):
        return name in self.store
def __init__(self, critical_stats, non_critical_stats, combined_stats):
    """Initialize the tag statistics container."""
    #: Dictionary, where key is the name of the tag as a string and value
    #: is an instance of :class:`~robot.model.stats.TagStat`.
    self.tags = NormalizedDict(ignore='_')
    #: List of :class:`~robot.model.stats.CriticalTagStat` objects.
    self.critical = critical_stats
    #: List of :class:`~robot.model.stats.CriticalTagStat` objects.
    self.non_critical = non_critical_stats
    #: List of :class:`~robot.model.stats.CombinedTagStat` objects.
    self.combined = combined_stats
class TagStatistics(object):
    """Container for per-tag and combined tag statistics."""

    def __init__(self, combined_stats):
        # Tag name -> TagStat; underscores ignored when matching names.
        self.tags = NormalizedDict(ignore=['_'])
        self.combined = combined_stats

    def visit(self, visitor):
        visitor.visit_tag_statistics(self)

    def __iter__(self):
        # Python 2: dict values() returns a list, so '+' concatenates.
        return iter(sorted(self.tags.values() + self.combined))
class HandlerStore(object):
    """Stores keyword handlers for one resource or test case file.

    Normal handlers are looked up by normalized name; handlers with
    embedded arguments are matched against their templates.
    """

    def __init__(self, source):
        self._source = source
        # Handlers by name; underscores ignored when matching.
        self._handlers = NormalizedDict(ignore='_')
        # Handlers with embedded arguments, matched by template.
        self._embedded = []

    def add(self, handler, embedded=False):
        self._handlers[handler.name] = handler
        if embedded:
            self._embedded.append(handler)

    def __iter__(self):
        return self._handlers.itervalues()

    def __len__(self):
        return len(self._handlers)

    def __contains__(self, name):
        if name in self._handlers:
            return True
        return False

    def __getitem__(self, name):
        try:
            return self._handlers[name]
        except KeyError:
            return self._find_embedded(name)

    def _find_embedded(self, name):
        embedded = [template.create(name) for template in self._embedded
                    if template.matches(name)]
        if len(embedded) == 1:
            return embedded[0]
        # Zero or multiple matches: raises DataError.
        self._raise_no_single_match(name, embedded)

    def _raise_no_single_match(self, name, found):
        if self._source is None:
            where = "Test case file"
        else:
            where = "Resource file '%s'" % self._source
        if not found:
            raise DataError("%s contains no keywords matching name '%s'."
                            % (where, name))
        names = seq2str([f.orig_name for f in found])
        # TODO: List found on separate lines like kw/var recommendations.
        # Both here and in namespace when multiple keywords match.
        raise DataError("%s contains multiple keywords matching name '%s'.\n"
                        "Found: %s" % (where, name, names))
class HandlerStore(object):
    """Stores keyword handlers for one resource or test case file.

    Normal handlers are looked up by normalized name; handlers with
    embedded arguments are matched against their templates.
    """

    def __init__(self, source):
        self._source = source
        # Handlers by name; underscores ignored when matching.
        self._handlers = NormalizedDict(ignore='_')
        # Handlers with embedded arguments, matched by template.
        self._embedded = []

    def add(self, handler, embedded=False):
        self._handlers[handler.name] = handler
        if embedded:
            self._embedded.append(handler)

    def __iter__(self):
        return self._handlers.itervalues()

    def __len__(self):
        return len(self._handlers)

    def __contains__(self, name):
        if name in self._handlers:
            return True
        return False

    def __getitem__(self, name):
        try:
            return self._handlers[name]
        except KeyError:
            return self._find_embedded(name)

    def _find_embedded(self, name):
        embedded = [template.create(name) for template in self._embedded
                    if template.matches(name)]
        if len(embedded) == 1:
            return embedded[0]
        # Zero or multiple matches: raises DataError.
        self._raise_no_single_match(name, embedded)

    def _raise_no_single_match(self, name, found):
        if self._source is None:
            where = "Test case file"
        else:
            where = "Resource file '%s'" % self._source
        if not found:
            raise DataError("%s contains no keywords matching name '%s'."
                            % (where, name))
        error = ["%s contains multiple keywords matching name '%s':"
                 % (where, name)]
        names = sorted(handler.orig_name for handler in found)
        raise DataError('\n '.join(error + names))
def __init__(self):
    """Initialize the registries and load known applications from disk."""
    self._apps = NormalizedDict()
    self._old_apps = NormalizedDict()
    # Seed previously-connected applications from the database file.
    for name, rmi_url in self._get_aliases_and_urls_from_db():
        self._old_apps[name] = rmi_url
def test_keys_are_sorted(self):
    """keys() returns original spellings sorted by normalized form."""
    nd = NormalizedDict((char, None) for char in 'aBcDeFg123XyZ___')
    assert_equal(list(nd.keys()), list('123_aBcDeFgXyZ'))
def test_unicode(self):
    # repr of keys/values differs between Python 2 (u'' prefix, escaped
    # non-ASCII) and Python 3 (plain str with the literal character).
    nd = NormalizedDict({'a': u'\xe4', u'\xe4': 'a'})
    if PY2:
        assert_equal(unicode(nd), "{'a': u'\\xe4', u'\\xe4': 'a'}")
    else:
        assert_equal(str(nd), u"{'a': '\xe4', '\xe4': 'a'}")
def test_eq_with_other_objects(self):
    """Comparisons against non-dict objects are never equal."""
    nd = NormalizedDict()
    for obj in ['string', 2, None, [], self.test_clear]:
        assert_false(nd == obj, obj)
        assert_true(nd != obj, obj)
def test_eq_with_normal_dict(self):
    # An empty NormalizedDict compares equal to an empty plain dict.
    self._verify_eq(NormalizedDict(), {})
def test_default_constructor(self):
    """Default normalization ignores case and all whitespace in keys."""
    d = NormalizedDict()
    d['foo bar'] = 'value'
    assert_equal(d['foobar'], 'value')
    assert_equal(d['F oo\nBar'], 'value')
def test_hash(self):
    # NormalizedDict is mutable and therefore unhashable.
    assert_raises(TypeError, hash, NormalizedDict())
def __setitem__(self, key, value):
    """Store ``key``/``value``, coercing both to strings first."""
    key = key if is_string(key) else unic(key)
    value = value if is_string(value) else unic(value)
    NormalizedDict.__setitem__(self, key, value)
def test_contains(self):
    """Membership tests are case-insensitive."""
    d = NormalizedDict({'Foo': 'bar'})
    assert_true('Foo' in d and 'foo' in d and 'FOO' in d)
def __init__(self, parent=None, source=None):
    self.parent = parent
    # Absolute path of the source file, or None when no source was given.
    self.source = abspath(source) if source else None
    self.children = []
    # Table name -> table; normalized lookup tolerates case/space variants.
    self._tables = NormalizedDict(self._get_tables())
def _normalize(self, tags):
    """Deduplicate ``tags`` (ignoring case, spaces and underscores) and
    drop empty strings and 'NONE' markers, preserving first-seen order."""
    unique = NormalizedDict([(str(t), None) for t in tags], ignore='_')
    for ignored in ('', 'NONE'):
        if ignored in unique:
            unique.pop(ignored)
    return tuple(unique)
def __init__(self, variables):
    # Variable name -> value; underscores ignored when matching names.
    self.data = NormalizedDict(ignore='_')
    self._variables = variables
class VariableStore(object):
    """Stores variables and lazily resolves variable-table values."""

    def __init__(self, variables):
        # Variable name (without ${}/@{}/&{} decoration) -> value.
        self.data = NormalizedDict(ignore='_')
        self._variables = variables

    def resolve_delayed(self):
        # list() because resolving may mutate self.data during iteration.
        # Best effort: errors are reported when the variable is used.
        for name, value in list(self.data.items()):
            try:
                self._resolve_delayed(name, value)
            except DataError:
                pass

    def _resolve_delayed(self, name, value):
        if not self._is_resolvable(value):
            return value
        try:
            self.data[name] = value.resolve(self._variables)
        except DataError as err:
            # Recursive resolving may have already removed variable.
            if name in self:
                self.remove(name)
                value.report_error(err)
            variable_not_found('${%s}' % name, self.data,
                               "Variable '${%s}' not found." % name)
        return self.data[name]

    def _is_resolvable(self, value):
        try:
            # isinstance can throw an exception in ironpython and jython
            return isinstance(value, VariableTableValueBase)
        except Exception:
            return False

    def __getitem__(self, name):
        return self._resolve_delayed(name, self.data[name])

    def update(self, store):
        self.data.update(store.data)

    def clear(self):
        self.data.clear()

    def add(self, name, value, overwrite=True, decorated=True):
        if decorated:
            name, value = self._undecorate(name, value)
        if overwrite or name not in self.data:
            self.data[name] = value

    def _undecorate(self, name, value):
        validate_var(name)
        # '@' requires a list-like value, '&' a dict-like value.
        if name[0] == '@':
            if not is_list_like(value):
                self._raise_cannot_set_type(name, value, 'list')
            value = list(value)
        if name[0] == '&':
            if not is_dict_like(value):
                self._raise_cannot_set_type(name, value, 'dictionary')
            value = DotDict(value)
        # Strip the '${' / '@{' / '&{' prefix and the closing '}'.
        return name[2:-1], value

    def _raise_cannot_set_type(self, name, value, expected):
        raise DataError("Cannot set variable '%s': Expected %s-like value, "
                        "got %s." % (name, expected, type_name(value)))

    def remove(self, name):
        if name in self.data:
            self.data.pop(name)

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, name):
        return name in self.data

    def as_dict(self, decoration=True):
        if decoration:
            variables = (self._decorate(name, self[name]) for name in self)
        else:
            variables = self.data
        return NormalizedDict(variables, ignore='_')

    def _decorate(self, name, value):
        if is_dict_like(value):
            name = '&{%s}' % name
        elif is_list_like(value):
            name = '@{%s}' % name
        else:
            name = '${%s}' % name
        return name, value
def as_dict(self, decoration=True):
    """Return variables as a NormalizedDict, optionally with the
    ${}/@{}/&{} decoration applied to the names."""
    if not decoration:
        return NormalizedDict(self.data, ignore='_')
    decorated = (self._decorate(name, self[name]) for name in self)
    return NormalizedDict(decorated, ignore='_')
def test_initial_values_as_name_value_pairs(self):
    """Initial values may be given as an iterable of (name, value) pairs."""
    d = NormalizedDict([('key', 'value'), ('F O\tO', 'bar')])
    assert_equal(d['key'], 'value')
    assert_equal(d['K EY'], 'value')
    assert_equal(d['foo'], 'bar')
def test_original_keys_are_preserved(self):
    """Setting a normalized-equal key keeps the originally stored spelling."""
    d = NormalizedDict({'low': 1, 'UP': 2})
    d['up'] = d['Spa Ce'] = 3
    assert_equals(list(d.keys()), ['low', 'Spa Ce', 'UP'])
    assert_equals(list(d.items()),
                  [('low', 1), ('Spa Ce', 3), ('UP', 3)])
def test_initial_values_as_dict(self):
    """Initial values may be given as a plain dict."""
    d = NormalizedDict({'key': 'value', 'F O\tO': 'bar'})
    assert_equal(d['key'], 'value')
    assert_equal(d['K EY'], 'value')
    assert_equal(d['foo'], 'bar')
def test_deleting_items(self):
    """Deletion is case-insensitive like other lookups."""
    d = NormalizedDict({'A': 1, 'b': 2})
    del d['A']
    del d['B']
    assert_equals(d._data, {})
    assert_equals(list(d.keys()), [])
def test_clear(self):
    """clear() empties both the data and the stored-key mappings."""
    d = NormalizedDict({'a': 1, 'B': 2})
    d.clear()
    assert_equal(d._data, {})
    assert_equal(d._keys, {})
def test_pop(self):
    """pop() is case-insensitive and removes the item."""
    d = NormalizedDict({'A': 1, 'b': 2})
    assert_equals(d.pop('A'), 1)
    assert_equals(d.pop('B'), 2)
    assert_equals(d._data, {})
    assert_equals(list(d.keys()), [])
def test_ne(self):
    # __ne__ must be consistent with __eq__.
    assert_false(NormalizedDict() != NormalizedDict())
    assert_false(NormalizedDict({'a': 1}) != NormalizedDict({'a': 1}))
    # Keys differing only in case compare equal.
    assert_false(NormalizedDict({'a': 1}) != NormalizedDict({'A': 1}))
def test_pop_with_default(self):
    # pop() with a default must not raise for a missing key.
    assert_equals(NormalizedDict().pop('nonex', 'default'), 'default')
def test_eq_with_user_dict(self):
    # Equality also works against UserDict instances.
    self._verify_eq(NormalizedDict(), UserDict())
def test_popitem_empty(self):
    # popitem() on an empty dict raises KeyError like a plain dict.
    assert_raises(KeyError, NormalizedDict().popitem)
def test_eq(self):
    # Two empty NormalizedDicts compare equal.
    self._verify_eq(NormalizedDict(), NormalizedDict())
def __init__(self, initial=None):
    # Ignore underscores when normalizing keys, in addition to the
    # default case- and whitespace-insensitivity.
    NormalizedDict.__init__(self, initial, ignore='_')
def test_iter(self):
    """Iterating the dict yields original key spellings in sorted order."""
    keys = list('123_aBcDeF')
    nd = NormalizedDict((key, 1) for key in keys)
    assert_equal(list(nd), keys)
    assert_equal([k for k in nd], keys)
def test_ignore(self):
    """Characters listed in ``ignore`` are stripped before normalizing."""
    d = NormalizedDict(ignore=['_'])
    d['foo_bar'] = 'value'
    assert_equal(d['foobar'], 'value')
    assert_equal(d['F oo\nB ___a r'], 'value')
class Applications:
    """Registry of connected applications, persisted to a TSV database file.

    NOTE: Python 2 code (print statement, dict.has_key).
    """

    _database = DataBasePaths(True).getConnectedFile()

    def __init__(self):
        # Currently connected applications: alias -> application object.
        self._apps = NormalizedDict()
        # All known applications: alias -> RMI URL, loaded from the database.
        self._old_apps = NormalizedDict()
        for alias, url in self._get_aliases_and_urls_from_db():
            self._old_apps[alias] = url

    def _get_aliases_and_urls_from_db(self):
        # Each database line is 'alias<TAB>url'.
        items = []
        for connection in self._read_lines():
            items.append(connection.split('\t'))
        return items

    def _read_lines(self):
        if os.path.exists(self._database):
            f = open(self._database, 'rb')
            data = f.read().splitlines()
            f.close()
            return data
        return []

    def add(self, alias, app):
        """Register a connected application and persist its RMI URL."""
        self._apps[alias] = app
        self._old_apps[alias] = app.rmi_url
        self._store()

    def _store(self):
        # Rewrite the whole database from the known-applications registry.
        data = ['%s\t%s' % (alias, url)
                for alias, url in self._old_apps.items()]
        self._write('\n'.join(data))
        print "*TRACE* Stored to connected applications database: ", data

    def _write(self, data):
        f = open(self._database, 'wb')
        f.write(data)
        f.close()

    def has_connected_to_application(self, alias):
        return self._apps.has_key(alias)

    def get_application(self, alias):
        return self._apps[alias]

    def get_applications(self):
        return self._apps.values()

    def get_aliases(self):
        return self._apps.keys()

    def delete(self, alias):
        """Remove the application from both registries and persist."""
        del(self._apps[normalize(alias)])
        del(self._old_apps[normalize(alias)])
        self._store()

    def delete_all_connected(self):
        # keys() returns a list on Python 2, so deleting while looping is safe.
        for alias in self._apps.keys():
            self.delete(alias)

    def get_alias_for(self, application):
        # Reverse lookup: application object -> alias.
        for alias, app in self._apps.items():
            if app == application:
                return alias
        return None

    def get_url(self, alias):
        # Looks in the persisted database, not only connected applications.
        for name, url in self._get_aliases_and_urls_from_db():
            if eq(name, alias):
                return url
        return None
def start_suite(self):
    """Push a new suite scope; nested suites inherit a copy of the parent's."""
    if self._scopes:
        self._suite = self._scopes[-1].copy()
    else:
        self._suite = NormalizedDict(ignore='_')
    self._scopes.append(self._suite)