def test_init(self):
    """Constructor accepts dict / kwargs / pairs / mixed input; a second
    positional argument is a TypeError; __init__ merges, never clears."""
    with self.assertRaises(TypeError):
        OrderedDict([('a', 1), ('b', 2)], None)  # too many args
    expected = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
    # dict input
    self.assertEqual(sorted(OrderedDict(dict(expected)).items()), expected)
    # kwds input
    self.assertEqual(sorted(OrderedDict(**dict(expected)).items()), expected)
    # pairs input
    self.assertEqual(list(OrderedDict(expected).items()), expected)
    # mixed input: keyword args override/extend the positional pairs
    mixed = OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
    self.assertEqual(list(mixed.items()), expected)

    # cyordereddict: remove this test because slot wrappers (on extension
    # types) cannot be inspected
    # make sure no positional args conflict with possible kwdargs
    # self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args,
    #                  ['self'])

    # Make sure that direct calls to __init__ do not clear previous contents
    d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
    d.__init__([('e', 5), ('f', 6)], g=7, d=4)
    self.assertEqual(list(d.items()),
                     [('a', 1), ('b', 2), ('c', 3), ('d', 4),
                      ('e', 5), ('f', 6), ('g', 7)])
def __init__(self, versions=None, title='Submissions', id_string=None,
             default_version_id_key='__version__', strict_schema=False,
             root_node_name='data', asset_type=None, submissions_xml=None):
    """Initialize the pack and load every supplied form version.

    ``versions`` may be a single version dict or an iterable of them;
    any falsy value means "no versions yet".
    """
    if not versions:
        versions = []
    # accept a single version, but normalize it to an iterable
    if isinstance(versions, dict):
        versions = [versions]
    # version id -> version object, in insertion order
    self.versions = OrderedDict()
    # the name of the field in submissions which stores the version ID
    self.default_version_id_key = default_version_id_key
    self.id_string = id_string
    self.root_node_name = root_node_name
    self.title = title
    self.strict_schema = strict_schema
    self.asset_type = asset_type
    # NOTE(review): submissions_xml is accepted but never stored here —
    # confirm callers do not expect it to be kept.
    self.load_all_versions(versions)
def read_config(config_file, encoding='utf-8'):
    """Returns a dictionary with subdictionaries of all configFile
    options/values.

    Args:
        config_file - String path to the config file to be opened.

    Returns:
        dict1: A dictionary of dictionaries representing the config file.
    """
    parser = ConfigParser()
    # keep option names case-sensitive while reading
    parser.optionxform = str
    parser.read(config_file, encoding=encoding)
    result = OrderedDict()
    for section in parser.sections():
        values = OrderedDict()
        for option in parser.options(section):
            values[option.lower()] = config_type(parser.get(section, option))
        result[section.lower()] = values
    return result
def test_clear(self):
    """clear() must empty the dict regardless of insertion order."""
    items = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(items)
    od = OrderedDict(items)
    self.assertEqual(len(od), len(items))
    od.clear()
    self.assertEqual(len(od), 0)
def __setstate__(self, state):
    """Restore pickled PerformancePeriod state (version >= 1)."""
    OLDEST_SUPPORTED_STATE = 1
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        raise BaseException("PerformancePeriod saved state is too old.")
    # Rebuild containers with fresh instances, then load pickled contents.
    processed_transactions = {}
    processed_transactions.update(state.pop('processed_transactions'))
    orders_by_id = OrderedDict()
    orders_by_id.update(state.pop('orders_by_id'))
    orders_by_modified = {}
    orders_by_modified.update(state.pop('orders_by_modified'))
    self.processed_transactions = processed_transactions
    self.orders_by_id = orders_by_id
    self.orders_by_modified = orders_by_modified
    # pop positions to use for v1
    positions = state.pop('positions', None)
    # Remaining keys map straight onto attributes.
    self.__dict__.update(state)
    if version == 1:
        # version 1 had PositionTracker logic inside of Period
        # we create the PositionTracker here.
        # Note: that in V2 it is assumed that the position_tracker
        # will be dependency injected and so is not reconstructed
        assert positions is not None, "positions should exist in v1"
        position_tracker = PositionTracker()
        position_tracker.update_positions(positions)
        self.position_tracker = position_tracker
def test_copying(self):
    # Check that ordered dicts are copyable, deepcopyable, picklable,
    # and have a repr/eval round-trip
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(pairs)
    update_test = OrderedDict()
    update_test.update(od)
    # Each duplication mechanism must produce an equal but distinct object.
    for label, dup in [
            ('od.copy()', od.copy()),
            ('copy.copy(od)', copy.copy(od)),
            ('copy.deepcopy(od)', copy.deepcopy(od)),
            ('pickle.loads(pickle.dumps(od, 0))',
             pickle.loads(pickle.dumps(od, 0))),
            ('pickle.loads(pickle.dumps(od, 1))',
             pickle.loads(pickle.dumps(od, 1))),
            ('pickle.loads(pickle.dumps(od, 2))',
             pickle.loads(pickle.dumps(od, 2))),
            ('pickle.loads(pickle.dumps(od, 3))',
             pickle.loads(pickle.dumps(od, 3))),
            ('pickle.loads(pickle.dumps(od, -1))',
             pickle.loads(pickle.dumps(od, -1))),
            ('eval(repr(od))', eval(repr(od))),
            ('update_test', update_test),
            ('OrderedDict(od)', OrderedDict(od)),
            ]:
        with self.subTest(label=label):
            msg = "\ncopy: %s\nod: %s" % (dup, od)
            self.assertIsNot(dup, od, msg)
            self.assertEqual(dup, od)
def __init__(self, data, **params):
    """Initialize the Dimensioned object, normalizing any dimension
    group values passed in ``params`` into ``Dimension`` instances."""
    for group in self._dim_groups + list(self._dim_aliases.keys()):
        # deep_dimensions/ddims are derived, never settable directly
        if group in ['deep_dimensions', 'ddims']:
            continue
        if group in params:
            # Translate an alias to its canonical group name first.
            if group in self._dim_aliases:
                params[self._dim_aliases[group]] = params.pop(group)
                group = self._dim_aliases[group]
            if group == 'cdims':
                # constant dims are a mapping of Dimension -> value
                dimensions = {
                    d if isinstance(d, Dimension) else Dimension(d): val
                    for d, val in params.pop(group).items()
                }
            else:
                dimensions = [
                    d if isinstance(d, Dimension) else Dimension(d)
                    for d in params.pop(group)
                ]
            params[group] = dimensions
    super(Dimensioned, self).__init__(data, **params)
    self.ndims = len(self.kdims)
    # Cache frequently-used dimension lookups for speed.
    cdims = [(d.name, val) for d, val in self.cdims.items()]
    self._cached_constants = OrderedDict(cdims)
    self._cached_index_names = [d.name for d in self.kdims]
    self._cached_value_names = [d.name for d in self.vdims]
    self._settings = None
def __init__(self, initial_items=None, **params):
    """Initialize the mapping, optionally seeding it from another
    NdMapping, a single ``(key, value)`` tuple, or an iterable of items."""
    if isinstance(initial_items, NdMapping):
        map_type = type(initial_items)
        own_params = self.params()
        # Carry over only explicitly-changed params that this class shares.
        new_params = dict(initial_items.get_param_values(onlychanged=True))
        # Drop a group that merely echoes the source class name.
        if new_params.get('group') == map_type.__name__:
            new_params.pop('group')
        params = dict(
            {
                name: value
                for name, value in new_params.items()
                if name in own_params
            }, **params)
    super(MultiDimensionalMapping, self).__init__(OrderedDict(), **params)
    self._next_ind = 0
    self._check_key_type = True
    # Cached per-dimension metadata used for fast key validation.
    self._cached_index_types = [d.type for d in self.key_dimensions]
    self._cached_index_values = {
        d.name: d.values
        for d in self.key_dimensions
    }
    self._cached_categorical = any(d.values for d in self.key_dimensions)
    # A bare tuple is treated as one (key, value) item, not an iterable.
    if isinstance(initial_items, tuple):
        self._add_item(initial_items[0], initial_items[1])
    elif initial_items is not None:
        self.update(OrderedDict(initial_items))
def __setstate__(self, state):
    """Restore pickled state (version >= 1), rebuilding each container
    with its canonical type before loading the pickled contents."""
    OLDEST_SUPPORTED_STATE = 1
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        raise BaseException("PerformancePeriod saved state is too old.")
    processed_transactions = defaultdict(list)
    processed_transactions.update(state.pop('processed_transactions'))
    orders_by_id = OrderedDict()
    orders_by_id.update(state.pop('orders_by_id'))
    orders_by_modified = defaultdict(OrderedDict)
    orders_by_modified.update(state.pop('orders_by_modified'))
    positions = positiondict()
    positions.update(state.pop('positions'))
    _positions_store = zp.Positions()
    _positions_store.update(state.pop('_positions_store'))
    self.processed_transactions = processed_transactions
    self.orders_by_id = orders_by_id
    self.orders_by_modified = orders_by_modified
    self.positions = positions
    self._positions_store = _positions_store
    # Remaining keys map straight onto attributes.
    self.__dict__.update(state)
    # Derived arrays are rebuilt, not pickled.
    self.initialize_position_calc_arrays()
def __setstate__(self, state):
    """Restore pickled PerformancePeriod state (version >= 1)."""
    OLDEST_SUPPORTED_STATE = 1
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        raise BaseException("PerformancePeriod saved state is too old.")
    # Rebuild containers with their canonical types before loading.
    processed_transactions = defaultdict(list)
    processed_transactions.update(state.pop('processed_transactions'))
    orders_by_id = OrderedDict()
    orders_by_id.update(state.pop('orders_by_id'))
    orders_by_modified = defaultdict(OrderedDict)
    orders_by_modified.update(state.pop('orders_by_modified'))
    self.processed_transactions = processed_transactions
    self.orders_by_id = orders_by_id
    self.orders_by_modified = orders_by_modified
    # pop positions to use for v1
    positions = state.pop('positions', None)
    self.__dict__.update(state)
    if version == 1:
        # version 1 had PositionTracker logic inside of Period
        # we create the PositionTracker here.
        # Note: that in V2 it is assumed that the position_tracker
        # will be dependency injected and so is not reconstructed
        assert positions is not None, "positions should exist in v1"
        position_tracker = PositionTracker()
        position_tracker.update_positions(positions)
        self.position_tracker = position_tracker
def test_delitem(self):
    """Deleting a key removes it; deleting it again raises KeyError."""
    items = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(items)
    del od['a']
    self.assertNotIn('a', od)
    with self.assertRaises(KeyError):
        del od['a']
    # remaining items keep their relative order
    self.assertEqual(list(od.items()), items[:2] + items[3:])
def trees(self):
    """ :returns: an ordered dictionary of parse trees
    (``Tree`` objects with integer indices as leaves)."""
    # Parse and cache all blocks on first use.
    if not self._trees_cache:
        self._trees_cache = OrderedDict(
            (num, self._parsetree(block))
            for num, block in self._read_blocks())
    return OrderedDict((num, entry.tree)
                       for num, entry in self._trees_cache.items())
def blocks(self):
    """ :returns: a list of strings containing the raw representation
    of trees in the treebank."""
    # Read and cache the raw blocks once.
    if self._block_cache is None:
        self._block_cache = OrderedDict(self._read_blocks())
    result = OrderedDict()
    for num, block in self._block_cache.items():
        result[num] = ElementTree.tostring(block)
    return result
def _stats(self): _stats = OrderedDict() _stats['id_string'] = self._get_id_string() _stats['version'] = self.id _stats['row_count'] = len(self.schema.get('content', {}).get('survey', [])) # returns stats in the format [ key="value" ] return '\n\t'.join(map(lambda key: '%s="%s"' % (key, str(_stats[key])), _stats.keys()))
def test_yaml_linkage(self):
    # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
    # In yaml, lists are native but tuples are not.
    items = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(items)
    # yaml.dump(od) -->
    # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
    reduce_args = od.__reduce__()[1]
    self.assertTrue(all(type(entry) is list for entry in reduce_args))
def sents(self):
    """ :returns: an ordered dictionary of sentences, each sentence
    being a list of words."""
    # Parse and cache all blocks on first use.
    if not self._trees_cache:
        self._trees_cache = OrderedDict(
            (num, self._parsetree(block))
            for num, block in self._read_blocks())
    return OrderedDict((num, entry.sent)
                       for num, entry in self._trees_cache.items())
def test_yaml_linkage(self):
    # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
    # In yaml, lists are native but tuples are not.
    items = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(items)
    # yaml.dump(od) -->
    # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n'
    for entry in od.__reduce__()[1]:
        self.assertTrue(type(entry) is list)
def test_repr(self):
    """repr() lists pairs in order and round-trips through eval()."""
    od = OrderedDict(
        [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
    expected = ("OrderedDict([('c', 1), ('b', 2), ('a', 3), "
                "('d', 4), ('e', 5), ('f', 6)])")
    self.assertEqual(repr(od), expected)
    self.assertEqual(eval(repr(od)), od)
    self.assertEqual(repr(OrderedDict()), "OrderedDict()")
def __init__(self):
    """Set up empty position bookkeeping structures."""
    # sid => position object
    self.positions = positiondict()
    # Arrays for quick calculations of positions value
    self._position_amounts = OrderedDict()
    self._position_last_sale_prices = OrderedDict()
    # Dividends earned but not yet paid, one column per payment field.
    self._unpaid_dividends = pd.DataFrame(
        columns=zp.DIVIDEND_PAYMENT_FIELDS,
    )
    self._positions_store = zp.Positions()
def test_popitem(self):
    """popitem() pops from the end (LIFO) and raises when empty."""
    items = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(items)
    od = OrderedDict(items)
    while items:
        self.assertEqual(od.popitem(), items.pop())
    with self.assertRaises(KeyError):
        od.popitem()
    self.assertEqual(len(od), 0)
def test_iterators(self):
    """Every iteration view must follow insertion order."""
    items = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(items)
    od = OrderedDict(items)
    expected_keys = [key for key, _ in items]
    expected_values = [value for _, value in items]
    self.assertEqual(list(od), expected_keys)
    self.assertEqual(list(od.keys()), expected_keys)
    self.assertEqual(list(od.values()), expected_values)
    self.assertEqual(list(od.items()), items)
    self.assertEqual(list(reversed(od)), expected_keys[::-1])
def test_reinsert(self):
    # Given insert a, insert b, delete a, re-insert a,
    # verify that a is now later than b.
    od = OrderedDict()
    od['a'] = 1
    od['b'] = 2
    del od['a']
    od['a'] = 1
    self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])
def _stats(self): _stats = OrderedDict() _stats['id_string'] = self.id_string _stats['versions'] = len(self.versions) # _stats['submissions'] = self.submissions_count() _stats['row_count'] = len(self[-1].schema.get('content', {}) .get('survey', [])) # returns stats in the format [ key="value" ] return '\n\t'.join('%s="%s"' % item for item in _stats.items())
def _stats(self): _stats = OrderedDict() _stats['id_string'] = self.id_string _stats['versions'] = len(self.versions) # _stats['submissions'] = self.submissions_count() _stats['row_count'] = len(self[-1].schema.get('content', {}).get('survey', [])) # returns stats in the format [ key="value" ] return '\n\t'.join('%s="%s"' % item for item in _stats.items())
def _stats(self): _stats = OrderedDict() _stats['id_string'] = self._get_id_string() _stats['version'] = self.id _stats['row_count'] = len( self.schema.get('content', {}).get('survey', [])) # returns stats in the format [ key="value" ] return '\n\t'.join( map(lambda key: '%s="%s"' % (key, str(_stats[key])), _stats.keys()))
def tagged_sents(self):
    """ :returns: an ordered dictionary of tagged sentences,
    each tagged sentence being a list of (word, tag) pairs."""
    if not self._trees_cache:
        self._trees_cache = OrderedDict((n, self._parsetree(a))
                                        for n, a in self._read_blocks())
    # Pair each token with a tag; sorting pos() presumably aligns tags
    # with token order (leaf indices) — TODO confirm against Tree.pos().
    return OrderedDict(
        (n, [(w, t) for w, (_, t) in zip(a.sent, sorted(a.tree.pos()))])
        for n, a in self._trees_cache.items())
def __init__(self, asset_finder): self.asset_finder = asset_finder # sid => position object self.positions = positiondict() # Arrays for quick calculations of positions value self._position_value_multipliers = OrderedDict() self._position_exposure_multipliers = OrderedDict() self._unpaid_dividends = pd.DataFrame( columns=zp.DIVIDEND_PAYMENT_FIELDS, ) self._positions_store = zp.Positions()
def _resort(self):
    """
    Sorts data by key using usual Python tuple sorting semantics
    or sorts in categorical order for any categorical Dimensions.
    """
    sortkws = {}
    dimensions = self.key_dimensions
    if self._cached_categorical:
        # Rank each key element by its position in the dimension's
        # declared values list; dims without declared values fall back
        # to the raw key element.
        sortkws['key'] = lambda x: tuple(dimensions[i].values.index(x[0][
            i]) if dimensions[i].values else x[0][i]
            for i in range(self.ndims))
    self.data = OrderedDict(sorted(self.data.items(), **sortkws))
def writegrammar(grammar, bitpar=False):
    """Write a grammar in a simple text file format.

    Rules are written in the order as they appear in the sequence
    `grammar`, except that the lexicon file lists words in sorted order
    (with tags for each word in the order of `grammar`). For a
    description of the file format, see ``docs/fileformats.rst``.

    :param grammar: a sequence of rule tuples, as produced by
        ``treebankgrammar()``, ``dopreduction()``, or ``doubledop()``.
    :param bitpar: when ``True``, use bitpar format: for rules, put
        weight first and leave out the yield function. By default, a
        format that supports LCFRS is used.
    :returns: tuple of strings ``(rules, lexicon)``

    Weights are written in the following format:

    - if ``bitpar`` is ``False``, write rational fractions; e.g., ``2/3``.
    - if ``bitpar`` is ``True``, write frequencies (e.g., ``2``)
      if probabilities sum to 1, i.e., in that case probabilities can
      be re-computed as relative frequencies. Otherwise, resort to
      floating point numbers (e.g., ``0.666``, imprecise)."""
    rules, lexicon = [], []
    lexical = OrderedDict()
    freqs = bitpar
    for (r, yf), w in grammar:
        if isinstance(w, tuple):
            if freqs:
                w = '%g' % w[0]
            else:
                w1, w2 = w
                if bitpar:
                    w = '%s' % (w1 / w2)  # .hex()
                else:
                    w = '%s/%s' % (w1, w2)
        elif isinstance(w, float):
            w = w.hex()
        if len(r) == 2 and r[1] == 'Epsilon':
            # lexical production: collected for the lexicon, not the rules
            lexical.setdefault(yf[0], []).append((r[0], w))
            continue
        elif bitpar:
            # join r directly; the old generator expression was a no-op
            rules.append('%s\t%s\n' % (w, '\t'.join(r)))
        else:
            yfstr = ','.join(''.join(map(str, a)) for a in yf)
            rules.append('%s\t%s\t%s\n' % ('\t'.join(r), yfstr, w))
    for word in lexical:
        lexicon.append(unescape(word))
        for tag, w in lexical[word]:
            lexicon.append('\t%s %s' % (tag, w))
        lexicon.append('\n')
    return ''.join(rules), ''.join(lexicon)
def writegrammar(grammar, bitpar=False):
    """Serialize a grammar to the simple text file format.

    Rules appear in the order given by `grammar`; the lexicon lists
    words in insertion order with their (tag, weight) entries. See
    ``docs/fileformats.rst`` for the format.

    :param grammar: a sequence of rule tuples, as produced by
        ``treebankgrammar()``, ``dopreduction()``, or ``doubledop()``.
    :param bitpar: when ``True``, write bitpar format (weight first,
        no yield function); otherwise the LCFRS-capable format.
    :returns: tuple of strings ``(rules, lexicon)``; rational-fraction
        weights when ``bitpar`` is ``False``, frequencies/floats when
        ``True``."""
    rule_lines = []
    lexical = OrderedDict()
    use_freqs = bitpar
    for (r, yf), weight in grammar:
        # Normalize the weight to its textual form.
        if isinstance(weight, tuple):
            if use_freqs:
                weight = '%g' % weight[0]
            else:
                num, den = weight
                if bitpar:
                    weight = '%s' % (num / den)  # .hex()
                else:
                    weight = '%s/%s' % (num, den)
        elif isinstance(weight, float):
            weight = weight.hex()
        if len(r) == 2 and r[1] == 'Epsilon':
            # lexical production: goes in the lexicon, not the rule list
            lexical.setdefault(yf[0], []).append((r[0], weight))
        elif bitpar:
            rule_lines.append('%s\t%s\n' % (weight, '\t'.join(x for x in r)))
        else:
            yfstr = ','.join(''.join(map(str, comp)) for comp in yf)
            rule_lines.append('%s\t%s\t%s\n'
                              % ('\t'.join(x for x in r), yfstr, weight))
    lex_parts = []
    for word, entries in lexical.items():
        lex_parts.append(unescape(word))
        for tag, weight in entries:
            lex_parts.append('\t%s %s' % (tag, weight))
        lex_parts.append('\n')
    return ''.join(rule_lines), ''.join(lex_parts)
def __init__(self, asset_finder, data_frequency): self.asset_finder = asset_finder # sid => position object self.positions = positiondict() # Arrays for quick calculations of positions value self._position_value_multipliers = OrderedDict() self._position_exposure_multipliers = OrderedDict() self._unpaid_dividends = {} self._unpaid_stock_dividends = {} self._positions_store = zp.Positions() self.data_frequency = data_frequency
def test_equality(self):
    """Equality between OrderedDicts is order-sensitive; comparison
    with a plain dict is not."""
    items = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(items)
    od1 = OrderedDict(items)
    od2 = OrderedDict(items)
    self.assertEqual(od1, od2)  # same order implies equality
    rotated = items[2:] + items[:2]
    od2 = OrderedDict(rotated)
    self.assertNotEqual(od1, od2)  # different order implies inequality
    # comparison to regular dict is not order sensitive
    self.assertEqual(od1, dict(od2))
    self.assertEqual(dict(od2), od1)
    # different length implied inequality
    self.assertNotEqual(od1, OrderedDict(rotated[:-1]))
def __init__(self, asset_finder): self.asset_finder = asset_finder # sid => position object self.positions = positiondict() # Arrays for quick calculations of positions value self._position_value_multipliers = OrderedDict() self._position_exposure_multipliers = OrderedDict() self._unpaid_dividends = pd.DataFrame( columns=zp.DIVIDEND_PAYMENT_FIELDS, ) self._positions_store = zp.Positions() # Dict, keyed on dates, that contains lists of close position events # for any Assets in this tracker's positions self._auto_close_position_sids = {}
def _propagate(self, path, val): """ Propagate the value up to the root node. """ if val == '_DELETE': if path in self.data: del self.data[path] else: items = [(key, v) for key, v in self.data.items() if not all(k == p for k, p in zip(key, path))] self.data = OrderedDict(items) else: self.data[path] = val if self.parent is not None: self.parent._propagate((self.identifier, ) + path, val)
def __init__(self, versions=None, title='Submissions', id_string=None,
             default_version_id_key='__version__', strict_schema=False,
             root_node_name='data', asset_type=None, submissions_xml=None):
    """Initialize the pack and load every supplied form version.

    ``versions`` may be a single version dict or an iterable of them.
    """
    if not versions:
        versions = []
    # accept a single version, but normalize it to an iterable
    if isinstance(versions, dict):
        versions = [versions]
    # version id -> version object, in insertion order
    self.versions = OrderedDict()
    # the name of the field in submissions which stores the version ID
    self.default_version_id_key = default_version_id_key
    self.id_string = id_string
    self.root_node_name = root_node_name
    self.title = title
    self.strict_schema = strict_schema
    if len(self.title) > 31:
        # excel sheet name size limit
        self.title = self.title[:28] + '...'
    self.asset_type = asset_type
    # NOTE(review): submissions_xml is accepted but never stored here —
    # confirm callers do not expect it to be kept.
    self.load_all_versions(versions)
def __setstate__(self, state):
    """Restore pickled PositionTracker state (version >= 3)."""
    OLDEST_SUPPORTED_STATE = 3
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        raise BaseException("PositionTracker saved state is too old.")
    self.asset_finder = state['asset_finder']
    self.positions = positiondict()
    # note that positions_store is temporary and gets regened from
    # .positions
    self._positions_store = zp.Positions()
    self._unpaid_dividends = state['unpaid_dividends']
    self._auto_close_position_sids = state['auto_close_position_sids']
    # Arrays for quick calculations of positions value
    self._position_amounts = OrderedDict()
    self._position_last_sale_prices = OrderedDict()
    self._position_value_multipliers = OrderedDict()
    self._position_exposure_multipliers = OrderedDict()
    self._position_payout_multipliers = OrderedDict()
    # Update positions is called without a finder
    self.update_positions(state['positions'])
class AutoSummClassDocumenter(ClassDocumenter, AutosummaryDocumenter): """Class documentor suitable for the :class:`AutoSummDirective` This class has the same functionality as the base :class:`sphinx.ext.autodoc.ClassDocumenter` class but with an additional `autosummary` option to provide the ability to provide a summary of all methods and attributes at the beginning. It's priority is slightly higher than the one of the ClassDocumenter""" #: slightly higher priority than #: :class:`sphinx.ext.autodoc.ClassDocumenter` priority = ClassDocumenter.priority + 0.1 #: original option_spec from :class:`sphinx.ext.autodoc.ClassDocumenter` #: but with additional autosummary boolean option option_spec = ClassDocumenter.option_spec.copy() option_spec['autosummary'] = bool_option #: Add options for members for the autosummary for _option in member_options.intersection(option_spec): option_spec['autosummary-' + _option] = option_spec[_option] del _option member_sections = OrderedDict([ (ad.ClassDocumenter.member_order, 'Classes'), (ad.MethodDocumenter.member_order, 'Methods'), (ad.AttributeDocumenter.member_order, 'Attributes'), ]) """:class:`~collections.OrderedDict` that includes the autosummary sections
def add_dimension(self, dimension, dim_pos, dim_val, **kwargs):
    """
    Create a new object with an additional key dimension along which
    items are indexed. Requires the dimension name, the position to
    insert it at in the key_dimensions, and a key value that is used
    across the dimension. Useful for merging several mappings together.
    """
    if isinstance(dimension, str):
        dimension = Dimension(dimension)
    if dimension.name in self._cached_index_names:
        raise Exception(
            '{dim} dimension already defined'.format(dim=dimension.name))
    dimensions = self.key_dimensions[:]
    dimensions.insert(dim_pos, dimension)
    # Splice dim_val into every existing key at the same position.
    expanded = OrderedDict(
        (key[:dim_pos] + (dim_val,) + key[dim_pos:], val)
        for key, val in self.data.items())
    return self.clone(expanded, key_dimensions=dimensions, **kwargs)
def test_detect_deletion_during_iteration(self):
    """Deleting a key mid-iteration must break the live iterator."""
    od = OrderedDict.fromkeys('abc')
    iterator = iter(od)
    first = next(iterator)
    del od[first]
    with self.assertRaises(Exception):
        # Note, the exact exception raised is not guaranteed
        # The only guarantee that the next() will not succeed
        next(iterator)
def __setstate__(self, state):
    """Restore pickled PositionTracker state (version >= 1)."""
    OLDEST_SUPPORTED_STATE = 1
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        raise BaseException("PositionTracker saved state is too old.")
    self.positions = positiondict()
    # note that positions_store is temporary and gets regened from
    # .positions
    self._positions_store = zp.Positions()
    self._unpaid_dividends = state['unpaid_dividends']
    # Arrays for quick calculations of positions value
    self._position_amounts = OrderedDict()
    self._position_last_sale_prices = OrderedDict()
    # Rebuild the quick-calculation arrays from the pickled positions.
    self.update_positions(state['positions'])
def test_copying(self):
    # Check that ordered dicts are copyable, deepcopyable, picklable,
    # and have a repr/eval round-trip
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    od = OrderedDict(pairs)

    def check(dup):
        # Every duplicate must be a distinct but equal object.
        msg = "\ncopy: %s\nod: %s" % (dup, od)
        self.assertIsNot(dup, od, msg)
        self.assertEqual(dup, od)

    check(od.copy())
    check(copy.copy(od))
    check(copy.deepcopy(od))
    # Exercise every pickle protocol supported by this interpreter.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        with self.subTest(proto=proto):
            check(pickle.loads(pickle.dumps(od, proto)))
    check(eval(repr(od)))
    update_test = OrderedDict()
    update_test.update(od)
    check(update_test)
    check(OrderedDict(od))
def test_init(self):
    with self.assertRaises(TypeError):
        OrderedDict([('a', 1), ('b', 2)], None)  # too many args
    pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
    self.assertEqual(sorted(OrderedDict(dict(pairs)).items()),
                     pairs)  # dict input
    self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()),
                     pairs)  # kwds input
    self.assertEqual(list(OrderedDict(pairs).items()),
                     pairs)  # pairs input
    self.assertEqual(
        list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
                         c=3, e=5).items()),
        pairs)  # mixed input
    # cyordereddict: remove this test because slot wrappers (on extension
    # types) cannot be inspected
    # make sure no positional args conflict with possible kwdargs
    # self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args,
    #                  ['self'])
    # Make sure that direct calls to __init__ do not clear previous contents
    d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
    d.__init__([('e', 5), ('f', 6)], g=7, d=4)
    self.assertEqual(list(d.items()),
                     [('a', 1), ('b', 2), ('c', 3), ('d', 4),
                      ('e', 5), ('f', 6), ('g', 7)])
def test_init(self):
    with self.assertRaises(TypeError):
        OrderedDict([('a', 1), ('b', 2)], None)  # too many args
    pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
    self.assertEqual(sorted(OrderedDict(dict(pairs)).items()),
                     pairs)  # dict input
    self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()),
                     pairs)  # kwds input
    self.assertEqual(list(OrderedDict(pairs).items()),
                     pairs)  # pairs input
    self.assertEqual(
        list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
                         c=3, e=5).items()),
        pairs)  # mixed input
    # make sure no positional args conflict with possible kwdargs
    self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)])
    self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)])
    self.assertRaises(TypeError, OrderedDict, 42)
    self.assertRaises(TypeError, OrderedDict, (), ())
    self.assertRaises(TypeError, OrderedDict.__init__)
    # Make sure that direct calls to __init__ do not clear previous contents
    d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
    d.__init__([('e', 5), ('f', 6)], g=7, d=4)
    self.assertEqual(list(d.items()),
                     [('a', 1), ('b', 2), ('c', 3), ('d', 4),
                      ('e', 5), ('f', 6), ('g', 7)])
def test_move_to_end(self):
    """move_to_end() moves a key to either end; missing keys raise."""
    od = OrderedDict.fromkeys('abcde')
    self.assertEqual(list(od), list('abcde'))
    od.move_to_end('c')
    self.assertEqual(list(od), list('abdec'))
    od.move_to_end('c', 0)
    self.assertEqual(list(od), list('cabde'))
    od.move_to_end('c', 0)  # already first: order unchanged
    self.assertEqual(list(od), list('cabde'))
    od.move_to_end('e')  # already last: order unchanged
    self.assertEqual(list(od), list('cabde'))
    with self.assertRaises(KeyError):
        od.move_to_end('x')
def __setstate__(self, state):
    """Restore pickled PerformancePeriod state (version >= 3)."""
    OLDEST_SUPPORTED_STATE = 3
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        raise BaseException("PerformancePeriod saved state is too old.")
    # Rebuild containers with fresh instances, then load pickled contents.
    processed_transactions = {}
    processed_transactions.update(state.pop('processed_transactions'))
    orders_by_id = OrderedDict()
    orders_by_id.update(state.pop('orders_by_id'))
    orders_by_modified = {}
    orders_by_modified.update(state.pop('orders_by_modified'))
    self.processed_transactions = processed_transactions
    self.orders_by_id = orders_by_id
    self.orders_by_modified = orders_by_modified
    # Not pickled; starts empty and is repopulated on use.
    self._execution_cash_flow_multipliers = {}
    self.__dict__.update(state)
def test_pop(self):
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(pairs)
    od = OrderedDict(pairs)
    shuffle(pairs)
    while pairs:
        k, v = pairs.pop()
        self.assertEqual(od.pop(k), v)
    with self.assertRaises(KeyError):
        od.pop('xyz')
    self.assertEqual(len(od), 0)
    self.assertEqual(od.pop(k, 12345), 12345)

    # make sure pop still works when __missing__ is defined
    class Missing(OrderedDict):
        def __missing__(self, key):
            return 0

    m = Missing(a=1)
    self.assertEqual(m.pop('b', 5), 5)
    self.assertEqual(m.pop('a', 6), 1)
    self.assertEqual(m.pop('a', 6), 6)
    # pop without a default still raises: __missing__ is not consulted
    with self.assertRaises(KeyError):
        m.pop('a')
def read_config(config_file, default_config=None):
    """
    This function is from tonic (author: Joe Hamman)
    Return a dictionary with subdictionaries of all configFile
    options/values, optionally back-filled with ``default_config``.
    """
    # Catch only ImportError: the old bare ``except`` also swallowed
    # unrelated errors (KeyboardInterrupt, SystemExit, ...).
    try:
        from cyordereddict import OrderedDict
    except ImportError:
        from collections import OrderedDict
    try:
        # SafeConfigParser was renamed ConfigParser in Python 3 and the
        # alias was removed in 3.12; import the surviving name.
        from configparser import ConfigParser as SafeConfigParser
    except ImportError:
        from ConfigParser import SafeConfigParser

    config = SafeConfigParser()
    # keep option names case-sensitive while reading
    config.optionxform = str
    config.read(config_file)
    dict1 = OrderedDict()
    for section in config.sections():
        dict2 = OrderedDict()
        for option in config.options(section):
            dict2[option] = config_type(config.get(section, option))
        dict1[section] = dict2
    # Fill in any option missing from the file with its default value.
    if default_config is not None:
        for name, section in dict1.items():
            if name in default_config.keys():
                for option, key in default_config[name].items():
                    if option not in section.keys():
                        dict1[name][option] = key
    return dict1
def _propagate(self, path, val): """ Propagate the value up to the root node. """ if val == '_DELETE': if path in self.data: del self.data[path] else: items = [(key, v) for key, v in self.data.items() if not all(k==p for k, p in zip(key, path))] self.data = OrderedDict(items) else: self.data[path] = val if self.parent is not None: self.parent._propagate((self.identifier,)+path, val)
def __init__(self, data, **params):
    """Initialize the Dimensioned object, normalizing the dimension
    specs in ``params`` into ``Dimension`` instances."""
    # Only the first two dim groups are settable by the caller.
    for group in self._dim_groups[0:2]:
        if group in params:
            if "constant" in group:
                # NOTE(review): iterates the popped value directly (not
                # .items()) — this only works if it is an iterable of
                # (dim, val) pairs, not a plain dict; confirm callers.
                dimensions = {d if isinstance(d, Dimension) else Dimension(d): val
                              for d, val in params.pop(group)}
            else:
                dimensions = [d if isinstance(d, Dimension) else Dimension(d)
                              for d in params.pop(group)]
            params[group] = dimensions
    super(Dimensioned, self).__init__(data, **params)
    self.ndims = len(self.key_dimensions)
    # Cache frequently-used dimension lookups for speed.
    constant_dimensions = [(d.name, val)
                           for d, val in self.constant_dimensions.items()]
    self._cached_constants = OrderedDict(constant_dimensions)
    self._cached_index_names = [d.name for d in self.key_dimensions]
    self._cached_value_names = [d.name for d in self.value_dimensions]
    self._settings = None