def read_config(config_file, encoding='utf-8'):
    """Parse an INI-style config file into nested ordered dictionaries.

    Args:
        config_file: String path to the config file to be opened.
        encoding: Text encoding used when reading the file
            (default ``'utf-8'``).

    Returns:
        OrderedDict mapping lower-cased section names to OrderedDicts of
        lower-cased option names -> values converted by ``config_type``.
    """
    config = ConfigParser()
    # Preserve option case on read; keys are lower-cased explicitly below.
    config.optionxform = str
    config.read(config_file, encoding=encoding)
    dict1 = OrderedDict()
    for section in config.sections():
        dict1[section.lower()] = OrderedDict(
            (option.lower(), config_type(config.get(section, option)))
            for option in config.options(section)
        )
    return dict1
def __setstate__(self, state):
    """Restore a pickled PositionTracker (state format version >= 3).

    Raises:
        ValueError: if the saved state is older than the oldest
            supported version.
    """
    OLDEST_SUPPORTED_STATE = 3
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        # ValueError instead of BaseException: still reaches callers that
        # caught BaseException, but is also catchable by ordinary
        # `except Exception` handlers.
        raise ValueError("PositionTracker saved state is too old.")
    self.asset_finder = state['asset_finder']
    self.positions = positiondict()
    # note that positions_store is temporary and gets regened from
    # .positions
    self._positions_store = zp.Positions()
    self._unpaid_dividends = state['unpaid_dividends']
    self._auto_close_position_sids = state['auto_close_position_sids']
    # Arrays for quick calculations of positions value
    self._position_amounts = OrderedDict()
    self._position_last_sale_prices = OrderedDict()
    self._position_value_multipliers = OrderedDict()
    self._position_exposure_multipliers = OrderedDict()
    self._position_payout_multipliers = OrderedDict()
    # Update positions is called without a finder
    self.update_positions(state['positions'])
def __init__(self, initial_items=None, **params):
    # Build a MultiDimensionalMapping, optionally seeded from another
    # NdMapping, a single (key, value) tuple, or an iterable of pairs.
    if isinstance(initial_items, NdMapping):
        map_type = type(initial_items)
        own_params = self.params()
        # Carry over only parameters the source explicitly changed and
        # that this class also declares; explicit **params still win.
        new_params = dict(initial_items.get_param_values(onlychanged=True))
        if new_params.get('group') == map_type.__name__:
            # Drop a 'group' that merely echoes the source class name —
            # presumably a class default rather than user data;
            # TODO(review): confirm.
            new_params.pop('group')
        params = dict(
            {
                name: value
                for name, value in new_params.items()
                if name in own_params
            }, **params)
    super(MultiDimensionalMapping, self).__init__(OrderedDict(), **params)
    # Index counter for the next inserted item.
    self._next_ind = 0
    self._check_key_type = True
    # Caches derived from the key dimensions for fast key validation.
    self._cached_index_types = [d.type for d in self.key_dimensions]
    self._cached_index_values = {
        d.name: d.values
        for d in self.key_dimensions
    }
    # True when any key dimension declares an explicit value ordering.
    self._cached_categorical = any(d.values for d in self.key_dimensions)
    if isinstance(initial_items, tuple):
        # A single (key, value) pair.
        self._add_item(initial_items[0], initial_items[1])
    elif initial_items is not None:
        self.update(OrderedDict(initial_items))
def test_copying(self): # Check that ordered dicts are copyable, deepcopyable, picklable, # and have a repr/eval round-trip pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) update_test = OrderedDict() update_test.update(od) for label, dup in [ ('od.copy()', od.copy()), ('copy.copy(od)', copy.copy(od)), ('copy.deepcopy(od)', copy.deepcopy(od)), ('pickle.loads(pickle.dumps(od, 0))', pickle.loads(pickle.dumps(od, 0))), ('pickle.loads(pickle.dumps(od, 1))', pickle.loads(pickle.dumps(od, 1))), ('pickle.loads(pickle.dumps(od, 2))', pickle.loads(pickle.dumps(od, 2))), ('pickle.loads(pickle.dumps(od, 3))', pickle.loads(pickle.dumps(od, 3))), ('pickle.loads(pickle.dumps(od, -1))', pickle.loads(pickle.dumps(od, -1))), ('eval(repr(od))', eval(repr(od))), ('update_test', update_test), ('OrderedDict(od)', OrderedDict(od)), ]: with self.subTest(label=label): msg = "\ncopy: %s\nod: %s" % (dup, od) self.assertIsNot(dup, od, msg) self.assertEqual(dup, od)
def blocks(self):
    """
    :returns: an ordered mapping of the raw (serialized) representation
        of each tree in the treebank."""
    if self._block_cache is None:
        self._block_cache = OrderedDict(self._read_blocks())
    serialized = ((key, ElementTree.tostring(elem))
                  for key, elem in self._block_cache.items())
    return OrderedDict(serialized)
def sents(self):
    """
    :returns: an ordered dictionary mapping each index to a sentence,
        where a sentence is a list of words."""
    if not self._trees_cache:
        parsed = ((key, self._parsetree(block))
                  for key, block in self._read_blocks())
        self._trees_cache = OrderedDict(parsed)
    return OrderedDict((key, item.sent)
                       for key, item in self._trees_cache.items())
def trees(self):
    """
    :returns: an ordered dictionary mapping each index to a parse tree
        (``Tree`` objects with integer indices as leaves)."""
    if not self._trees_cache:
        parsed = ((key, self._parsetree(block))
                  for key, block in self._read_blocks())
        self._trees_cache = OrderedDict(parsed)
    return OrderedDict((key, item.tree)
                       for key, item in self._trees_cache.items())
def test_repr(self): od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual( repr(od), "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])" ) self.assertEqual(eval(repr(od)), od) self.assertEqual(repr(OrderedDict()), "OrderedDict()")
def __init__(self):
    """Set up empty position-tracking state."""
    # sid => position object
    self.positions = positiondict()
    # Temporary store regenerated from .positions as needed.
    self._positions_store = zp.Positions()
    # Ordered tables for fast computations over position values.
    self._position_amounts = OrderedDict()
    self._position_last_sale_prices = OrderedDict()
    # Dividends declared but not yet paid out.
    self._unpaid_dividends = pd.DataFrame(
        columns=zp.DIVIDEND_PAYMENT_FIELDS,
    )
def tagged_sents(self):
    """
    :returns: an ordered dictionary mapping each index to a tagged
        sentence, i.e. a list of (word, tag) pairs."""
    if not self._trees_cache:
        parsed = ((key, self._parsetree(block))
                  for key, block in self._read_blocks())
        self._trees_cache = OrderedDict(parsed)
    result = OrderedDict()
    for key, item in self._trees_cache.items():
        # Align words with tags from the tree's sorted POS sequence.
        tags = sorted(item.tree.pos())
        result[key] = [(word, tag)
                       for word, (_, tag) in zip(item.sent, tags)]
    return result
def __init__(self, asset_finder):
    """Initialize empty tracking state bound to `asset_finder`."""
    self.asset_finder = asset_finder
    # sid => position object
    self.positions = positiondict()
    # Temporary store regenerated from .positions as needed.
    self._positions_store = zp.Positions()
    # Ordered multiplier tables for fast value/exposure computation.
    self._position_value_multipliers = OrderedDict()
    self._position_exposure_multipliers = OrderedDict()
    # Dividends declared but not yet paid out.
    self._unpaid_dividends = pd.DataFrame(
        columns=zp.DIVIDEND_PAYMENT_FIELDS,
    )
def __init__(self, asset_finder, data_frequency):
    """Initialize empty tracking state for the given data frequency."""
    self.asset_finder = asset_finder
    self.data_frequency = data_frequency
    # sid => position object
    self.positions = positiondict()
    # Temporary store regenerated from .positions as needed.
    self._positions_store = zp.Positions()
    # Ordered multiplier tables for fast value/exposure computation.
    self._position_value_multipliers = OrderedDict()
    self._position_exposure_multipliers = OrderedDict()
    # Cash and stock dividends declared but not yet paid out.
    self._unpaid_dividends = {}
    self._unpaid_stock_dividends = {}
def test_equality(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od1 = OrderedDict(pairs) od2 = OrderedDict(pairs) self.assertEqual(od1, od2) # same order implies equality pairs = pairs[2:] + pairs[:2] od2 = OrderedDict(pairs) self.assertNotEqual(od1, od2) # different order implies inequality # comparison to regular dict is not order sensitive self.assertEqual(od1, dict(od2)) self.assertEqual(dict(od2), od1) # different length implied inequality self.assertNotEqual(od1, OrderedDict(pairs[:-1]))
def test_init(self): with self.assertRaises(TypeError): OrderedDict([('a', 1), ('b', 2)], None) # too many args pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)] self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs) # dict input self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs) # kwds input self.assertEqual(list(OrderedDict(pairs).items()), pairs) # pairs input self.assertEqual( list( OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5).items()), pairs) # mixed input # make sure no positional args conflict with possible kwdargs self.assertEqual(list(OrderedDict(self=42).items()), [('self', 42)]) self.assertEqual(list(OrderedDict(other=42).items()), [('other', 42)]) self.assertRaises(TypeError, OrderedDict, 42) self.assertRaises(TypeError, OrderedDict, (), ()) self.assertRaises(TypeError, OrderedDict.__init__) # Make sure that direct calls to __init__ do not clear previous contents d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)]) d.__init__([('e', 5), ('f', 6)], g=7, d=4) self.assertEqual(list(d.items()), [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])
def __init__(self, asset_finder):
    """Initialize empty tracking state bound to `asset_finder`."""
    self.asset_finder = asset_finder
    # sid => position object
    self.positions = positiondict()
    # Temporary store regenerated from .positions as needed.
    self._positions_store = zp.Positions()
    # Ordered multiplier tables for fast value/exposure computation.
    self._position_value_multipliers = OrderedDict()
    self._position_exposure_multipliers = OrderedDict()
    # Dividends declared but not yet paid out.
    self._unpaid_dividends = pd.DataFrame(
        columns=zp.DIVIDEND_PAYMENT_FIELDS,
    )
    # Dict, keyed on dates, that contains lists of close position events
    # for any Assets in this tracker's positions
    self._auto_close_position_sids = {}
def groupby_pandas(self_or_cls, ndmapping, dimensions, container_type,
                   group_type, sort=False, **kwargs):
    # Group `ndmapping` over the supplied key `dimensions` via a pandas
    # groupby, returning a `container_type` keyed by group value and
    # holding `group_type` instances.
    if 'kdims' in kwargs:
        idims = [ndmapping.get_dimension(d) for d in kwargs['kdims']]
    else:
        # Dimensions not grouped over remain key dimensions of each group.
        idims = [dim for dim in ndmapping.kdims if dim not in dimensions]
    all_dims = [d.name for d in ndmapping.kdims]
    inds = [ndmapping.get_dimension_index(dim) for dim in idims]
    # Extracts per-group key components; empty tuple when no idims remain.
    getter = operator.itemgetter(*inds) if inds else lambda x: tuple()
    # Build a MultiIndex'd DataFrame so pandas performs the grouping.
    multi_index = pd.MultiIndex.from_tuples(ndmapping.keys(),
                                            names=all_dims)
    df = pd.DataFrame(list(map(wrap_tuple, ndmapping.values())),
                      index=multi_index)
    # Forward the mapping's parameters to each group; caller kwargs win.
    kwargs = dict(dict(get_param_values(ndmapping), kdims=idims), **kwargs)
    groups = ((wrap_tuple(k),
               group_type(OrderedDict(unpack_group(group, getter)),
                          **kwargs))
              for k, group in df.groupby(
                  level=[d.name for d in dimensions]))
    if sort:
        # Re-sort groups to match the key order of the original mapping.
        selects = list(get_unique_keys(ndmapping, dimensions))
        groups = sorted(groups, key=lambda x: selects.index(x[0]))
    return container_type(groups, kdims=dimensions)
def all_from_json_definition(cls, definition, translation_list):
    """Build FormChoice objects from a JSON choices definition.

    Args:
        definition: iterable of choice dicts, each expected to carry
            'name' and 'list_name' keys (entries missing either are
            skipped) and optionally 'label' or 'image'.
        translation_list: sequence of translation names used as keys for
            each option's 'labels' mapping.

    Returns:
        dict mapping each list_name to its FormChoice instance.
    """
    all_choices = {}
    for choice_definition in definition:
        choice_name = choice_definition.get('name')
        choice_key = choice_definition.get('list_name')
        if not choice_name or not choice_key:
            # Skip malformed entries missing a name or a list name.
            continue
        if choice_key not in all_choices:
            all_choices[choice_key] = FormChoice(choice_key)
        choices = all_choices[choice_key]
        option = choices.options[choice_name] = {}
        # apparently choices dont need a label if they have an image
        if 'label' in choice_definition:
            _label = choice_definition['label']
        else:
            _label = choice_definition.get('image')
        if isinstance(_label, basestring):
            _label = [_label]
        elif _label is None and len(translation_list) == 1:
            _label = [None]
        # NOTE(review): `basestring` implies a py2-compat alias defined
        # elsewhere in this module — confirm. Also, if _label is still
        # None here (no label/image, multiple translations) the zip()
        # below raises TypeError; presumably upstream validation prevents
        # that case — confirm.
        option['labels'] = OrderedDict(zip(translation_list, _label))
        option['name'] = choice_name
    return all_choices
def blocks(self):
    """
    :returns: the raw representation of the trees in the treebank,
        read and cached on first access."""
    cache = self._block_cache
    if cache is None:
        cache = self._block_cache = OrderedDict(self._read_blocks())
    return cache
def test_clear(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] shuffle(pairs) od = OrderedDict(pairs) self.assertEqual(len(od), len(pairs)) od.clear() self.assertEqual(len(od), 0)
class AutoSummClassDocumenter(ClassDocumenter, AutosummaryDocumenter): """Class documentor suitable for the :class:`AutoSummDirective` This class has the same functionality as the base :class:`sphinx.ext.autodoc.ClassDocumenter` class but with an additional `autosummary` option to provide the ability to provide a summary of all methods and attributes at the beginning. It's priority is slightly higher than the one of the ClassDocumenter""" #: slightly higher priority than #: :class:`sphinx.ext.autodoc.ClassDocumenter` priority = ClassDocumenter.priority + 0.1 #: original option_spec from :class:`sphinx.ext.autodoc.ClassDocumenter` #: but with additional autosummary boolean option option_spec = ClassDocumenter.option_spec.copy() option_spec['autosummary'] = bool_option #: Add options for members for the autosummary for _option in member_options.intersection(option_spec): option_spec['autosummary-' + _option] = option_spec[_option] del _option member_sections = OrderedDict([ (ad.ClassDocumenter.member_order, 'Classes'), (ad.MethodDocumenter.member_order, 'Methods'), (ad.AttributeDocumenter.member_order, 'Attributes'), ]) """:class:`~collections.OrderedDict` that includes the autosummary sections
def __init__(self, data, **params):
    # Normalize dimension parameters before delegating: resolve group
    # aliases to canonical names and coerce plain strings to Dimension
    # objects, then initialize the dimension caches.
    for group in self._dim_groups + list(self._dim_aliases.keys()):
        if group in ['deep_dimensions', 'ddims']:
            # Derived groups; nothing to coerce.
            continue
        if group in params:
            if group in self._dim_aliases:
                # Rewrite an aliased group name to its canonical form.
                params[self._dim_aliases[group]] = params.pop(group)
                group = self._dim_aliases[group]
            if group == 'cdims':
                # Constant dimensions map Dimension -> constant value.
                dimensions = {
                    d if isinstance(d, Dimension) else Dimension(d): val
                    for d, val in params.pop(group).items()
                }
            else:
                dimensions = [
                    d if isinstance(d, Dimension) else Dimension(d)
                    for d in params.pop(group)
                ]
            params[group] = dimensions
    super(Dimensioned, self).__init__(data, **params)
    self.ndims = len(self.kdims)
    # Caches for constant values and dimension names, used for lookups.
    cdims = [(d.name, val) for d, val in self.cdims.items()]
    self._cached_constants = OrderedDict(cdims)
    self._cached_index_names = [d.name for d in self.kdims]
    self._cached_value_names = [d.name for d in self.vdims]
    self._settings = None
def add_dimension(self, dimension, dim_pos, dim_val, **kwargs):
    """
    Clone this mapping with an extra key dimension inserted at
    ``dim_pos``, assigning every existing item the key value ``dim_val``
    along the new dimension. Particularly useful for merging several
    mappings together.
    """
    if isinstance(dimension, str):
        dimension = Dimension(dimension)
    if dimension.name in self._cached_index_names:
        raise Exception(
            '{dim} dimension already defined'.format(dim=dimension.name))
    dimensions = self.key_dimensions[:]
    dimensions.insert(dim_pos, dimension)
    expanded = OrderedDict()
    for key, value in self.data.items():
        widened = list(key)
        widened.insert(dim_pos, dim_val)
        expanded[tuple(widened)] = value
    return self.clone(expanded, key_dimensions=dimensions, **kwargs)
def __init__(self, versions=None, title='Submissions', id_string=None,
             default_version_id_key='__version__', strict_schema=False,
             root_node_name='data', asset_type=None, submissions_xml=None):
    """Store form metadata and load the supplied versions."""
    # Normalize `versions`: None/falsy -> empty list; a single dict is
    # wrapped so the rest of the code can treat it as an iterable.
    if not versions:
        versions = []
    if isinstance(versions, dict):
        versions = [versions]
    self.versions = OrderedDict()
    self.title = title
    self.id_string = id_string
    # Name of the submission field that stores the version ID.
    self.default_version_id_key = default_version_id_key
    self.strict_schema = strict_schema
    self.root_node_name = root_node_name
    self.asset_type = asset_type
    # NOTE(review): submissions_xml is accepted but unused here —
    # confirm whether callers rely on it.
    self.load_all_versions(versions)
class AutoSummModuleDocumenter(ModuleDocumenter, AutosummaryDocumenter): """Module documentor suitable for the :class:`AutoSummDirective` This class has the same functionality as the base :class:`sphinx.ext.autodoc.ModuleDocumenter` class but with an additional `autosummary` and the :meth:`get_grouped_documenters` method. It's priority is slightly higher than the one of the ModuleDocumenter.""" #: slightly higher priority than #: :class:`sphinx.ext.autodoc.ModuleDocumenter` priority = ModuleDocumenter.priority + 0.1 #: original option_spec from :class:`sphinx.ext.autodoc.ModuleDocumenter` #: but with additional autosummary boolean option option_spec = ModuleDocumenter.option_spec.copy() option_spec['autosummary'] = bool_option #: Add options for members for the autosummary for _option in member_options.intersection(option_spec): option_spec['autosummary-' + _option] = option_spec[_option] del _option member_sections = OrderedDict([ (ad.ClassDocumenter.member_order, 'Classes'), (ad.ExceptionDocumenter.member_order, 'Exceptions'), (ad.FunctionDocumenter.member_order, 'Functions'), (ad.DataDocumenter.member_order, 'Data'), ]) """:class:`~collections.OrderedDict` that includes the autosummary sections
def __init__(self, items=None, identifier=None, parent=None):
    """
    identifier: A string identifier for the current node (if any)
    parent: The parent node (if any)
    items: Items as (path, value) pairs to construct (sub)tree down to
        given leaf values.

    Note that the root node does not have a parent and does not require
    an identifier.
    """
    # Assign through __dict__ directly to bypass any attribute-access
    # machinery on this class.
    d = self.__dict__
    d['parent'] = parent
    d['identifier'] = util.sanitize_identifier(identifier, escape=False)
    d['children'] = []
    d['_fixed'] = False
    d['_fixed_error'] = 'No attribute %r in this AttrTree, and none can be added because fixed=True'
    d['data'] = OrderedDict()
    # Normalize `items` into a concrete list of (path, value) pairs.
    if isinstance(items, OrderedDict):
        items = items.items()  # Python 3
    pairs = list(items) if items else []
    for path, item in pairs:
        self.set_path(path, item)
def __setstate__(self, state):
    """Restore a pickled PerformancePeriod, upgrading v1 state.

    Raises:
        ValueError: if the saved state predates the oldest supported
            version.
    """
    OLDEST_SUPPORTED_STATE = 1
    version = state.pop(VERSION_LABEL)
    if version < OLDEST_SUPPORTED_STATE:
        # ValueError instead of BaseException: still reaches callers that
        # caught BaseException, but is also catchable by ordinary
        # `except Exception` handlers.
        raise ValueError("PerformancePeriod saved state is too old.")
    # Rebuild containers with their default-factory semantics; the plain
    # mappings coming out of the pickle are merged back in.
    processed_transactions = defaultdict(list)
    processed_transactions.update(state.pop('processed_transactions'))
    orders_by_id = OrderedDict()
    orders_by_id.update(state.pop('orders_by_id'))
    orders_by_modified = defaultdict(OrderedDict)
    orders_by_modified.update(state.pop('orders_by_modified'))
    self.processed_transactions = processed_transactions
    self.orders_by_id = orders_by_id
    self.orders_by_modified = orders_by_modified
    # pop positions to use for v1
    positions = state.pop('positions', None)
    self.__dict__.update(state)
    if version == 1:
        # version 1 had PositionTracker logic inside of Period
        # we create the PositionTracker here.
        # Note: that in V2 it is assumed that the position_tracker
        # will be dependency injected and so is not reconstructed
        assert positions is not None, "positions should exist in v1"
        position_tracker = PositionTracker()
        position_tracker.update_positions(positions)
        self.position_tracker = position_tracker
def test_yaml_linkage(self): # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature. # In yaml, lists are native but tuples are not. pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) # yaml.dump(od) --> # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n - [b, 2]\n' self.assertTrue(all(type(pair) == list for pair in od.__reduce__()[1]))
def rollover(self):
    """Roll period-end totals into the next period's starting state and
    reset all per-period accumulators."""
    self.starting_value = self.ending_value
    self.starting_cash = self.ending_cash
    # Flows and P&L restart from zero each period.
    self.period_cash_flow = 0.0
    self.pnl = 0.0
    # Fresh per-period order/transaction bookkeeping.
    self.processed_transactions = defaultdict(list)
    self.orders_by_id = OrderedDict()
    self.orders_by_modified = defaultdict(OrderedDict)
def test_pickle_recursive(self): od = OrderedDict() od[1] = od for proto in range(-1, pickle.HIGHEST_PROTOCOL + 1): dup = pickle.loads(pickle.dumps(od, proto)) self.assertIsNot(dup, od) self.assertEqual(list(dup.keys()), [1]) self.assertIs(dup[1], dup)
def test_delitem(self): pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)] od = OrderedDict(pairs) del od['a'] self.assertNotIn('a', od) with self.assertRaises(KeyError): del od['a'] self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])