Example #1
from collections import OrderedDict

def solve():
    """Problem solution implementation."""
    def next_split(length):
        d = (length - 1) / 2.0  # float division: round()/int() below yield the two (possibly unequal) halves
        return [int(round(d)), int(d)]

    n, k = [int(x) for x in raw_input().split()]

    # optimization
    if k == n:
        return '0 0'

    c = OrderedDict({n: 1})
    while c.viewkeys() and k > 1:
        k -= 1
        length = c.iterkeys().next()  # First key == largest key
        new_keys = filter(lambda x: x > 0, next_split(length))
        for nk in new_keys:
            if nk in c:
                c[nk] += 1
            else:
                c[nk] = 1
        # Delete obsolete keys to save memory
        c[length] -= 1
        if c[length] == 0:
            del c[length]

    l_r = next_split(c.iterkeys().next())
    return '{} {}'.format(max(l_r), min(l_r))
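
A minimal driver for solve() is sketched below; it assumes Python 2 (the code uses raw_input and iterkeys) and redirects stdin so the function can be exercised without a terminal. The sample input is made up.

import sys
from StringIO import StringIO

sys.stdin = StringIO('5 2\n')  # illustrative input: n=5, k=2
print solve()                  # prints the two distances for the k-th arrival, largest first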
Example #2
    class StubBasicAgent(agent_cls):
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs
            self.request_history = OrderedDict()
            self.request_queue = OrderedDict()
            self.live_request_history = OrderedDict()

        @defer.inlineCallbacks
        def replay_live(self):
            """
            Performs live requests with a live agent. Requires networking.
            This is a tool that is useful for generating live responses for
            requests that have been recorded by the stub agent.

            Live requests will only be performed once per request.
            """
            live_agent = agent_cls(*self.args, **self.kwargs)
            for stub_response in self.request_history.viewkeys():
                if stub_response not in self.live_request_history:
                    args, kwargs = self.request_history[stub_response]
                    try:
                        live_response = yield live_agent.request(*args, **kwargs)
                    except Exception as e:  # pylint: disable=broad-except
                        live_response = e
                    self.live_request_history[stub_response] = ((args, kwargs), live_response)
            yield defer.succeed(None)

        def request(self, *args, **kwargs):
            d_response = defer.Deferred()
            self.request_history[d_response] = (args, kwargs)
            self.request_queue[d_response] = (args, kwargs)
            return d_response

        @staticmethod
        def stub_response(method, version, code, phrase, headers, body):
            """ Build a stub response object. """
            transport = StringTransport()
            res = client.Response(version, code, phrase, headers, transport)
            res._bodyDataReceived(body)
            res._bodyDataFinished()
            return BasicResponse()(res, method).result

        def respond(self, version, code, phrase, headers, body):
            """ Respond to requests in FIFO order. """
            d_response, params = self.request_queue.popitem(False)
            args, kwargs = params
            method = args[0] if args else kwargs['method']
            response = self.stub_response(method, version, code, phrase, headers, body)
            d_response.callback(response)

        def fail(self, reason):
            """
            Fail requests in FIFO order.
            reason: an Exception instance to pass to the errback chain.
            """
            d_response, _ = self.request_queue.popitem(False)
            d_response.errback(reason)
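
The class above is defined inside a factory that closes over agent_cls; the sketch below assumes that factory exposes it, that Twisted is installed (Headers is twisted.web.http_headers.Headers), and is illustrative rather than part of the original module.

from twisted.web.http_headers import Headers

stub = StubBasicAgent()

# The code under test issues a request; nothing touches the network yet.
d = stub.request('GET', 'http://example.com/')
d.addCallback(lambda response: response)

# The test answers queued requests in FIFO order with a canned response...
stub.respond(('HTTP', 1, 1), 200, 'OK', Headers({}), 'response body')

# ...or fails them instead:
# stub.fail(RuntimeError('simulated connection error'))

# replay_live() would then re-issue every recorded request with a real
# agent, once per request, filling live_request_history.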
Example #3
class DataSet(object):
    def __init__(self):
        self.entries = OrderedDict()

    def add_entry(self, entry):
        entry_id = getattr(entry, ID)

        assert entry_id not in self.entries.viewkeys()

        self.entries[entry_id] = entry

    def __getitem__(self, entry_id):
        return self.entries[entry_id]

    def __len__(self):
        return len(self.entries)

    def __iter__(self):
        return self.entries.itervalues()

    def __contains__(self, entry_id):
        return (entry_id in self.entries.viewkeys())
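
A small usage sketch follows; ID is a module constant naming the identifier attribute, and this sketch assumes ID = 'id'. Entries may be any objects carrying that attribute.

from collections import namedtuple

ID = 'id'                               # assumption for this sketch
Entry = namedtuple('Entry', ['id', 'payload'])

ds = DataSet()
ds.add_entry(Entry(id=1, payload='first'))
ds.add_entry(Entry(id=2, payload='second'))

assert len(ds) == 2
assert 1 in ds                          # membership is checked against entry ids
assert ds[2].payload == 'second'        # lookup is by entry id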
Example #4
class DotMap(MutableMapping, OrderedDict):
    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        self._dynamic = True
        if kwargs:
            if '_dynamic' in kwargs:
                self._dynamic = kwargs['_dynamic']
        if args:
            d = args[0]
            if isinstance(d, dict):
                for k, v in self.__call_items(d):
                    if isinstance(v, dict):
                        v = DotMap(v, _dynamic=self._dynamic)
                    if type(v) is list:
                        l = []
                        for i in v:
                            n = i
                            if type(i) is dict:
                                n = DotMap(i, _dynamic=self._dynamic)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                if k != '_dynamic':
                    self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
            # automatically extend to new DotMap
            self[k] = DotMap()
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {
                '_map', '_dynamic', '_ipython_canary_method_should_not_exist_'
        }:
            super(DotMap, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k in {
                '_map', '_dynamic', '_ipython_canary_method_should_not_exist_'
        }:
            return super(DotMap, self).__getattr__(k)
        else:
            return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            # bizarre recursive assignment situation (why someone would do this is beyond me)
            if id(v) == id(self):
                items.append('{0}=DotMap(...)'.format(k))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        joined = ', '.join(items)
        out = '{0}({1})'.format(self.__class__.__name__, joined)
        return out

    def __repr__(self):
        return str(self)

    def toDict(self):
        d = {}
        for k, v in self.items():
            if type(v) is DotMap:
                # bizarre recursive assignment support
                if id(v) == id(self):
                    v = d
                else:
                    v = v.toDict()
            elif type(v) in (list, tuple):
                l = []
                for i in v:
                    n = i
                    if type(i) is DotMap:
                        n = i.toDict()
                    l.append(n)
                if type(v) is tuple:
                    v = tuple(l)
                else:
                    v = l
            d[k] = v
        return d

    def pprint(self, pformat='dict'):
        if pformat == 'json':
            print(dumps(self.toDict(), indent=4, sort_keys=True))
        else:
            pprint(self.toDict())

    def empty(self):
        return (not any(self))

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        if type(other) is DotMap:
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return DotMap(self)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = DotMap()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    # bannerStr
    def _getListStr(self, items):
        out = '['
        mid = ''
        for i in items:
            mid += '  {}\n'.format(i)
        if mid != '':
            mid = '\n' + mid
        out += mid
        out += ']'
        return out

    def _getValueStr(self, k, v):
        outV = v
        multiLine = len(str(v).split('\n')) > 1
        if multiLine:
            # push to next line
            outV = '\n' + v
        if type(v) is list:
            outV = self._getListStr(v)
        out = '{} {}'.format(k, outV)
        return out

    def _getSubMapDotList(self, pre, name, subMap):
        outList = []
        if pre == '':
            pre = name
        else:
            pre = '{}.{}'.format(pre, name)

        def stamp(pre, k, v):
            valStr = self._getValueStr(k, v)
            return '{}.{}'.format(pre, valStr)

        for k, v in subMap.items():
            if isinstance(v, DotMap) and v != DotMap():
                subList = self._getSubMapDotList(pre, k, v)
                outList.extend(subList)
            else:
                outList.append(stamp(pre, k, v))
        return outList

    def _getSubMapStr(self, name, subMap):
        outList = ['== {} =='.format(name)]
        for k, v in subMap.items():
            if isinstance(v, DotMap) and v != DotMap():
                # break down to dots
                subList = self._getSubMapDotList('', k, v)
                # add the divit
                # subList = ['> {}'.format(i) for i in subList]
                outList.extend(subList)
            else:
                out = self._getValueStr(k, v)
                # out = '> {}'.format(out)
                out = '{}'.format(out)
                outList.append(out)
        finalOut = '\n'.join(outList)
        return finalOut

    def bannerStr(self):
        lines = []
        previous = None
        for k, v in self.items():
            if previous == 'DotMap':
                lines.append('-')
            out = ''
            if isinstance(v, DotMap):
                name = k
                subMap = v
                out = self._getSubMapStr(name, subMap)
                lines.append(out)
                previous = 'DotMap'
            else:
                out = self._getValueStr(k, v)
                lines.append(out)
                previous = 'other'
        lines.append('--')
        s = '\n'.join(lines)
        return s
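
A brief sketch of the dynamic behaviour implemented in __getitem__ above: missing keys are created on access as nested DotMaps, unless the map was built with _dynamic=False.

m = DotMap()
m.people.john.age = 32          # intermediate DotMaps are created on access
print(m.toDict())               # {'people': {'john': {'age': 32}}}

strict = DotMap({'a': 1}, _dynamic=False)
print(strict.a)                 # 1
# strict.missing                # raises KeyError instead of auto-creating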
Example #5
class DotMap(OrderedDict):
    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        if args:
            d = args[0]
            if type(d) is dict:
                for k, v in self.__call_items(d):
                    if type(v) is dict:
                        v = DotMap(v)
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        if k not in self._map:
            # DON'T automatically extend to new DotMap
            # self[k] = DotMap()
            raise AttributeError('%s is not defined in DotMap' % k)
        return self._map[k]

    def __setattr__(self, k, v):
        if k == '_map':
            super(DotMap, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k == '_map':
            return super(DotMap, self).__getattr__(k)
        else:
            return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            items.append('{0}={1}'.format(k, repr(v)))
        out = 'DotMap({0})'.format(', '.join(items))
        return out

    def __repr__(self):
        return str(self)

    def toDict(self):
        d = {}
        for k, v in self.items():
            if type(v) is DotMap:
                v = v.toDict()
            d[k] = v
        return d

    def pprint(self):
        pprint(self.toDict())

    # proper dict subclassing
    def values(self):
        return self._map.values()

    @classmethod
    def parseOther(self, other):
        if type(other) is DotMap:
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return self

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = DotMap()
        d._map = OrderedDict.fromkeys(seq, value)
        return d
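
Unlike the dynamic DotMap in the previous example, this variant never auto-creates children; a quick sketch of the difference:

m = DotMap({'server': {'host': 'localhost', 'port': 8080}})
print(m.server.port)            # 8080; nested dicts are wrapped as DotMaps

try:
    m.server.timeout            # never defined, so nothing is created
except AttributeError as e:
    print(e)                    # timeout is not defined in DotMap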
Example #6
class DotMap(MutableMapping, OrderedDict):
    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        self._dynamic = True
        if kwargs:
            if "_dynamic" in kwargs:
                self._dynamic = kwargs["_dynamic"]
        if args:
            d = args[0]
            # for recursive assignment handling
            trackedIDs = {id(d): self}
            if isinstance(d, dict):
                for k, v in self.__call_items(d):
                    if isinstance(v, dict):
                        if id(v) in trackedIDs:
                            v = trackedIDs[id(v)]
                        else:
                            v = self.__class__(v, _dynamic=self._dynamic)
                            trackedIDs[id(v)] = v
                    if type(v) is list:
                        l = []
                        for i in v:
                            n = i
                            if isinstance(i, dict):
                                n = self.__class__(i, _dynamic=self._dynamic)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                if k != "_dynamic":
                    self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, "iteritems") and ismethod(getattr(obj, "iteritems")):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {
                "_map",
                "_dynamic",
                "_ipython_canary_method_should_not_exist_",
        }:
            super(DotMap, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k in {
                "_map",
                "_dynamic",
                "_ipython_canary_method_should_not_exist_",
        }:
            return super(DotMap, self).__getattr__(k)

        try:
            v = super(self.__class__, self).__getattribute__(k)
            return v
        except AttributeError:
            pass

        return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __add__(self, other):
        if self.empty():
            return other
        else:
            self_type = type(self).__name__
            other_type = type(other).__name__
            msg = "unsupported operand type(s) for +: '{}' and '{}'"
            raise TypeError(msg.format(self_type, other_type))

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            # recursive assignment case
            if id(v) == id(self):
                items.append("{0}={1}(...)".format(k, self.__class__.__name__))
            else:
                separator = "\n" if isinstance(v, DotMap) else " "
                attr_str = f"{k}:{separator}{v}"
                attr_str = self._indent(attr_str, 2)
                items.append(attr_str)
        joined = "\n".join(items)
        return joined

    def __repr__(self):
        return str(self)

    def toDict(self):
        d = {}
        for k, v in self.items():
            if issubclass(type(v), DotMap):
                # bizarre recursive assignment support
                if id(v) == id(self):
                    v = d
                else:
                    v = v.toDict()
            elif type(v) in (list, tuple):
                l = []
                for i in v:
                    n = i
                    if issubclass(type(i), DotMap):
                        n = i.toDict()
                    l.append(n)
                if type(v) is tuple:
                    v = tuple(l)
                else:
                    v = l
            d[k] = v
        return d

    def pprint(self, pformat="dict"):
        if pformat == "json":
            print(dumps(self.toDict(), indent=4, sort_keys=True))
        else:
            pprint(self.toDict())

    def empty(self):
        return not any(self)

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        if issubclass(type(other), DotMap):
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return self.__class__(self)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = cls()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    # bannerStr
    def _getListStr(self, items):
        out = "["
        mid = ""
        for i in items:
            mid += "  {}\n".format(i)
        if mid != "":
            mid = "\n" + mid
        out += mid
        out += "]"
        return out

    def _getValueStr(self, k, v):
        outV = v
        multiLine = len(str(v).split("\n")) > 1
        if multiLine:
            # push to next line
            outV = "\n" + v
        if type(v) is list:
            outV = self._getListStr(v)
        out = "{} {}".format(k, outV)
        return out

    def _getSubMapDotList(self, pre, name, subMap):
        outList = []
        if pre == "":
            pre = name
        else:
            pre = "{}.{}".format(pre, name)

        def stamp(pre, k, v):
            valStr = self._getValueStr(k, v)
            return "{}.{}".format(pre, valStr)

        for k, v in subMap.items():
            if isinstance(v, DotMap) and v != DotMap():
                subList = self._getSubMapDotList(pre, k, v)
                outList.extend(subList)
            else:
                outList.append(stamp(pre, k, v))
        return outList

    def _getSubMapStr(self, name, subMap):
        outList = ["== {} ==".format(name)]
        for k, v in subMap.items():
            if isinstance(v, self.__class__) and v != self.__class__():
                # break down to dots
                subList = self._getSubMapDotList("", k, v)
                # add the divit
                # subList = ['> {}'.format(i) for i in subList]
                outList.extend(subList)
            else:
                out = self._getValueStr(k, v)
                # out = '> {}'.format(out)
                out = "{}".format(out)
                outList.append(out)
        finalOut = "\n".join(outList)
        return finalOut

    def bannerStr(self):
        lines = []
        previous = None
        for k, v in self.items():
            if previous == self.__class__.__name__:
                lines.append("-")
            out = ""
            if isinstance(v, self.__class__):
                name = k
                subMap = v
                out = self._getSubMapStr(name, subMap)
                lines.append(out)
                previous = self.__class__.__name__
            else:
                out = self._getValueStr(k, v)
                lines.append(out)
                previous = "other"
        lines.append("--")
        s = "\n".join(lines)
        return s

    def _indent(self, s_, num_spaces):
        s = s_.split("\n")
        if len(s) == 1:
            return s_
        first = s.pop(0)
        s = [(num_spaces * " ") + line for line in s]
        s = "\n".join(s)
        s = first + "\n" + s
        return s
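
Two additions in this revision are sketched below: construction tracks already-seen dicts, so a self-referencing dict no longer recurses forever, and adding an empty map with + simply returns the other operand.

d = {'name': 'root'}
d['self'] = d                   # a dict that refers to itself

m = DotMap(d)                   # trackedIDs stops the infinite recursion
print(m.name)                   # root

merged = DotMap() + m           # an empty DotMap yields the other operand
print(merged is m)              # True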
Example #7
class DataTable(object):

    def __init__(self, iterable=None, headers=None, value_if_missing=None):
        """
        You must pass in an iterable of:

        1. dict, where the keys will be counted as the headers ("fields"),
        2. list/tuple/generator, where the first row will be assumed
           to be the fields.
        3. DataRow, from a previous DataTable.

        If your list of lists data doesn't have headers ("fields"),
        make some and pass them into the `headers` parameter.

        If your data has headers and you pass in headers anyways, headers
        acts as a filter and selects the subset of headers you want included.
        If you pass in a header that isn't in the data, there will be an error.

        ---

        If your data is CSV, TSV, or similar format, you can even copy-paste
        it into the relevant script for on-the-fly DataTable construction. See
        the DataTable.fromcsvstring() method for details.
        """
        self.__data = OrderedDict()

        if iterable is None:
            # TODO: this exists so that we can create a DataTable
            # TODO: with no data, but we can make headers
            # TODO: what's the best way to address this headers issue?
            if headers is not None:
                validate_fields(headers)
                for header in headers:
                    self.__data[header] = []
            return

        if not hasattr(iterable, '__iter__'):
            raise Exception("DataTable takes an iterable and "
                            "%s is not an iterable" % type(iterable))

        iterator = iterable.__iter__()
        first_row = iterator.next()

        # also identifies OrderedDict
        if isinstance(first_row, dict):
            if not headers:
                fields = first_row.keys()
            else:
                fields = headers
            validate_fields(fields)
            for field in fields:
                self.__data[field] = [first_row[field]]
            for i, item in enumerate(iterator, 1):
                for field in self.fields:
                    try:
                        value = item[field]
                    except KeyError:
                        if value_if_missing is not None:
                            self.__data[field].append(value_if_missing)
                            continue
                        missing = self.__data.viewkeys()-item.viewkeys()
                        raise KeyError("Row %s is missing fields: %s" %
                                       (i, missing))
                    except TypeError:
                        raise TypeError("Although the first row of your data "
                                        "was a `dict`-like object, "
                                        "row %s was: %s" % (i, type(item)))
                    self.__data[field].append(value)
        elif isinstance(first_row, (list, tuple, GeneratorType)):
            # identifies namedtuples, and similar, including this library's
            # DataRow object. in their case, not only will the first row
            # not be headers, but we must access `._fields` to get
            # the header information. from then on, they should be the same.
            if isinstance(first_row, tuple) and hasattr(first_row, '_fields'):
                if not headers:
                    fields = first_row._fields
                else:
                    fields = headers
                validate_fields(fields)
                for field, value in izip(fields, first_row):
                    self.__data[field] = [value]
            else:
                if not headers:
                    fields = list(first_row)
                else:
                    fields = headers
                    iterator = chain((first_row,), iterator)
                validate_fields(fields)
                for field in fields:
                    self.__data[field] = []

            for i, item in enumerate(iterator):
                if not isinstance(item, (list, tuple, GeneratorType)):
                    raise TypeError("Although the first row of your data "
                                    "was a `list`, `tuple`, or `generator`"
                                    "-like object, row %s was: "
                                    "%s" % (i, type(item)))
                if not hasattr(item, '__len__'):
                    item = tuple(item)
                if len(self.fields) != len(item):
                    raise Exception("Row %s's length (%s) does not match "
                                    "headers' length (%s)" % (i,
                                                              len(self.fields),
                                                              len(item)))
                for field, value in izip(self.fields, item):
                    self.__data[field].append(value)
        else:
            raise Exception("Unrecognized row type: %s" % type(first_row))

    @property
    def fields(self):
        """
        A shallow copy of the list of fields in the DataTable.

        If you modify the DataTable, this list will not update.
        """
        return self.__data.keys()

    @fields.setter
    def fields(self, new_fieldnames):
        """
        Overwrite all field names with new field names. Mass renaming.
        """
        if len(new_fieldnames) != len(self.fields):
            raise Exception("Cannot replace fieldnames (len: %s) with list of "
                            "incorrect length (len: %s)" % (len(new_fieldnames),
                                                            len(self.fields)))
        for old_name, new_name in izip(self.fields, new_fieldnames):
            # use pop instead of `del` in case old_name == new_name
            self.__data[new_name] = self.__data.pop(old_name)

    @classmethod
    def fromcolumns(cls, fields, columns):
        if len(fields) != len(columns):
            raise Exception("When constructing .fromcolumns, the number of "
                            "fields (%s) must equal the number of columns (%s)"
                            % (len(fields), len(columns)))
        new_table = cls()
        for field, column in izip(fields, columns):
            new_table[field] = column
        return new_table

    @classmethod
    def fromcsv(cls, path, delimiter=",", headers=None):
        f = open(path, 'r')
        reader = UnicodeRW.UnicodeDictReader(f,
                                             delimiter=delimiter)
        new_table = cls(reader, headers=headers)
        f.close()
        return new_table

    @classmethod
    def fromcsvstring(cls, csvstring, delimiter=",", quotechar="\""):
        """
        Takes one string that represents the entire contents of the CSV
        file, or similar delimited file.

        If you have a list of lists, where the first list is the headers,
        then use the main constructor.

        If you see an excess of whitespace in the first column of your data,
        this is probably because you tried to format a triple-quoted string
        literal nicely. Don't add any padding to the left.

        NOTE: Please prefix your triple-quoted string literal with `u` or `ur`
        as necessary. For copy-pasting directly from Excel, use `ur`. For
        copy-pasting from something Python (or similar) printed, use `ur`.
        For something just dumped from Python via __repr__ or some other
        text source that displays escape characters used, use `u`.

        ---

        Implementation notes:

        This solution was inspired by UnicodeRW.
        cStringIO.StringIO turns the passed string into a file-like
        (readable) object. The string must be encoded so that StringIO
        presents encoded text.

        In UnicodeRW, codecs.getreader('utf-8') reads an encoded file object
        to produce a decoded file object on the fly. We don't need this.

        We read the StringIO object line by line into csv.reader,
        which consumes encoded text and parses the CSV format out of it.
        Then we decode each cell one by one as we pass it into the data table.

        csv.QUOTE_NONE (as well as the r-prefix on r'''string''') are vital
        since we're copy-pasting directly from Excel. The string should be
        treated as "literally" ("raw") as possible.
        """
        if not isinstance(csvstring, basestring):
            raise Exception("If trying to construct a DataTable with "
                            "a list of lists, just use the main "
                            "constructor. Make sure to include a header row")

        stringio = StringIO(csvstring.encode('utf-8'))
        csv_data = csv.reader((line for line in stringio),
                              delimiter=delimiter,
                              dialect=csv.excel,
                              quotechar=quotechar,
                              quoting=csv.QUOTE_NONE)
        new_datatable = cls((s.decode('utf-8') for s in row)
                            for row in csv_data)
        for field in new_datatable.fields:
            new_datatable[field] = parse_column(new_datatable[field])
        return new_datatable
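
    # Hypothetical call matching the docstring above: paste the text flush
    # against the left margin and prefix the literal with u or ur as needed.
    #
    #     table = DataTable.fromcsvstring(u"name,score\nalice,3\nbob,5")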

    @classmethod
    def fromdict(cls, datadict):
        """
        Constructs a new DataTable using a dictionary of the format:

        {field1: [a,b,c],
         field2: [d,e,f],
         field3: [g,h,i]}

        ... which most closely matches the internal representation
        of the DataTable. If it is an OrderedDict, the key order
        will be preserved.
        """
        new_datatable = cls()
        for field, column in datadict.items():
            new_datatable[field] = column
        return new_datatable

    @classmethod
    def fromexcel(cls, path, sheet_name_or_num=0, headers=None):
        """
        Constructs a new DataTable from an Excel file.

        Specify sheet_name_or_num to load that specific sheet.

        Headers will be inferred automatically, but if you'd prefer
        to load only a subset of all the headers, pass in a list of the
        headers you'd like as `headers`.

        ---

        Alternatively, it's quite simple to:

            reader = ExcelReader('myfile.xls')
            reader.change_sheet('default')
            data = DataTable(reader)
        """
        reader = ExcelRW.UnicodeDictReader(path, sheet_name_or_num)
        return cls(reader, headers=headers)

    def __add__(self, other_datatable):
        return self.concat(other_datatable)

    def __contains__(self, fieldname):
        return fieldname in self.__data.viewkeys()

    def __delitem__(self, key):
        del self.__data[key]

    def __eq__(self, other):
        """
        Note that there is a bug (in my opinion) where two OrderedDicts
        are considered equal even if one dict has more key-value pairs
        after the initial matching set.

        The line where we compare the length of the two DataTables and
        the number of keys is meant to protect against this bug.
        """
        if not isinstance(other, DataTable):
            raise TypeError("Cannot compare DataTables with `%s` "
                            "for equality" % type(other))
        if len(self) != len(other) or len(self.fields) != len(other.fields):
            return False
        for selfrow, otherrow in izip(self, other):
            if selfrow != otherrow:
                return False
        return True

    def __getitem__(self, item):
        """
        Pass in a fieldname to retrieve a column:
        column = dt['column_name']

        Or slice the DataTable like a list:
        sliced = dt[:30:2]
        """
        if isinstance(item, slice):
            start, stop, step = item.indices(len(self))
            sliced_table = DataTable()
            for field in self.fields:
                sliced_table[field] = self.__data[field][start:stop:step]
            return sliced_table
        elif isinstance(item, (list, tuple)):
            return [self.__getitem__(colname) for colname in item]
        elif isinstance(item, basestring):
            if item not in self:
                raise KeyError("DataTable does not have column `%s`" % item)
            return self.__data[item]
        elif isinstance(item, (int, long)):
            return self.row(item)
        else:
            raise KeyError("DataTable does not support indexing with `%s`" %
                           type(item))

    def __len__(self):
        if not self.__data:
            return 0
        else:
            return len(self.__data.viewvalues().__iter__().next())

    def __repr__(self):
        return str(self)

    def __setitem__(self, fieldname, column):
        """
        Sets a column with the specified name to the specified value:

        dt['new_column'] = [1, 2, 3]

        1. If the column name doesn't exist, it will be created.
        2. If the column value provided is a tuple, it will be cast to a list.
        3. If the column value isn't a list, tuple, or array, it will
           be assumed that you're trying to set a whole column to some scalar
           value. For example:

           dt['another_column'] = True

           ... will set the entire column, for the length of the table, equal
           to `True`.
        """
        if not isinstance(column, (list, array)):
            if isinstance(column, tuple):
                column = list(column)
            else:
                column = [column] * len(self)
        if self.__data and len(column) != len(self):
            raise Exception("New column length (%s) must match length "
                            "of table (%s)" % (len(column), len(self)))
        self.__data[fieldname] = column

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return self.pretty

    def __print_table(self, row_delim, header_delim=None,
                      header_pad=u"", pad=u""):
        """
        row_delim         default delimiter inserted between columns of every
                          row in the table.
        header_delim      delimiter inserted within the headers. by default
                          takes the value of `row_delim`
        header_pad        put on the sides of the row of headers.
        pad               put on the sides of every row.
        """
        if header_delim is None:
            header_delim = row_delim
        num_cols = len(self.fields)
        accumulator = ((u"%s" + header_delim) * num_cols)[:-len(header_delim)]
        accumulator = ((header_pad + accumulator + header_pad + u"\n") %
                       tuple(self.fields))
        for datarow in self:
            rowstring = ((u"%s" + row_delim) * num_cols)[:-len(row_delim)]
            rowstring = (pad + rowstring + pad + u"\n") % tuple(datarow)
            accumulator += rowstring
        return accumulator[:-1]

    @property
    def html(self):
        accumulator = u"<table>"
        accumulator += u"<tr>" + u"".join([u"<th>"+field+u"</th>"
                                           for field in self.fields]) + u"</tr>"
        for datarow in self:
            accumulator += u"<tr>" + u"".join([u"<td>"+unicode(row)+u"</td>"
                                               for row in datarow]) + u"</tr>"
        return accumulator + u"</table>"

    @property
    def jira(self):
        header, row = u"||", u"|"
        return self.__print_table(row_delim=row,
                                  header_delim=header,
                                  header_pad=header,
                                  pad=row)

    # TODO: print a "prettytable" style table
    @property
    def pretty(self):
        return self.t

    @property
    def t(self):
        return self.__print_table(u"\t")

    def append(self, row):
        """
        Takes a dict, a list/tuple/generator, or a DataRow/namedtuple,
        and appends it to the "bottom" or "end" of the DataTable.

        dicts must share the same keys as the DataTable's columns.

        lists/tuples/generators are simply trusted to be in the correct order
        and of the correct type (if relevant).

        If the table being appended to is empty, the columns are inferred
        from the row being appended.

        DataRows and namedtuples' `_fields` protected class attribute is
        checked for the field names. Those are checked against the DataTable
        and then appended to the relevant columns using those field names.
        """
        if isinstance(row, dict):
            if self.fields and not set(row.keys()) == set(self.fields):
                raise Exception("Cannot append a dict to DataTable without "
                                "all keys matching (order being irrelevant).\n"
                                "dict: %s\nDataTable: %s" % (row.keys(),
                                                             self.fields))
            if not self.fields:
                for field in row.keys():
                    self.__data[field] = [row[field]]
            else:
                for field in self.fields:
                    self.__data[field].append(row[field])
        elif isinstance(row, (list, tuple, GeneratorType)):
            if isinstance(row, tuple) and hasattr(row, '_fields'):
                fieldnames = row._fields
                if self.fields and not set(fieldnames) == set(self.fields):
                    raise Exception("Cannot append a Datarow or namedtuple to "
                                    "DataTable without all fields matching "
                                    "(order being irrelevant).\n"
                                    "DataRow/namedtuple: %s\n"
                                    "DataTable: %s" % (fieldnames, self.fields))
                if not self.fields:
                    for fieldname, value in izip(fieldnames, row):
                        self.__data[fieldname] = [value]
                else:
                    for fieldname, value in izip(fieldnames, row):
                        self.__data[fieldname].append(value)
            else:
                if isinstance(row, GeneratorType):
                    row = tuple(row)
                if self.fields and not len(row) == len(self.fields):
                    raise Exception("The row being appended does not have the "
                                    "correct length. It should have a length "
                                    "of %s, but is %s" % (len(self.fields),
                                                          len(row)))
                if not self.fields:
                    raise Exception("Can't append a list/tuple/GeneratorType "
                                    "as a row if the table doesn't have "
                                    "columns defined yet.")
                # we're just going to hope that the generator's contents are
                # provided in the right order, and of the right type.
                for (_, column), element in izip(self.__data.items(), row):
                    column.append(element)
        else:
            raise Exception("Unable to append type `%s` to DataTable" %
                            type(row))

    def apply(self, func, *fields):
        """
        Applies the function, `func`, to every row in the DataTable.

        If no fields are supplied, the entire row is passed to `func`.
        If fields are supplied, the values at all of those fields
        are passed into func in that order.
        ---
        data['diff'] = data.apply(short_diff, 'old_count', 'new_count')
        """
        results = []
        for row in self:
            if not fields:
                results.append(func(row))
            else:
                if any(field not in self for field in fields):
                    for field in fields:
                        if field not in self:
                            raise Exception("Column `%s` does not exist "
                                            "in DataTable" % field)
                results.append(func(*[row[field] for field in fields]))
        return results

    def col(self, col_name_or_num):
        """
        Returns the column at the index or name given by `col_name_or_num`.
        """
        if isinstance(col_name_or_num, basestring):
            return self[col_name_or_num]
        elif isinstance(col_name_or_num, (int, long)):
            if col_name_or_num >= len(self.fields):
                raise IndexError("Invalid column index `%s` for DataTable" %
                                 col_name_or_num)
            return self.__data[self.fields[col_name_or_num]]

    def concat(self, other_datatable, inplace=False):
        """
        Concatenates two DataTables together, as long as column names
        are identical (ignoring order). The resulting DataTable's columns
        are in the order of the table whose `concat` method was called.
        """
        if not isinstance(other_datatable, DataTable):
            raise TypeError("`concat` requires a DataTable, not a %s" %
                            type(other_datatable))

        # if the self table is empty, we can just return the other table
        # if we need to do it in place, we just copy over the columns
        if not self.fields:
            if inplace:
                for field in other_datatable.fields:
                    self[field] = other_datatable[field]
                return self
            else:
                return other_datatable
        if not other_datatable.fields:
            return self

        if set(self.fields) != set(other_datatable.fields):
            raise Exception("Columns do not match:\nself: %s\nother: %s" %
                            (self.fields, other_datatable.fields))

        if inplace:
            for field in self.fields:
                self.__data[field] = self[field] + other_datatable[field]
            return self
        else:
            new_table = DataTable()
            for field in self.fields:
                new_table[field] = self[field] + other_datatable[field]
            return new_table

    def copy(self):
        return self.fromdict(self.__data)

    def distinct(self, fieldname, key=None):
        """
        Returns the unique values seen at `fieldname`.
        """
        return tuple(unique_everseen(self[fieldname], key=key))

    def groupby(self, *groupfields):
        """
        Groups rows in this table according to the unique combinations of
        `groupfields` combined.
        """
        return GroupbyTable(self, groupfields)

    # TODO: this is a placeholder and only does a very simple left join.
    def join(self, right_table, on):
        keymap = {}
        for row in right_table:
            if row[on] in keymap:
                keymap[row[on]].append(row)
            else:
                keymap[row[on]] = [row]
        new_table = []
        for row in self:
            if row[on] in keymap:
                left_dict = dict(row.items())
                for item in keymap[row[on]]:
                    left_dict_copy = left_dict.copy()
                    left_dict_copy.update(dict(item.items()))
                    new_table.append(left_dict_copy)
        return DataTable(new_table)

    def mask(self, masklist):
        """
        `masklist` is an array of Bools or equivalent.

        This returns a new DataTable using only the rows that were True
        (or equivalent) in the mask.
        """
        if not hasattr(masklist, '__len__'):
            masklist = tuple(masklist)

        if len(masklist) != len(self):
            raise Exception("Masklist length (%s) must match length "
                            "of DataTable (%s)" % (len(masklist), len(self)))

        new_datatable = DataTable()
        for field in self.fields:
            new_datatable[field] = list(compress(self[field], masklist))
        return new_datatable

    def mutapply(self, function, fieldname):
        """
        Applies `function` in-place to the field name specified.

        In other words, `mutapply` overwrites column `fieldname`
        with the results of applying `function` to each element of that column.
        """
        self[fieldname] = self.apply(function, fieldname)

    def rename(self, old_fieldname, new_fieldname):
        """
        Renames a specific field, and preserves the underlying order.
        """
        if old_fieldname not in self:
            raise Exception("DataTable does not have field `%s`" %
                            old_fieldname)

        if not isinstance(new_fieldname, basestring):
            raise ValueError("DataTable fields must be strings, not `%s`" %
                             type(new_fieldname))

        if old_fieldname == new_fieldname:
            return

        new_names = self.fields
        location = new_names.index(old_fieldname)
        del new_names[location]
        new_names.insert(location, new_fieldname)
        self.fields = new_names

    def reorder(self, fields_in_new_order):
        """
        Pass in field names in the order you wish them to be swapped.
        """
        if not len(fields_in_new_order) == len(self.fields):
            raise Exception("Fields to reorder with are not the same length "
                            "(%s) as the original fields (%s)" %
                            (len(fields_in_new_order), len(self.fields)))
        if not set(fields_in_new_order) == set(self.fields):
            raise Exception("Fields to reorder with should be the same "
                            "as the original fields")
        new = OrderedDict()
        for field in fields_in_new_order:
            new[field] = self.__data[field]
        self.__data = new

    def row(self, rownum):
        """
        Returns the row at index `rownum`.
        ---
        Note that the DataRow object returned that represents the data row
        is constructed on the fly and is just a shallow copy of
        the underlying data that does not update dynamically.
        """
        if rownum >= len(self):
            raise IndexError("Invalid row index `%s` for DataTable" % rownum)
        return datarow_constructor(self.fields)([self[field][rownum]
                                                 for field in self.fields])

    def sample(self, num):
        """
        Returns a new table with rows randomly sampled.

        We create a mask with `num` True bools, and fill it with False bools
        until it is the length of the table. We shuffle it, and apply that
        mask to the table.
        """
        if num > len(self):
            return self.copy()
        elif num < 0:
            raise IndexError("Cannot sample a negative number of rows "
                             "from a DataTable")

        random_row_mask = ([True] * num) + ([False] * (len(self) - num))
        shuffle(random_row_mask)

        sampled_table = self.mask(random_row_mask)
        random_col_name = 'random_sorting_column'
        while random_col_name in sampled_table:
            random_col_name = '%030x' % randrange(16**30)
        sampled_table[random_col_name] = [random()
                                          for _ in xrange(len(sampled_table))]
        sampled_table.sort(random_col_name, inplace=True)
        del sampled_table[random_col_name]
        return sampled_table

    def sort(self, fieldname, key=lambda x: x, desc=False, inplace=False):
        """
        This matches Python's built-in sorting signature closely.

        By default, a new DataTable will be returned and the original will
        not be mutated. If preferred, specify `inplace=True` in order to
        mutate the original table. Either way, a reference to the relevant
        table will be returned.
        """
        try:
            field_index = tuple(self.fields).index(fieldname)
        except ValueError:
            raise ValueError("Sorting on a field that doesn't exist: `%s`" %
                             fieldname)

        data_cols = izip(*sorted(izip(*[self.__data[field]
                                        for field in self.fields]),
                                 key=lambda row: key(row[field_index]),
                                 reverse=desc))

        target_table = self if inplace else DataTable()

        for field, data_col in izip(self.fields, data_cols):
            target_table[field] = list(data_col)

        # Note that sorting in-place still returns a reference
        # to the table being sorted, for convenience.
        return target_table

    def where(self, fieldname, value, negate=False):
        """
        Returns a new DataTable with rows only where the value at
        `fieldname` == `value`.
        """
        if negate:
            return self.mask([elem != value
                              for elem in self[fieldname]])
        else:
            return self.mask([elem == value
                              for elem in self[fieldname]])

    def wherefunc(self, func, negate=False):
        """
        Applies a function to an entire row and filters the rows based on the
        boolean output of that function.
        """
        if negate:
            return self.mask([not func(item) for item in self])
        else:
            return self.mask([func(item) for item in self])

    def wherein(self, fieldname, collection, negate=False):
        """
        Returns a new DataTable with rows only where the value at
        `fieldname` is contained within `collection`.
        """
        if negate:
            return self.mask([elem not in collection
                              for elem in self[fieldname]])
        else:
            return self.mask([elem in collection
                              for elem in self[fieldname]])

    def wheregreater(self, fieldname, value):
        """
        Returns a new DataTable with rows only where the value at
        `fieldname` > `value`.
        """
        return self.mask([elem > value for elem in self[fieldname]])

    def whereless(self, fieldname, value):
        """
        Returns a new DataTable with rows only where the value at
        `fieldname` < `value`.
        """
        return self.mask([elem < value for elem in self[fieldname]])

    def wherenot(self, fieldname, value):
        """
        Logical opposite of `where`.
        """
        return self.where(fieldname, value, negate=True)

    def wherenotfunc(self, func):
        """
        Logical opposite of `wherefunc`.
        """
        return self.wherefunc(func, negate=True)

    def wherenotin(self, fieldname, value):
        """
        Logical opposite of `wherein`.
        """
        return self.wherein(fieldname, value, negate=True)
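
    # Filtering sketch (illustrative only; `t`, 'status' and 'age' are assumed
    # example names). Each call returns a new DataTable:
    #
    #     active = t.where('status', 'active')
    #     teens = t.wherein('age', range(13, 20))
    #     adults = t.wherefunc(lambda row: row.age >= 18)
    #     not_banned = t.wherenot('status', 'banned')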

    def writecsv(self, path, delimiter=","):
        writer = UnicodeRW.UnicodeWriter(open(path, 'wb'),
                                         self.fields,
                                         delimiter=delimiter,
                                         lineterminator=u"\n")
        writer.writerow(self.fields)
        writer.writerows(self)
        writer.close()

    def writexlsx(self, path, sheetname="default"):
        """
        Writes this table to an .xlsx file at the specified path.

        If you'd like to specify a sheetname, you may do so.

        If you'd like to write one workbook with different DataTables
        for each sheet, import the `excel` function from acrylic. You
        can see that code in `utils.py`.

        Note that the outgoing file is an .xlsx file, so it makes sense to
        give the path that extension.
        """
        writer = ExcelRW.UnicodeWriter(path)
        writer.set_active_sheet(sheetname)
        writer.writerow(self.fields)
        writer.writerows(self)
        writer.save()
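
    # Output sketch (illustrative only): both writers emit the field names as a
    # header row followed by one row per record.
    #
    #     t.writecsv('report.csv', delimiter=';')
    #     t.writexlsx('report.xlsx', sheetname='summary')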

    def __iter__(self):
        datarow = datarow_constructor(self.fields)
        for values in izip(*[self.__data[field] for field in self.fields]):
            yield datarow(values)
Beispiel #8
0
class DotMap(OrderedDict):
    def __init__(self, *args, **kwargs):

        self._map = OrderedDict()
        self._dynamic = True    # setting this to False makes the default tests fail; that is expected, since they rely on children being created dynamically

        # ===================================
        if LORETO:
            global MY_DICT_TYPES  # global variable for the class
            self._dynamic = False    # setting this to False makes the default tests fail; that is expected, since they rely on children being created dynamically
            MY_DICT_TYPES = [dict, DotMap] # by Loreto (DEFAULT dictionary)
            # ===================================

        if kwargs:
            if '_dynamic' in kwargs:
                self._dynamic = kwargs['_dynamic']
        if args:
            d = args[0]
            if isinstance(d, dict):
                for k,v in self.__call_items(d):
                    if type(v) is dict:
                        v = DotMap(v, _dynamic=self._dynamic)
                    if type(v) is list:
                        l = []
                        for i in v:
                            n = i
                            if type(i) is dict:
                                n = DotMap(i, _dynamic=self._dynamic)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k,v in self.__call_items(kwargs):
                if k != '_dynamic':
                    self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v
    def __getitem__(self, k):
        if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
            # automatically extend to new DotMap
            self[k] = DotMap()
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {'_map','_dynamic', '_ipython_canary_method_should_not_exist_'}:
            super(DotMap, self).__setattr__(k,v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k in {'_map','_dynamic','_ipython_canary_method_should_not_exist_'}:
            super(DotMap, self).__getattr__(k)
        else:
            return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __str__(self):
        items = []
        for k,v in self.__call_items(self._map):
            # bizarre recursive assignment situation (why someone would do this is beyond me)
            if id(v) == id(self):
                items.append('{0}=DotMap(...)'.format(k))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        out = 'DotMap({0})'.format(', '.join(items))
        return out

    def __repr__(self):
        return str(self)

    def toDict(self):
        d = {}
        for k,v in self.items():
            if type(v) is DotMap:
                # bizarre recursive assignment support
                if id(v) == id(self):
                    v = d
                else:
                    v = v.toDict()
            elif type(v) is list:
                l = []
                for i in v:
                    n = i
                    if type(i) is DotMap:
                        n = i.toDict()
                    l.append(n)
                v = l
            d[k] = v
        return d

    def pprint(self):
        pprint(self.toDict())

        # ===================================
    if LORETO:
        # MY_DICT_TYPES = [dict, DotMap]
        def Ptr(self, listOfQualifiers, create=False):
            ptr = self
            for item in listOfQualifiers:
                if item in ptr:
                    ptr = ptr[item]
                else:
                    if create:
                        ptr[item] = DotMap()
                        ptr = ptr[item]
                    else:
                        return None

            return ptr

        def KeyTree(self, fPRINT=False):
            return DictToList.KeyTree(self, myDictTYPES=MY_DICT_TYPES, fPRINT=fPRINT)

        def KeyList(self):
            return DictToList.KeyList(self, myDictTYPES=MY_DICT_TYPES)


        def PrintTree(self, fEXIT=False, MaxLevel=10, header=None, printTYPE='LTKV', stackLevel=1):
            PrintDictionaryTree.PrintDictionary(self, myDictTYPES=MY_DICT_TYPES, printTYPE=printTYPE, fEXIT=fEXIT, MaxLevel=MaxLevel, header=header, stackLevel=stackLevel+1)

        printDict = PrintTree
        printTree = PrintTree

        def GetValue(self, listOfQualifiers=[], fPRINT=False):
            return DictToList.getValue(self, listOfQualifiers=listOfQualifiers, myDictTYPES=MY_DICT_TYPES, fPRINT=fPRINT)
        # ===================================

    def empty(self):
        return (not any(self))

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        if type(other) is DotMap:
            return other._map
        else:
            return other
    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)
    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)
    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)
    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)
    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)
    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)
    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)
    def __len__(self):
        return self._map.__len__()
    def clear(self):
        self._map.clear()
    def copy(self):
        return DotMap(self.toDict())
    def get(self, key, default=None):
        return self._map.get(key, default)
    def has_key(self, key):
        return key in self._map
    def iterkeys(self):
        return self._map.iterkeys()
    def itervalues(self):
        return self._map.itervalues()
    def keys(self):
        return self._map.keys()
    def pop(self, key, default=None):
        return self._map.pop(key, default)
    def popitem(self):
        return self._map.popitem()
    def setdefault(self, key, default=None):
        self._map.setdefault(key, default)
    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)
    def viewitems(self):
        return self._map.viewitems()
    def viewkeys(self):
        return self._map.viewkeys()
    def viewvalues(self):
        return self._map.viewvalues()
    @classmethod
    def fromkeys(cls, seq, value=None):
        d = DotMap()
        d._map = OrderedDict.fromkeys(seq, value)
        return d
    def __getstate__(self): return self.__dict__
    def __setstate__(self, d): self.__dict__.update(d)
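

# A hedged usage sketch for the DotMap above (not part of the original source);
# it assumes the module-level LORETO flag and the helpers referenced under it
# are importable. When `_dynamic` is left True (the default unless LORETO
# overrides it), missing keys are created on access:
#
#     m = DotMap({'a': {'b': 1}})
#     m.a.b         # -> 1; nested dicts become DotMaps
#     m.c.d = 2     # -> intermediate DotMaps are created automatically
#     m.toDict()    # -> {'a': {'b': 1}, 'c': {'d': 2}}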
Beispiel #9
0
def main(argv=None):
    '''
    Handles command line arguments and gets things started.

    :param argv: List of arguments, as if specified on the command-line.
                 If None, ``sys.argv[1:]`` is used instead.
    :type argv: list of str
    '''
    # Get command line arguments
    parser = argparse.ArgumentParser(description="Combine MegaM files that \
                                                  contain features for the same\
                                                  files.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('megam_file',
                        help='MegaM input file(s). Each feature line must be \
                              preceded by a comment with the filename/ID that \
                              the features should be joined on.',
                        nargs='+')
    parser.add_argument('-b', '--binary',
                        help='Converts all of the features in the specified \
                              range of files to presence/absence binary \
                              features. Files are numbered starting from 1, and\
                              if 0 is specified with this flag, all files are\
                              converted.',
                        type=parse_num_list)
    parser.add_argument('--doubleup',
                        help='Keep both the binary and numeric versions of any\
                              feature you convert to binary.',
                        action='store_true')
    parser.add_argument('-c', '--common',
                        help='Only output features for filenames that are \
                              common to all MegaM files.',
                        action='store_true')
    parser.add_argument('--version', action='version',
                        version='%(prog)s {0}'.format(__version__))
    args = parser.parse_args(argv)

    # Make warnings from built-in warnings module get formatted more nicely
    logging.captureWarnings(True)
    logging.basicConfig(format=('%(asctime)s - %(name)s - %(levelname)s - ' +
                                '%(message)s'))
    logger = logging.getLogger(__name__)

    # Map from filenames to feature strings
    feature_dict = OrderedDict()
    class_dict = {}
    filename_set = set()

    # Set that will contain all of the features seen in previous files
    # (for duplicate detection)
    prev_feature_set = set()

    # Iterate through MegaM files
    for file_num, infile in enumerate(args.megam_file, start=1):
        # Initialize duplicate feature book-keeping variables
        curr_feature_set = set()

        # Initialize set for storing filenames mentioned in current MegaM file
        curr_filename_set = set()

        # Handle current MegaM file
        for curr_filename, class_name, curr_feature_dict in _MegaMDictIter(infile):
            # Remember which filenames appear in this file for the --common check
            curr_filename_set.add(curr_filename)
            if curr_filename in class_dict:
                if class_dict[curr_filename] != class_name:
                    raise ValueError(("Inconsisten class label for instance " +
                                      "{} in {}.").format(curr_filename,
                                                          infile.name))
            else:
                class_dict[curr_filename] = class_name
            # If there are non-zero features, process them
            if curr_feature_dict:
                for feat_name, feat_val in iteritems(curr_feature_dict):
                    # Handle duplicate features
                    feat_name = get_unique_name(feat_name, prev_feature_set,
                                                infile.name)
                    # Ignore zero-valued features
                    try:
                        if feat_val != 'N/A' and float(feat_val) != 0:
                            # Convert feature to binary if necessary
                            if (args.binary and ((args.binary == [0]) or (file_num in args.binary))):
                                if args.doubleup:
                                    new_feat_pair = '{} {} '.format(feat_name, feat_val)
                                    feature_dict[curr_filename] = new_feat_pair if curr_filename not in feature_dict else feature_dict[curr_filename] + new_feat_pair
                                    curr_feature_set.add(feat_name)
                                    feat_name = get_unique_name(feat_name + "_binary", prev_feature_set, infile.name)
                                feat_val = 1

                            # Add feature pair to current string of features
                            new_feat_pair = '{} {} '.format(feat_name,
                                                            feat_val)
                            feature_dict[curr_filename] = new_feat_pair if curr_filename not in feature_dict else feature_dict[curr_filename] + new_feat_pair
                            curr_feature_set.add(feat_name)
                    except ValueError:
                        raise ValueError(("Invalid feature value in feature " +
                                          "pair '{} {}' for file {}").format(feat_name,
                                                                             feat_val,
                                                                             curr_filename).encode('utf-8'))

            # Otherwise warn about lack of features (although that really
            # just means all of them have zero values)
            else:
                if curr_filename not in feature_dict:
                    feature_dict[curr_filename] = ""
                logger.warning(("No features found for {} in {}. All are " +
                                "assumed to be zero.").format(curr_filename,
                                                              infile.name).encode('utf-8'))

        # Add current file's features to set of seen features
        prev_feature_set.update(curr_feature_set)

        # Either intersect or union current file's filenames with existing ones
        if args.common and filename_set:
            filename_set.intersection_update(curr_filename_set)
        else:
            filename_set.update(curr_filename_set)

    # Print new MegaM file
    for curr_filename in feature_dict.viewkeys():
        # Skip files that aren't common when args.common is true
        if curr_filename not in filename_set:
            continue
        print("# {}".format(curr_filename).encode('utf-8'))
        print("{}\t{}".format(class_dict[curr_filename],
                              feature_dict[curr_filename].strip()).encode('utf-8'))
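
# Hypothetical command-line sketch (not part of the original source; the script
# filename is an assumption). `-b 0` converts the features in every input file
# to binary and `-c` keeps only filenames common to all inputs; the combined
# MegaM output is printed to stdout:
#
#     python join_megam.py -c -b 0 part1.megam part2.megam > combined.megam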
Beispiel #10
0
class DotMap(MutableMapping, OrderedDict):
    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        if args:
            assert len(args) == 1
            d = args[0]
            if isinstance(d, dict):
                for k, v in self.__call_items(d):
                    if isinstance(v, dict):
                        v = DotMap(v)
                    if isinstance(v, list):
                        l = []
                        for i in v:
                            n = i
                            if isinstance(i, dict):
                                n = DotMap(i)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        if k not in self._map and k != IPYTHON_CANNARY:
            # automatically extend to new DotMap
            self[k] = DotMap()
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {'_map', IPYTHON_CANNARY}:
            super(DotMap, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k in {'_map', IPYTHON_CANNARY}:
            super(DotMap, self).__getattr__(k)
        else:
            return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            if id(v) == id(self):
                items.append('{0}=DotMap(...)'.format(k))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        joined = ', '.join(items)
        out = '{0}({1})'.format(self.__class__.__name__, joined)
        return out

    __repr__ = __str__

    def toDict(self):
        d = {}
        for k, v in self.items():
            if isinstance(v, DotMap):
                if id(v) == id(self):
                    v = d
                else:
                    v = v.toDict()
            elif isinstance(v, (list, tuple)):
                l = []
                for i in v:
                    n = i
                    if type(i) is DotMap:
                        n = i.toDict()
                    l.append(n)
                if isinstance(v, tuple):
                    v = tuple(l)
                else:
                    v = l
            d[k] = v
        return d

    def empty(self):
        return (not any(self))

    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parse_other(cls, other):
        if isinstance(other, DotMap):
            return other._map
        return other

    def __cmp__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parse_other(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return DotMap(self)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = DotMap()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)
Beispiel #11
0
class PersistentStream(Stream):
    ended = False

    def __init__(self, handle, streamdir, setdir, commit, parent=None):
        self.parent = parent
        self.handle = handle
        self.streamdir = streamdir
        self.setdir = setdir
        self.cache = deque((), self.CACHESIZE)
        self.streams = OrderedDict() if self.group else None
        # open right away: empty stream -> empty file
        path = os.path.join(streamdir, '{}-stream'.format(handle.name))
        self.streamfile = None if self.group else open(path, 'wb')
        self.done = set()
        self._commit = commit

    def add_msg(self, msg):
        assert not self.ended
        assert msg.handle == self.handle
        assert msg.idx is not None
        if self.group:
            assert isinstance(msg.data, Handle) or msg.data is THEEND, (self,
                                                                        msg)
        else:
            assert not isinstance(msg.data, Handle) or msg.idx == -1, (self,
                                                                       msg)

        expected_idx = self.cache[0].idx + 1 if self.cache else -1
        assert msg.idx == expected_idx, (msg.idx, expected_idx)

        self.logdebug('adding %r', msg)
        if msg.data is THEEND:
            self.ended = True
            self.logdebug('ended')
            if not self.group or self.done == self.streams.viewkeys():
                self._commit(self)
        elif isinstance(msg.data, Wrapper) and \
             msg.data._reader.schema.node.id == File.schema.node.id:
            assert msg.data._streamdir == self.streamdir
            stat = os.stat(msg.data.path)
            mtime = int(stat.st_mtime * 1e9)
            msg.data._reader = File.new_message(path=msg.data._reader.path,
                                                mtime=mtime, size=stat.st_size)\
                                   .as_reader()
        self.cache.appendleft(msg)
        if not self.group and msg.idx >= 0:
            # TODO: Maybe we want to have a builder and change to
            # reader before putting in cache?
            try:
                write_packed = msg.data._reader.as_builder().write_packed
            except AttributeError:
                assert msg.data is THEEND
                self.streamfile.close()
                return
            write_packed(self.streamfile)

    def get_msg(self, req):
        assert req.handle == self.handle

        offset = self.cache[0].idx - req.idx if self.cache else -1
        if offset < 0:
            assert not self.ended
            return None

        msg = self.cache[offset]
        assert msg.data is not None
        self.logdebug('return %r', msg)
        return msg

    def create_stream(self, name, group, header=None):
        assert self.group
        assert not self.ended
        handle = Handle(self.handle.setid,
                        self.handle.node,
                        name,
                        group=group,
                        header=header)
        stream = type(self)(handle,
                            self.streamdir,
                            self.setdir,
                            self.commit_substream,
                            parent=self)
        self.streams[handle.name] = stream
        return stream

    def commit_substream(self, substream):
        self.done.add(substream.handle.name)
        if self.ended and self.done == self.streams.viewkeys():
            self._commit(self)

    def make_file(self, name):
        assert not self.ended
        assert not self.group, (self, name)
        path = os.path.join(self.streamdir, name)
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o666)
        os.close(fd)  # shall we pass on the open handle?
        # TODO: consider storing path relative to setdir instead of name
        file = File.new_message(path=path).as_reader()
        wrapper = Wrapper(file, self.streamdir, self.setdir)
        self.logdebug('made file %r', wrapper)
        return wrapper
Beispiel #12
0
class BaseCache(object):
    """
    BaseCache is a class that saves and operates on an OrderedDict. It has a
    certain capacity, stored in the attribute `maxsize`. Whether this
    capacity has been reached can be checked with the boolean property
    `is_full`. To implement a custom cache, inherit from this class and
    override the methods ``__getitem__`` and ``__setitem__``.
    Call the method `sunpy.database.caching.BaseCache.callback` as soon
    as an item from the cache is removed.
    """
    __metaclass__ = ABCMeta

    def __init__(self, maxsize=float('inf')):
        self.maxsize = maxsize
        self._dict = OrderedDict()

    def get(self, key, default=None):  # pragma: no cover
        """Return the corresponding value to `key` if `key` is in the cache,
        `default` otherwise. This method has no side-effects, multiple calls
        with the same cache and the same passed key must always return the same
        value.

        """
        try:
            return self._dict[key]
        except KeyError:
            return default

    @abstractmethod
    def __getitem__(self, key):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if an item from the cache is
        attempted to be accessed.

        """
        return  # pragma: no cover

    @abstractmethod
    def __setitem__(self, key, value):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if a new value should be assigned
        to the given key. If the given key does already exist in the cache or
        not must be checked by the person who implements this method.
        """

    @abstractproperty
    def to_be_removed(self):
        """The item that will be removed on the next
        :meth:`sunpy.database.caching.BaseCache.remove` call.

        """

    @abstractmethod
    def remove(self):
        """Call this method to manually remove one item from the cache. Which
        item is removed, depends on the implementation of the cache. After the
        item has been removed, the callback method is called.

        """

    def callback(self, key, value):
        """This method should be called (by convention) if an item is removed
        from the cache because it is full. The passed key and value are the
        ones that are removed. By default this method does nothing, but it
        can be customized in a custom cache that inherits from this base class.

        """

    @property
    def is_full(self):
        """True if the number of items in the cache equals :attr:`maxsize`,
        False otherwise.

        """
        return len(self._dict) == self.maxsize

    def __delitem__(self, key):
        self._dict.__delitem__(key)

    def __contains__(self, key):
        return key in self._dict.keys()

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        for key in self._dict.__iter__():
            yield key

    def __reversed__(self):  # pragma: no cover
        for key in self._dict.__reversed__():
            yield key

    def clear(self):  # pragma: no cover
        return self._dict.clear()

    def keys(self):  # pragma: no cover
        return self._dict.keys()

    def values(self):  # pragma: no cover
        return self._dict.values()

    def items(self):  # pragma: no cover
        return self._dict.items()

    def iterkeys(self):  # pragma: no cover
        return self._dict.iterkeys()

    def itervalues(self):  # pragma: no cover
        for value in self._dict.itervalues():
            yield value

    def iteritems(self):  # pragma: no cover
        for key, value in six.iteritems(self._dict):
            yield key, value

    def update(self, *args, **kwds):  # pragma: no cover
        self._dict.update(*args, **kwds)

    def pop(self, key, default=MutableMapping._MutableMapping__marker):  # pragma: no cover
        return self._dict.pop(key, default)

    def setdefault(self, key, default=None):  # pragma: no cover
        return self._dict.setdefault(key, default)

    def popitem(self, last=True):  # pragma: no cover
        return self._dict.popitem(last)

    def __reduce__(self):  # pragma: no cover
        return self._dict.__reduce__()

    def copy(self):  # pragma: no cover
        return self._dict.copy()

    def __eq__(self, other):  # pragma: no cover
        return self._dict.__eq__(other)

    def __ne__(self, other):  # pragma: no cover
        return self._dict.__ne__(other)

    def viewkeys(self):  # pragma: no cover
        return self._dict.viewkeys()

    def viewvalues(self):  # pragma: no cover
        return self._dict.viewvalues()

    def viewitems(self):  # pragma: no cover
        return self._dict.viewitems()

    @classmethod
    def fromkeys(cls, iterable, value=None):  # pragma: no cover
        return OrderedDict.fromkeys(iterable, value)

    def __repr__(self):  # pragma: no cover
        return '{0}({1!r})'.format(self.__class__.__name__, dict(self._dict))
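

# A minimal sketch (not from the original source) of the subclassing pattern the
# BaseCache docstring describes: override __getitem__/__setitem__, provide
# to_be_removed/remove, and call callback() whenever an item is evicted.
class SimpleFIFOCache(BaseCache):
    def __getitem__(self, key):
        return self._dict[key]

    def __setitem__(self, key, value):
        # Evict the oldest entry before inserting a new key into a full cache.
        if key not in self._dict and self.is_full:
            self.remove()
        self._dict[key] = value

    @property
    def to_be_removed(self):
        # The oldest insertion is the next eviction candidate.
        return next(iter(self._dict.items()))

    def remove(self):
        key, value = self._dict.popitem(last=False)
        self.callback(key, value)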
Beispiel #13
0
    r_coord = OrderedDict()
    for key in pf.xyz_obs.keys():
        r_coord[key] = np.sqrt(
            np.sum(np.square(pf.xyz_obs[key] - hkl_xyz[key])))
    n_reflections = len(p_est.keys())

    # remove any peaks whose observed center is too distant from predicted center
    p_est = OrderedDict((key, val) for key, val in p_est.items()
                        if r_coord[key] < params['c_threshold'])
    p_std = OrderedDict((key, val) for key, val in p_std.items()
                        if r_coord[key] < params['c_threshold'])
    i_std = OrderedDict((key, val) for key, val in i_std.items()
                        if r_coord[key] < params['c_threshold'])
    i_est = OrderedDict((key, val) for key, val in i_est.items()
                        if r_coord[key] < params['c_threshold'])
    assert i_est.viewkeys() == p_est.viewkeys()
    print "%i reflections removed due to peak coordinates residual" % (
        n_reflections - len(p_est.keys()))

    # eliminate any Millers associated with negative intensities
    p_est = OrderedDict(
        (key, val) for key, val in p_est.items() if i_est[key] > 0)
    p_std = OrderedDict(
        (key, val) for key, val in p_std.items() if i_est[key] > 0)
    i_std = OrderedDict(
        (key, val) for key, val in i_std.items() if i_est[key] > 0)
    i_est = OrderedDict(
        (key, val) for key, val in i_est.items() if i_est[key] > 0)
    assert i_est.viewkeys() == p_est.viewkeys()

    # save information to savepath
Beispiel #14
0
class DotMap(OrderedDict):

	def __init__(self, *args, **kwargs):
		self._map = OrderedDict()
		if args:
			d = args[0]
			if type(d) is dict:
				for k,v in self.__call_items(d):
					if type(v) is dict:
						v = DotMap(v)
					self._map[k] = v
		if kwargs:
			for k,v in self.__call_items(kwargs):
				self._map[k] = v

	def __call_items(self, obj):
		if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
			return obj.iteritems()
		else:
			return obj.items()

	def items(self):
		return self.iteritems()

	def iteritems(self):
		return self.__call_items(self._map)

	def __iter__(self):
		return self._map.__iter__()

	def next(self):
		return self._map.next()

	def __setitem__(self, k, v):
		self._map[k] = v
	def __getitem__(self, k):
		if k not in self._map:
			# automatically extend to new DotMap
			self[k] = DotMap()
		return self._map[k]

	def __setattr__(self, k, v):
		if k == '_map':
			super(DotMap, self).__setattr__(k,v)
		else:
			self[k] = v

	def __getattr__(self, k):
		if k == '_map':
			super(DotMap, self).__getattr__(k)
		else:
			return self[k]

	def __delattr__(self, key):
		return self._map.__delitem__(key)

	def __contains__(self, k):
		return self._map.__contains__(k)

	def __str__(self):
		items = []
		for k,v in self.__call_items(self._map):
			items.append('{0}={1}'.format(k, repr(v)))
		out = 'DotMap({0})'.format(', '.join(items))
		return out

	def __repr__(self):
		return str(self)

	def toDict(self):
		d = {}
		for k,v in self.items():
			if type(v) is DotMap:
				v = v.toDict()
			d[k] = v
		return d

	def pprint(self):
		pprint(self.toDict())

	# proper dict subclassing
	def values(self):
		return self._map.values()

	@classmethod
	def parseOther(self, other):
		if type(other) is DotMap:
			return other._map
		else:
			return other	
	def __cmp__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__cmp__(other)
	def __eq__(self, other):
		other = DotMap.parseOther(other)
		if not isinstance(other, dict):
			return False
		return self._map.__eq__(other)
	def __ge__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__ge__(other)
	def __gt__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__gt__(other)
	def __le__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__le__(other)
	def __lt__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__lt__(other)
	def __ne__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__ne__(other)

	def __delitem__(self, key):
		return self._map.__delitem__(key)
	def __len__(self):
		return self._map.__len__()
	def clear(self):
		self._map.clear()
	def copy(self):
		return DotMap(self.toDict())
	def get(self, key, default=None):
		return self._map.get(key, default)
	def has_key(self, key):
		return key in self._map
	def iterkeys(self):
		return self._map.iterkeys()
	def itervalues(self):
		return self._map.itervalues()
	def keys(self):
		return self._map.keys()
	def pop(self, key, default=None):
		return self._map.pop(key, default)
	def popitem(self):
		return self._map.popitem()
	def setdefault(self, key, default=None):
		self._map.setdefault(key, default)
	def update(self, *args, **kwargs):
		if len(args) != 0:
			self._map.update(*args)
		self._map.update(kwargs)
	def viewitems(self):
		return self._map.viewitems()
	def viewkeys(self):
		return self._map.viewkeys()
	def viewvalues(self):
		return self._map.viewvalues()
	@classmethod
	def fromkeys(cls, seq, value=None):
		d = DotMap()
		d._map = OrderedDict.fromkeys(seq, value)
		return d
Beispiel #15
0
class ConfigMap(MutableMapping, OrderedDict):

    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        # todo: simplify
        self._dynamic = True
        if kwargs:
            if '_dynamic' in kwargs:
                self._dynamic = kwargs['_dynamic']
                del kwargs['_dynamic']
        self._evaluate = True
        if kwargs:
            if '_evaluate' in kwargs:
                self._evaluate = kwargs['_evaluate']
                del kwargs['_evaluate']
        self._evaluated = False
        if kwargs:
            if '_evaluated' in kwargs:
                self._evaluated = kwargs['_evaluated']
                del kwargs['_evaluated']
        if args:
            d = args[0]
            if isinstance(d, dict):
                for k, v in self.__call_items(d):
                    if isinstance(v, dict):
                        v = ConfigMap(v, _dynamic=self._dynamic, _evaluate=self._evaluate, _evaluated=self._evaluated)
                    if type(v) is list:
                        l = []
                        for i in v:
                            n = i
                            if type(i) is dict:
                                n = ConfigMap(i, _dynamic=self._dynamic, _evaluate=self._evaluate,
                                              _evaluated=self._evaluated)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k, evaluate=None):
        if evaluate is None:
            evaluate = self._evaluate

        if k not in self._map:
            if k == '_ipython_canary_method_should_not_exist_':
                raise KeyError

            if self._dynamic:
                # automatically extend to new ConfigMap
                self[k] = ConfigMap()
            else:
                # todo: display full recursive path?
                raise KeyError("'%s' does not exist" % k)

        var = self._map[k]

        if evaluate:
            if isinstance(var, ConfigMethod):
                var = var.evaluate()
                # todo: return instead to avoid second config map eval?

            if isinstance(var, ConfigMap):
                var = var.evaluate()

        return var

    def __setattr__(self, k, v):
        if k in ['_map', '_dynamic', '_ipython_canary_method_should_not_exist_', '_evaluate', '_evaluated']:
            super(ConfigMap, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k in ['_map', '_dynamic', '_ipython_canary_method_should_not_exist_', '_evaluate', '_evaluated']:
            return self.__getattribute__(k)
        else:
            return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            # bizarre recursive assignment situation (why someone would do this is beyond me)
            if id(v) == id(self):
                items.append('{0}=ConfigMap(...)'.format(k))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        joined = ', '.join(items)
        out = '{0}({1})'.format(self.__class__.__name__, joined)
        return out

    def __repr__(self):
        return str(self)

    def toDict(self, evaluate=None, with_hidden=True):
        if evaluate is None:
            evaluate = bool(self._evaluate)

        d = {}
        for k, v in self.items():
            if evaluate and isinstance(v, ConfigMethod):
                v = v.evaluate()
            if isinstance(v, ConfigMap):
                v = v.toDict(evaluate=evaluate, with_hidden=with_hidden) if id(v) != id(self) else d
            elif isinstance(v, list):
                v = [i.toDict(evaluate=evaluate, with_hidden=with_hidden) if isinstance(i, ConfigMap) else i for i in v]
            elif isinstance(v, tuple):
                v = tuple(i.toDict(evaluate=evaluate, with_hidden=with_hidden) if isinstance(i, ConfigMap) else i for i in v)

            if with_hidden is False \
                    and (isinstance(k, str) and
                         ((k.startswith('_') and not k.endswith('_')) or k.startswith('~'))):
                continue

            d[k] = v

        return d

    def evaluate(self):
        if self._evaluated:
            return self

        # TODO: case where config method access a key of the config that is just being evaluated.
        #  shouldn't give an endless loop

        # todo: make more efficient
        return ConfigMap(self.toDict(evaluate=True), _dynamic=False, _evaluated=True)

    def pprint(self, pformat='json'):
        if pformat == 'json':
            print(dumps(self.toDict(), indent=4, sort_keys=True, default=str))
        else:
            pprint(self.toDict())

    def empty(self):
        return not any(self)

        # proper dict subclassing

    def values(self):
        return self._map.values()

        # ipython support

    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        if type(other) is ConfigMap:
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = ConfigMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = ConfigMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = ConfigMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = ConfigMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = ConfigMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = ConfigMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = ConfigMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return ConfigMap(self, _dynamic=self._dynamic, _evaluate=self._evaluate, _evaluated=self._evaluated)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = ConfigMap(_dynamic=False)
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    # bannerStr
    def _getListStr(self, items):
        out = '['
        mid = ''
        for i in items:
            mid += '  {}\n'.format(i)
        if mid != '':
            mid = '\n' + mid
        out += mid
        out += ']'
        return out

    def _getValueStr(self, k, v):
        outV = v
        multiLine = len(str(v).split('\n')) > 1
        if multiLine:
            # push to next line
            outV = '\n' + v
        if type(v) is list:
            outV = self._getListStr(v)
        out = '{} {}'.format(k, outV)
        return out

    def _getSubMapDotList(self, pre, name, subMap):
        outList = []
        if pre == '':
            pre = name
        else:
            pre = '{}.{}'.format(pre, name)

        def stamp(pre, k, v):
            valStr = self._getValueStr(k, v)
            return '{}.{}'.format(pre, valStr)

        for k, v in subMap.items():
            if isinstance(v, ConfigMap) and v != ConfigMap():
                subList = self._getSubMapDotList(pre, k, v)
                outList.extend(subList)
            else:
                outList.append(stamp(pre, k, v))
        return outList

    def _getSubMapStr(self, name, subMap):
        outList = ['== {} =='.format(name)]
        for k, v in subMap.items():
            if isinstance(v, ConfigMap) and v != ConfigMap():
                # break down to dots
                subList = self._getSubMapDotList('', k, v)
                # add the divit
                # subList = ['> {}'.format(i) for i in subList]
                outList.extend(subList)
            else:
                out = self._getValueStr(k, v)
                # out = '> {}'.format(out)
                out = '{}'.format(out)
                outList.append(out)
        finalOut = '\n'.join(outList)
        return finalOut

    def bannerStr(self):
        lines = []
        previous = None
        for k, v in self.items():
            if previous == 'ConfigMap':
                lines.append('-')
            out = ''
            if isinstance(v, ConfigMap):
                name = k
                subMap = v
                out = self._getSubMapStr(name, subMap)
                lines.append(out)
                previous = 'ConfigMap'
            else:
                out = self._getValueStr(k, v)
                lines.append(out)
                previous = 'other'
        lines.append('--')
        s = '\n'.join(lines)

        return s
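
# A hedged usage sketch for the ConfigMap above (not part of the original
# source; it assumes ConfigMethod and the other names this class references
# are importable). With _dynamic=True missing keys are created on access,
# while _dynamic=False raises KeyError instead:
#
#     cfg = ConfigMap({'model': {'layers': 4}}, _dynamic=False)
#     cfg.model.layers   # -> 4
#     cfg.optimizer      # -> KeyError: "'optimizer' does not exist"
#     cfg.toDict()       # -> {'model': {'layers': 4}}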
Beispiel #16
0
    class StubBasicAgent(agent_cls):
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs
            self.request_history = OrderedDict()
            self.request_queue = OrderedDict()
            self.live_request_history = OrderedDict()

        @defer.inlineCallbacks
        def replay_live(self):
            """
            Performs live requests with a live agent. Requires networking.
            This is a tool that is useful for generating live responses for
            requests that have been recorded by the stub agent.

            Live requests will only be performed once per request.
            """
            live_agent = agent_cls(*self.args, **self.kwargs)
            for stub_response in self.request_history.viewkeys():
                if stub_response not in self.live_request_history:
                    args, kwargs = self.request_history[stub_response]
                    try:
                        live_response = yield live_agent.request(
                            *args, **kwargs)
                    except Exception as e:  # pylint: disable=broad-except
                        live_response = e
                    self.live_request_history[stub_response] = ((args, kwargs),
                                                                live_response)
            yield defer.succeed(None)

        def request(self, *args, **kwargs):
            d_response = defer.Deferred()
            self.request_history[d_response] = (args, kwargs)
            self.request_queue[d_response] = (args, kwargs)
            return d_response

        @staticmethod
        def stub_response(method, version, code, phrase, headers, body):
            """ Build a stub response object. """
            transport = StringTransport()
            res = client.Response(version, code, phrase, headers, transport)
            res._bodyDataReceived(body)
            res._bodyDataFinished()
            return BasicResponse()(res, method).result

        def respond(self, version, code, phrase, headers, body):
            """ Respond to requests in FIFO order. """
            d_response, params = self.request_queue.popitem(False)
            args, kwargs = params
            method = args[0] if args else kwargs['method']
            response = self.stub_response(method, version, code, phrase,
                                          headers, body)
            d_response.callback(response)

        def fail(self, reason):
            """
            Fail requests in FIFO order.
            reason Exception. An exception instance to pass to the errback chain.
            """
            d_response, _ = self.request_queue.popitem(False)
            d_response.errback(reason)
Beispiel #17
0
class BaseCache(object):
    """
    BaseCache is a class that saves and operates on an OrderedDict. It has a
    certain capacity, stored in the attribute `maxsize`. Whether this
    capacity has been reached can be checked with the boolean property
    `is_full`. To implement a custom cache, inherit from this class and
    override the methods ``__getitem__`` and ``__setitem__``.
    Call the method `sunpy.database.caching.BaseCache.callback` as soon
    as an item from the cache is removed.
    """
    __metaclass__ = ABCMeta

    def __init__(self, maxsize=float('inf')):
        self.maxsize = maxsize
        self._dict = OrderedDict()

    def get(self, key, default=None):  # pragma: no cover
        """Return the corresponding value to `key` if `key` is in the cache,
        `default` otherwise. This method has no side-effects, multiple calls
        with the same cache and the same passed key must always return the same
        value.

        """
        try:
            return self._dict[key]
        except KeyError:
            return default

    @abstractmethod
    def __getitem__(self, key):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if an item from the cache is
        attempted to be accessed.

        """
        return  # pragma: no cover

    @abstractmethod
    def __setitem__(self, key, value):
        """abstract method: this method must be overwritten by inheriting
        subclasses. It defines what happens if a new value should be assigned
        to the given key. If the given key does already exist in the cache or
        not must be checked by the person who implements this method.
        """

    @abstractproperty
    def to_be_removed(self):
        """The item that will be removed on the next
        :meth:`sunpy.database.caching.BaseCache.remove` call.

        """

    @abstractmethod
    def remove(self):
        """Call this method to manually remove one item from the cache. Which
        item is removed, depends on the implementation of the cache. After the
        item has been removed, the callback method is called.

        """

    def callback(self, key, value):
        """This method should be called (by convention) if an item is removed
        from the cache because it is full. The passed key and value are the
        ones that are removed. By default this method does nothing, but it
        can be customized in a custom cache that inherits from this base class.

        """

    @property
    def is_full(self):
        """True if the number of items in the cache equals :attr:`maxsize`,
        False otherwise.

        """
        return len(self._dict) == self.maxsize

    def __delitem__(self, key):
        self._dict.__delitem__(key)

    def __contains__(self, key):
        return key in self._dict.keys()

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        for key in self._dict.__iter__():
            yield key

    def __reversed__(self):  # pragma: no cover
        for key in self._dict.__reversed__():
            yield key

    def clear(self):  # pragma: no cover
        return self._dict.clear()

    def keys(self):  # pragma: no cover
        return self._dict.keys()

    def values(self):  # pragma: no cover
        return self._dict.values()

    def items(self):  # pragma: no cover
        return self._dict.items()

    def iterkeys(self):  # pragma: no cover
        return self._dict.iterkeys()

    def itervalues(self):  # pragma: no cover
        for value in self._dict.itervalues():
            yield value

    def iteritems(self):  # pragma: no cover
        for key, value in self._dict.iteritems():
            yield key, value

    def update(self, *args, **kwds):  # pragma: no cover
        self._dict.update(*args, **kwds)

    def pop(self, key, default=MutableMapping._MutableMapping__marker):  # pragma: no cover
        return self._dict.pop(key, default)

    def setdefault(self, key, default=None):  # pragma: no cover
        return self._dict.setdefault(key, default)

    def popitem(self, last=True):  # pragma: no cover
        return self._dict.popitem(last)

    def __reduce__(self):  # pragma: no cover
        return self._dict.__reduce__()

    def copy(self):  # pragma: no cover
        return self._dict.copy()

    def __eq__(self, other):  # pragma: no cover
        return self._dict.__eq__(other)

    def __ne__(self, other):  # pragma: no cover
        return self._dict.__ne__(other)

    def viewkeys(self):  # pragma: no cover
        return self._dict.viewkeys()

    def viewvalues(self):  # pragma: no cover
        return self._dict.viewvalues()

    def viewitems(self):  # pragma: no cover
        return self._dict.viewitems()

    @classmethod
    def fromkeys(cls, iterable, value=None):  # pragma: no cover
        return OrderedDict.fromkeys(iterable, value)

    def __repr__(self):  # pragma: no cover
        return '{0}({1!r})'.format(self.__class__.__name__, dict(self._dict))
Beispiel #18
0
class DotMap(OrderedDict):
    def __init__(self, *args, **kwargs):

        self._map = OrderedDict()
        self._dynamic = True  # setting this to False makes the default tests fail; that is expected, since they rely on children being created dynamically

        # ===================================
        if LORETO:
            global MY_DICT_TYPES  # global variable for the class
            self._dynamic = False  # setting this to False makes the default tests fail; that is expected, since they rely on children being created dynamically
            MY_DICT_TYPES = [dict, DotMap,
                             OrderedDict]  # by Loreto (DEFAULT dictionary)
            # ===================================

        if kwargs:
            if '_dynamic' in kwargs:
                self._dynamic = kwargs['_dynamic']
        if args:
            d = args[0]
            if isinstance(d, dict):
                for k, v in self.__call_items(d):
                    if type(v) is dict:
                        v = DotMap(v, _dynamic=self._dynamic)
                    if type(v) is list:
                        l = []
                        for i in v:
                            n = i
                            if type(i) is dict:
                                n = DotMap(i, _dynamic=self._dynamic)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                if k != '_dynamic':
                    self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
            # automatically extend to new DotMap
            self[k] = DotMap()
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {
                '_map', '_dynamic', '_ipython_canary_method_should_not_exist_'
        }:
            super(DotMap, self).__setattr__(k, v)
        else:
            self[k] = v

    def __getattr__(self, k):
        if k in {
                '_map', '_dynamic', '_ipython_canary_method_should_not_exist_'
        }:
            super(DotMap, self).__getattr__(k)
        else:
            return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            # bizarre recursive assignment situation (why someone would do this is beyond me)
            if id(v) == id(self):
                items.append('{0}=DotMap(...)'.format(k))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        out = 'DotMap({0})'.format(', '.join(items))
        return out

    def __repr__(self):
        return str(self)

    def toDict(self):
        d = {}
        for k, v in self.items():
            if type(v) is DotMap:
                # bizarre recursive assignment support
                if id(v) == id(self):
                    v = d
                else:
                    v = v.toDict()
            elif type(v) is list:
                l = []
                for i in v:
                    n = i
                    if type(i) is DotMap:
                        n = i.toDict()
                    l.append(n)
                v = l
            d[k] = v
        return d

    def pprint(self):
        pprint(self.toDict())

        # ===================================

    if LORETO:
        # MY_DICT_TYPES = [dict, DotMap]
        def Ptr(self, listOfQualifiers, create=False):
            ptr = self
            for item in listOfQualifiers:
                if item in ptr:
                    ptr = ptr[item]
                else:
                    if create:
                        ptr[item] = DotMap()
                        ptr = ptr[item]
                    else:
                        return None

            return ptr

        def KeyTree(self, fPRINT=False):
            return DictToList.KeyTree(self,
                                      myDictTYPES=MY_DICT_TYPES,
                                      fPRINT=fPRINT)

        def KeyList(self):
            return DictToList.KeyList(self, myDictTYPES=MY_DICT_TYPES)

        def PrintTree(self,
                      fEXIT=False,
                      maxDepth=10,
                      header=None,
                      whatPrint='LTKV',
                      stackLevel=1):
            PrintDictionaryTree.PrintDictionary(self,
                                                myDictTYPES=MY_DICT_TYPES,
                                                whatPrint=whatPrint,
                                                fEXIT=fEXIT,
                                                maxDepth=maxDepth,
                                                header=header,
                                                stackLevel=stackLevel + 1)

        printDict = PrintTree
        printTree = PrintTree

        def GetValue(self, listOfQualifiers=[], fPRINT=False):
            return DictToList.getValue(self,
                                       listOfQualifiers=listOfQualifiers,
                                       myDictTYPES=MY_DICT_TYPES,
                                       fPRINT=fPRINT)

        # ===================================

    def empty(self):
        return (not any(self))

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        if type(other) is DotMap:
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return DotMap(self.toDict())

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = DotMap()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)
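
A minimal usage sketch for the DotMap above (not part of the original example; it assumes the class and its OrderedDict import are already available in the same module, and all names below are purely illustrative):

# Hypothetical illustration of the dynamic attribute behaviour
cfg = DotMap({'server': {'host': 'localhost', 'port': 8080}})
cfg.server.timeout = 30        # attribute writes are routed through __setattr__ -> __setitem__
print(cfg.server.host)         # 'localhost'
print(cfg.logging.level)       # with _dynamic=True, missing keys auto-create empty DotMaps
plain = cfg.toDict()           # converts nested DotMaps back into plain dicts
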
Beispiel #19
0
class DotMap(OrderedDict):

	def __init__(self, *args, **kwargs):
		self._map = OrderedDict()
		self._dynamic = True
		if kwargs:
			if '_dynamic' in kwargs:
				self._dynamic = kwargs['_dynamic']
		if args:
			d = args[0]
			if isinstance(d, dict):
				for k,v in self.__call_items(d):
					if type(v) is dict:
						v = DotMap(v, _dynamic=self._dynamic)
					if type(v) is list:
						l = []
						for i in v:
							n = i
							if type(i) is dict:
								n = DotMap(i, _dynamic=self._dynamic)
							l.append(n)
						v = l
					self._map[k] = v
		if kwargs:
			for k,v in self.__call_items(kwargs):
				if k != '_dynamic':
					self._map[k] = v

	def __call_items(self, obj):
		if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
			return obj.iteritems()
		else:
			return obj.items()

	def items(self):
		return self.iteritems()

	def iteritems(self):
		return self.__call_items(self._map)

	def __iter__(self):
		return self._map.__iter__()

	def next(self):
		return self._map.next()

	def __setitem__(self, k, v):
		self._map[k] = v
	def __getitem__(self, k):
		if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
			# automatically extend to new DotMap
			self[k] = DotMap()
		return self._map[k]

	def __setattr__(self, k, v):
		if k in {'_map','_dynamic', '_ipython_canary_method_should_not_exist_'}:
			super(DotMap, self).__setattr__(k,v)
		else:
			self[k] = v

	def __getattr__(self, k):
		if k in {'_map','_dynamic','_ipython_canary_method_should_not_exist_'}:
			super(DotMap, self).__getattr__(k)
		else:
			return self[k]

	def __delattr__(self, key):
		return self._map.__delitem__(key)

	def __contains__(self, k):
		return self._map.__contains__(k)

	def __str__(self):
		items = []
		for k,v in self.__call_items(self._map):
			# bizarre recursive assignment situation (why someone would do this is beyond me)
			if id(v) == id(self):
				items.append('{0}=DotMap(...)'.format(k))
			else:
				items.append('{0}={1}'.format(k, repr(v)))
		out = 'DotMap({0})'.format(', '.join(items))
		return out

	def __repr__(self):
		return str(self)

	def toDict(self):
		d = {}
		for k,v in self.items():
			if type(v) is DotMap:
				# bizarre recursive assignment support
				if id(v) == id(self):
					v = d
				else:
					v = v.toDict()
			elif type(v) is list:
				l = []
				for i in v:
					n = i
					if type(i) is DotMap:
						n = i.toDict()
					l.append(n)
				v = l
			d[k] = v
		return d

	def pprint(self):
		pprint(self.toDict())

	def empty(self):
		return (not any(self))

	# proper dict subclassing
	def values(self):
		return self._map.values()

	# ipython support
	def __dir__(self):
		return self.keys()

	@classmethod
	def parseOther(self, other):
		if type(other) is DotMap:
			return other._map
		else:
			return other	
	def __cmp__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__cmp__(other)
	def __eq__(self, other):
		other = DotMap.parseOther(other)
		if not isinstance(other, dict):
			return False
		return self._map.__eq__(other)
	def __ge__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__ge__(other)
	def __gt__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__gt__(other)
	def __le__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__le__(other)
	def __lt__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__lt__(other)
	def __ne__(self, other):
		other = DotMap.parseOther(other)
		return self._map.__ne__(other)

	def __delitem__(self, key):
		return self._map.__delitem__(key)
	def __len__(self):
		return self._map.__len__()
	def clear(self):
		self._map.clear()
	def copy(self):
		return DotMap(self.toDict())
	def get(self, key, default=None):
		return self._map.get(key, default)
	def has_key(self, key):
		return key in self._map
	def iterkeys(self):
		return self._map.iterkeys()
	def itervalues(self):
		return self._map.itervalues()
	def keys(self):
		return self._map.keys()
	def pop(self, key, default=None):
		return self._map.pop(key, default)
	def popitem(self):
		return self._map.popitem()
	def setdefault(self, key, default=None):
		return self._map.setdefault(key, default)
	def update(self, *args, **kwargs):
		if len(args) != 0:
			self._map.update(*args)
		self._map.update(kwargs)
	def viewitems(self):
		return self._map.viewitems()
	def viewkeys(self):
		return self._map.viewkeys()
	def viewvalues(self):
		return self._map.viewvalues()
	@classmethod
	def fromkeys(cls, seq, value=None):
		d = DotMap()
		d._map = OrderedDict.fromkeys(seq, value)
		return d
	def __getstate__(self): return self.__dict__
	def __setstate__(self, d): self.__dict__.update(d)
Beispiel #20
0
class Chemical_Analysis_pyneb():
    def __init__(self):

        self.MC_array_len = 1000
        self.MC_warning_limit = self.MC_array_len * 0.1

        self.Hbeta_label = 'H1_4861A'

    def load_elements(self):

        #Set atomic data
        #atomicData.setDataFile('he_i_rec_Pal12-Pal13.fits')
        atomicData.setDataFile('s_iii_coll_HRS12.dat')

        #Default: 's_iii_atom_PKW09.dat'
        #S3: All energy and A values: Podobedova, Kelleher, and Wiese 2009, J. Phys. Chem. Ref. Data, Vol.
        #S3: collision strengths: Tayal & Gupta 1999, ApJ, 526, 544

        #New Atomic data s_iii_coll_HRS12.dat
        #S3: All energy and A values: Podobedova, Kelleher, and Wiese 2009, J. Phys. Chem. Ref. Data, Vol.
        #S3: collision strengths: Hudson, Ramsbottom & Scott 2012, ApJ, 750, 65

        #Declare ions
        self.S2_atom = Atom('S', 2)
        self.S3_atom = Atom('S', 3)
        self.Ar3_atom = Atom('Ar', 3)
        self.Ar4_atom = Atom('Ar', 4)
        self.N2_atom = Atom('N', 2)
        self.O2_atom = Atom('O', 2)
        self.O3_atom = Atom('O', 3)
        self.H1_atom = RecAtom('H', 1)
        self.He1_atom = RecAtom('He', 1)
        self.He2_atom = RecAtom('He', 2)

        #Pyneb objects
        self.diags = Diagnostics()

        #Ohrs 2016 relation for the OI_SI gradient
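        #The O/S ratio follows from inverting the sampled log(S/O) gradient: O/S = 10**(-log(S/O))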
        self.logSI_OI_Gradient = random.normal(
            -1.53, 0.05, size=self.MC_array_len
        )  # random.normal(-1.78,  0.03, size = self.MC_array_len)
        self.OI_SI = power(10, -self.logSI_OI_Gradient)

        #Theoretical ratios
        self.S3_ratio = self.S3_atom.getEmissivity(
            10000, 100, wave=9531) / self.S3_atom.getEmissivity(
                10000, 100, wave=9069)
        self.S3_9000_ratio = random.normal(
            self.S3_atom.getEmissivity(10000, 100, wave=9531) /
            self.S3_atom.getEmissivity(10000, 100, wave=9069),
            0.01,
            size=self.MC_array_len)
        self.N2_6000_ratio = self.N2_atom.getEmissivity(
            10000, 100, wave=6584) / self.N2_atom.getEmissivity(
                10000, 100, wave=6548)
        self.O3_5000_ratio = self.O3_atom.getEmissivity(
            10000, 100, wave=5007) / self.O3_atom.getEmissivity(
                10000, 100, wave=4959)

        #Factors to speed calculations
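        #Multiplying one [SIII] line flux by its factor reproduces the summed 9069+9531 doublet
        #flux through the theoretical 9531/9069 emissivity ratio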
        self.lines_factors = {}
        self.lines_factors['S3_9069A'] = 1 + self.S3_ratio
        self.lines_factors['S3_9531A'] = 1 + 1 / self.S3_ratio

        #Cloudy models for the SIV contribution

        self.m_SIV_correction = random.normal(1.1628,
                                              0.00559,
                                              size=self.MC_array_len)
        self.n_SIV_correction = random.normal(0.0470,
                                              0.0097,
                                              size=self.MC_array_len)
        #self.m_SIV_correction   = random.normal(1.109,  0.01, size = self.MC_array_len)
        #self.n_SIV_correction   = random.normal(0.135,  0.0173, size = self.MC_array_len)

        #CHAOS relation TNII-TSIII
        #T[SIII]  = 1.312(+-0.075)T[NII]-0.313(+-0.058)
        #TNII     = (0.762+-0.044)*TSIII  + 0.239+-0.046
        self.m_TNII_correction = random.normal(0.762,
                                               0.044,
                                               size=self.MC_array_len)
        self.n_TNII_correction = random.normal(0.239,
                                               0.046,
                                               size=self.MC_array_len)

        #Truncated gaussian for the density
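        #truncnorm takes its bounds in standard-score units: a = (lower - loc)/scale, b = (upper - loc)/scale,
        #here clipping a N(50, 25) distribution to the range [1, 100]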
        lower_trunc, upper_trunc = (1.0 - 50.0) / 25.0, (100 - 50) / 25.0
        self.Truncated_gaussian = truncnorm(lower_trunc,
                                            upper_trunc,
                                            loc=50,
                                            scale=25)

        print '-Elements loaded\n'

        return

    def declare_object(self, lines_log_frame):

        #List of all parameters
        #         lineRatios      = ['R_SII', 'R_SII_prime', 'R_SIII', 'R_NII', 'R_OII', 'R_OII_prime', 'R_OIII']
        #         elecProperties  = ['neSII', 'neOII', 'TeOII', 'TeSII', 'TeNII', 'TeOIII', 'TeSIII', 'TeOII_from_TeOIII', 'TeNII_from_TeOIII', 'TeSIII_from_TeOIII', 'TeOIII_from_TeSIII']
        #         ionicAbund      = ['SII_HII', 'SIII_HII', 'SIV_HII', 'OII_HII', 'OII_HII_3279A', 'OII_HII_7319A', 'NII_HII', 'ArIII_HII', 'ArIV_HII', 'HeII_HII_from_O',
        #                             'HeIII_HII_from_O', 'HeII_HII_from_S', 'HeIII_HII_from_S']
        #         elemAbund       = ['SI_HI', 'OI_HI', 'NI_OI', 'NI_HI', 'HeI_HI_from_O', 'HeI_HI_from_S', 'Ymass_O', 'Ymass_S']

        self.abunData = Series()

        self.Hbeta_flux = random.normal(
            lines_log_frame.loc['H1_4861A', 'line_Int'].nominal_value,
            lines_log_frame.loc['H1_4861A', 'line_Int'].std_dev,
            size=self.MC_array_len)

        self.low_density_dist = self.Truncated_gaussian.rvs(self.MC_array_len)

        #Generate a dictionary to store the random array for all lines
        self.lines_dict = OrderedDict()

        #Dictionary with lines that may need special treatment
        Blended_lines = {}
        Blended_lines['O2_3726A'] = ('O2_3726A', 'O2_3729A')
        Blended_lines['O2_7319A'] = ('O2_7319A', 'O2_7330A')
        NoError_lines = {}
        NoError_lines['N2_6548A'] = ('N2_6548A',)

        #Generate
        for line in lines_log_frame.index.values:

            #Start with the particular cases: Blended lines
            if line in Blended_lines:
                blended_lines = Blended_lines[line]

                if set(lines_log_frame.index) >= set(blended_lines):
                    label_line = line + '+'

                    #If the lines are blended we use the integrated flux, otherwise we add the individual fluxes
                    if lines_log_frame.loc[blended_lines[0],
                                           'flux_intg'] == lines_log_frame.loc[
                                               blended_lines[1]]['flux_intg']:
                        flux_line = lines_log_frame.loc[
                            blended_lines[0],
                            'line_IntBrute_dered'].nominal_value
                        error_line = lines_log_frame.loc[
                            blended_lines[0], 'line_IntBrute_dered'].std_dev

                    else:
                        line_sum = lines_log_frame.loc[
                            blended_lines[0],
                            'line_IntBrute_dered'] + lines_log_frame.loc[
                                blended_lines[1], 'line_IntBrute_dered']
                        flux_line = line_sum.nominal_value
                        error_line = line_sum.std_dev

                #Case only one of the lines was measured
                else:
                    label_line = line
                    flux_line = lines_log_frame.loc[line,
                                                    'line_Int'].nominal_value
                    error_line = lines_log_frame.loc[line, 'line_Int'].std_dev

            #Lines without an error estimate
            elif (line in NoError_lines) and (
                    lines_log_frame.loc[line, 'line_Int'].std_dev == 0.0):
                label_line = line
                flux_line = lines_log_frame.loc[line, 'line_Int'].nominal_value
                error_line = lines_log_frame.loc[
                    'N2_6584A', 'line_Int'].std_dev / self.N2_6000_ratio

            #Non-issue lines
            else:
                label_line = line
                flux_line = lines_log_frame.loc[line, 'line_Int'].nominal_value
                error_line = lines_log_frame.loc[line, 'line_Int'].std_dev

            #Generate line gaussian shaped array
            self.lines_dict[label_line] = random.normal(flux_line,
                                                        error_line,
                                                        size=self.MC_array_len)

        return

    def den_temp_diagnostic_pair(self,
                                 diagProperties,
                                 den_distribution=None,
                                 atom_temp=None):
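        #Returns a (Te, ne) Monte Carlo pair: the diagnostics are cross-converged when no
        #density distribution is supplied; otherwise the temperature diagnostic is evaluated
        #at the given density distribution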

        #Check if all necessary lines are there
        if self.lines_dict.viewkeys() >= set(
                diagProperties['required_denlines'] +
                diagProperties['required_temlines']):

            if den_distribution is None:
                den_ratio = numexpr.evaluate(diagProperties['den_ratio'],
                                             self.lines_dict)
                tem_ratio = numexpr.evaluate(diagProperties['tem_ratio'],
                                             self.lines_dict)

                Te, ne = self.diags.getCrossTemDen(
                    diag_tem=diagProperties['diag_tem'],
                    diag_den=diagProperties['diag_den'],
                    value_tem=tem_ratio,
                    value_den=den_ratio)

            else:
                tem_ratio = numexpr.evaluate(diagProperties['tem_ratio'],
                                             self.lines_dict)
                Te = atom_temp.getTemDen(
                    tem_ratio,
                    den=den_distribution,
                    to_eval=diagProperties['atom_temdiag'])
                ne = den_distribution

        #else empty (nan) arrays
        else:
            Te, ne = empty(self.MC_array_len), empty(self.MC_array_len)
            Te[:], ne[:] = np_nan, np_nan

        return Te, ne

    def determine_electron_parameters(self, objectData):

        #----------To start, make sure we are not in the very low density regime
        low_density_dist = None
        if self.lines_dict.viewkeys() >= {'S2_6716A', 'S2_6731A'}:
            S2_ratio = mean(self.lines_dict['S2_6716A']) / mean(
                self.lines_dict['S2_6731A'])
            if S2_ratio > 1.35:
                print '--Low density object'
                lower, upper, mu, sigma = 1.0, 100.0, 50.0, 25.0
                X_func = truncnorm((lower - mu) / sigma, (upper - mu) / sigma,
                                   loc=mu,
                                   scale=sigma)
                low_density_dist = X_func.rvs(self.MC_array_len)
                self.abunData['neSII'] = low_density_dist
                if low_density_dist is None:
                    print 'WARNING: WHAT IS GOING ON!!!!!!'
                    print lower, upper, mu, sigma
                    print xrange

        #-----------Sulfur
        diagProperties = {}
        diagProperties['required_denlines'] = ['S2_6716A', 'S2_6731A']
        diagProperties['required_temlines'] = [
            'S3_9069A', 'S3_9531A', 'S3_6312A'
        ] if objectData.SIII_lines == 'BOTH' else [objectData.SIII_lines
                                                   ] + ['S3_6312A']
        diagProperties['diag_den'] = '[SII] 6731/6716'
        diagProperties['diag_tem'] = '[SIII] 6312/9200+'
        diagProperties['atom_temdiag'] = 'L(6312)/(L(9069)+L(9531))'
        diagProperties['den_ratio'] = 'S2_6731A/S2_6716A'
        diagProperties[
            'tem_ratio'] = 'S3_6312A/(S3_9069A+S3_9531A)' if objectData.SIII_lines == 'BOTH' else 'S3_6312A/({valid_line} * {line_factor})'.format(
                valid_line=objectData.SIII_lines,
                line_factor=self.lines_factors[objectData.SIII_lines])

        if '*' in diagProperties['tem_ratio']:
            print '-- Using factor', diagProperties['tem_ratio']

        S3_lines = [
            'S3_9069A', 'S3_9531A', 'S3_6312A'
        ] if objectData.SIII_lines == 'BOTH' else [objectData.SIII_lines
                                                   ] + ['S3_6312A']

        #--Calculate NeSII and TeSIII
        self.abunData['TeSIII'], neSII_TSIII = self.den_temp_diagnostic_pair(
            diagProperties, low_density_dist, atom_temp=self.S3_atom)

        #--Determine empirical TOIII from TSIII  #Epm & Diaz 2005
        self.abunData['TeOIII_from_TeSIII'] = (
            0.95 * self.abunData.TeSIII / 10000 + 0.076) * 10000

        diagProperties = {}
        diagProperties['required_denlines'] = ['S2_6716A', 'S2_6731A']
        diagProperties['required_temlines'] = ['S2_4069A', 'S2_4076A']
        diagProperties['diag_den'] = '[SII] 6731/6716'
        diagProperties['diag_tem'] = '[SII] 4069/4076'
        diagProperties['atom_temdiag'] = 'L(4069)/L(4076)'
        diagProperties['den_ratio'] = 'S2_6731A/S2_6716A'
        diagProperties['tem_ratio'] = 'S2_4069A/S2_4076A'

        #       #--Calculate NeSII and TeSII
        self.abunData['TeSII'], neSII_TSII = self.den_temp_diagnostic_pair(
            diagProperties, low_density_dist, atom_temp=self.S2_atom)

        #-----------Oxygen
        diagProperties = {}
        diagProperties['required_denlines'] = ['S2_6716A', 'S2_6731A']
        diagProperties['required_temlines'] = [
            'O3_4363A', 'O3_4959A', 'O3_5007A'
        ]
        diagProperties['diag_den'] = '[SII] 6731/6716'
        diagProperties['diag_tem'] = '[OIII] 4363/5007+'
        diagProperties['atom_temdiag'] = 'L(4363)/(L(5007)+L(4959))'
        diagProperties['den_ratio'] = 'S2_6731A/S2_6716A'
        diagProperties['tem_ratio'] = 'O3_4363A/(O3_4959A+O3_5007A)'

        #--Calculate NeSII and TeOIII
        self.abunData['TeOIII'], neSII_OIII = self.den_temp_diagnostic_pair(
            diagProperties, low_density_dist, atom_temp=self.O3_atom)

        #--Determine empirical TOIII from TSIII #Epm & Diaz 2005
        self.abunData['TeSIII_from_TeOIII'] = (
            1.05 * self.abunData.TeOIII / 10000 - 0.08) * 10000

        #--Determine empirical TOII from TOIII #Dors Jr 2006
        self.abunData['TeOII_from_TeOIII'] = (1.397 / (
            (1 / (self.abunData.TeOIII / 10000)) + 0.385)) * 10000

        #--Determine empirical TNII from TOIII #Epm 2014
        self.abunData['TeNII_from_TeOIII'] = (1.452 / (
            (1 / (self.abunData.TeOIII / 10000)) + 0.479)) * 10000

        #-----------Nitrogen
        diagProperties = {}
        diagProperties['required_denlines'] = ['S2_6716A', 'S2_6731A']
        diagProperties['required_temlines'] = [
            'N2_5755A', 'N2_6548A', 'N2_6584A'
        ]
        diagProperties['diag_den'] = '[SII] 6731/6716'
        diagProperties['diag_tem'] = '[NII] 5755/6584+'
        diagProperties['atom_temdiag'] = '(L(6584) + L(6548)) / L(5755)'
        diagProperties['den_ratio'] = 'S2_6731A/S2_6716A'
        diagProperties['tem_ratio'] = '(N2_6548A+N2_6584A)/N2_5755A'

        #--Calculate Ne_SII and Te_NII
        self.abunData['TeNII'], neSII_TNII = self.den_temp_diagnostic_pair(
            diagProperties, low_density_dist, atom_temp=self.N2_atom)

        #Assign object density from lines or from the low density distribution
        #--This code favors the neSII calculated from the SIII-SII line pair
        if 'neSII' not in self.abunData:
            if np_sum(isnan(neSII_TSIII)) < self.MC_array_len:
                self.abunData['neSII'] = neSII_TSIII
            elif np_sum(isnan(neSII_OIII)) < self.MC_array_len:
                self.abunData['neSII'] = neSII_OIII
            else:
                self.abunData['neSII'] = neSII_TSIII

        #--Check if some results contain nan entries
        nanCount = OrderedDict()
        for electron_parameter in self.abunData.index:
            variable_array = self.abunData[electron_parameter]
            nan_count = np_sum(isnan(variable_array))
            if nan_count > self.MC_array_len * 0.90:
                self.abunData.drop(electron_parameter, inplace=True)
            elif nan_count > 0:
                mag, error = nanmean(
                    self.abunData[electron_parameter]), nanstd(
                        self.abunData[electron_parameter])
                self.abunData[electron_parameter] = random.normal(
                    mag, error, size=self.MC_array_len)
                if nan_count > self.MC_warning_limit:
                    nanCount[electron_parameter] = nan_count

        #Display calculations with issues
        if len(nanCount) > 0:
            print '-Issues calculating:'
            for element in nanCount:
                print '--', element, nanCount[element]

        return

    def determine_ionic_abundance(self, abund_code, atom, diagnos_eval,
                                  diagnos_mag, tem, den):
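        #Computes the ionic abundance relative to H+ with pyneb's getIonAbundance; nan entries
        #are replaced by resampling from a truncated normal as long as fewer than 90% of the
        #Monte Carlo points failed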

        try:
            hbeta_flux = self.Hbeta_flux
        except AttributeError:
            hbeta_flux = self.H1_atom.getEmissivity(tem=tem,
                                                    den=den,
                                                    label='4_2',
                                                    product=False)
            print '--Warning using theoretical Hbeta emissivity'

        #Ionic abundance calculation using pyneb
        ionic_abund = atom.getIonAbundance(int_ratio=diagnos_mag,
                                           tem=tem,
                                           den=den,
                                           to_eval=diagnos_eval,
                                           Hbeta=hbeta_flux)

        #Evaluate the nan array
        nan_idcs = isnan(ionic_abund)
        nan_count = np_sum(nan_idcs)

        #Directly save if not nan
        if nan_count == 0:
            self.abunData[abund_code] = ionic_abund

        #Remove the nan entries performing a normal distribution
        elif nan_count < 0.90 * self.MC_array_len:
            mag, error = nanmean(ionic_abund), nanstd(ionic_abund)

            #Generate truncated array to store the data
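            #Standard-score bounds keep the replacement samples positive and below 1000 times the mean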
            a, b = (0 - mag) / error, (1000 * mag - mag) / error
            new_samples = truncnorm(a, b, loc=mag,
                                    scale=error).rvs(size=nan_count)

            #Replace nan entries
            ionic_abund[nan_idcs] = new_samples
            self.abunData[abund_code] = ionic_abund

            if nan_count > self.MC_warning_limit:
                print '-- {} calculated with {}'.format(abund_code, nan_count)

        return

    def check_obsLines(self, lines_list, just_one_line=False):
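        #Builds the pyneb to_eval expression and the matching Monte Carlo flux array from
        #whichever of the requested lines were actually observed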

        #WARNING: it would be better to read a standard preference for which line to use
        eval_lines = map(
            lambda x: 'L({})'.format(x[x.find('_') + 1:len(x) - 1]),
            lines_list)  #Right format for pyneb eval: Ar3_7751A -> L(7751)
        diagnos_eval = None

        #Case all lines are there
        if self.lines_dict.viewkeys() >= set(lines_list):
            diagnos_mag = zeros(self.MC_array_len)
            for i in range(len(lines_list)):
                diagnos_mag += self.lines_dict[lines_list[i]]
            diagnos_eval = '+'.join(eval_lines)

        #Case we can use any line: #WARNING last line is favoured
        elif just_one_line:
            diagnos_mag = zeros(self.MC_array_len)
            for i in range(len(lines_list)):
                if lines_list[i] in self.lines_dict:
                    diagnos_mag = self.lines_dict[lines_list[i]]
                    diagnos_eval = eval_lines[i]

        #Case none of the lines
        if diagnos_eval is None:
            diagnos_mag = self.generate_nan_array()
            diagnos_eval = '+'.join(eval_lines)

        return diagnos_eval, diagnos_mag

    def argon_abundance_scheme(self, Tlow, Thigh, ne):

        #Calculate the Ar_+2 abundance according to the lines observed
        Ar3_lines = ['Ar3_7136A', 'Ar3_7751A']
        diagnos_eval, diagnos_mag = self.check_obsLines(Ar3_lines,
                                                        just_one_line=True)
        self.determine_ionic_abundance('ArIII_HII', self.Ar3_atom,
                                       diagnos_eval, diagnos_mag, Tlow, ne)

        #Calculate the Ar_+3 abundance according to the lines observed
        Ar4_lines = ['Ar4_4740A', 'Ar4_4711A']
        diagnos_eval, diagnos_mag = self.check_obsLines(Ar4_lines,
                                                        just_one_line=True)
        self.determine_ionic_abundance('ArIV_HII', self.Ar4_atom, diagnos_eval,
                                       diagnos_mag, Thigh, ne)

    def oxygen_abundance_scheme(self, Tlow, Thigh, ne):

        #Calculate the O_+1 abundances from 3200+ lines
        O2_lines = ['O2_3726A+']
        diagnos_eval, diagnos_mag = self.check_obsLines(O2_lines)
        diagnos_eval = 'L(3726)+L(3729)'
        self.determine_ionic_abundance('OII_HII_3279A', self.O2_atom,
                                       diagnos_eval, diagnos_mag, Tlow, ne)

        #Calculate the O_+1 abundances from 7300+ lines
        O2_lines = ['O2_7319A+']
        diagnos_eval, diagnos_mag = self.check_obsLines(O2_lines)
        diagnos_eval = 'L(7319)+L(7330)'
        self.determine_ionic_abundance('OII_HII_7319A', self.O2_atom,
                                       diagnos_eval, diagnos_mag, Tlow, ne)

        #--Correction for recombination contribution Liu2000
        if 'OII_HII_7319A' in self.abunData:

            try:
                hbeta_flux = self.Hbeta_flux
            except AttributeError:
                hbeta_flux = self.H1_atom.getEmissivity(tem=Tlow,
                                                        den=ne,
                                                        label='4_2',
                                                        product=False)
                print '--Warning using theoretical Hbeta emissivity'

            Lines_Correction = (9.36 * power((Tlow / 10000), 0.44) *
                                self.abunData.OII_HII_7319A) * hbeta_flux
            ratio = self.lines_dict['O2_7319A+'] - Lines_Correction
            self.determine_ionic_abundance('OII_HII_7319A', self.O2_atom,
                                           diagnos_eval, ratio, Tlow, ne)

        #Get the ratios for empirical relation between OII lines
        if 'O2_3726A+' in self.lines_dict:
            self.abunData[
                'O_R3200'] = self.lines_dict['O2_3726A+'] / self.Hbeta_flux
            print 'O_R3200', mean(self.abunData['O_R3200'])
            print 'OII_HII_3279A', mean(self.abunData['OII_HII_3279A'])
            print 'Original flux', mean(self.lines_dict['O2_3726A+'])

        if 'O2_7319A+' in self.lines_dict:
            self.abunData[
                'O_R7300'] = self.lines_dict['O2_7319A+'] / self.Hbeta_flux
            print 'OII_HII_7319A', mean(self.abunData['OII_HII_7319A'])
        if self.lines_dict.viewkeys() >= set(['O3_5007A']):
            self.abunData[
                'O_R3'] = self.lines_dict['O3_5007A'] / self.Hbeta_flux

        #Calculate the abundance from the empirical O_R3200_ffO2
        if set(self.abunData.index) >= {'O_R7300', 'O_R3'}:
            logRO2 = 1.913 + log10(self.abunData['O_R7300']) - 0.374 * log10(
                self.abunData['O_R3']) / 0.806
            print 'logRO2', mean(logRO2)
            RO2 = power(10, logRO2)
            self.abunData['O_R3200_ffO2'] = RO2
            print 'O_R3200_ffO2', mean(self.abunData['O_R3200_ffO2'])
            print 'RO2*Hbeta', mean(RO2 * self.Hbeta_flux)
            diagnos_eval = 'L(3726)+L(3729)'
            self.determine_ionic_abundance('OII_HII_ffO2', self.O2_atom,
                                           diagnos_eval, RO2 * self.Hbeta_flux,
                                           Tlow, ne)
            print 'OII_HII_ffO2', mean(self.abunData['OII_HII_ffO2'])

        #Calculate the O_+2 abundance
        O3_lines = ['O3_4959A', 'O3_5007A']
        diagnos_eval, diagnos_mag = self.check_obsLines(O3_lines)
        self.determine_ionic_abundance('OIII_HII', self.O3_atom, diagnos_eval,
                                       diagnos_mag, Thigh, ne)

        #Determine the O/H abundance (favoring the value from OII_HII)
        if set(self.abunData.index) >= {'OII_HII_3279A', 'OIII_HII'}:
            self.abunData['OII_HII'] = self.abunData['OII_HII_3279A']
            self.abunData['OI_HI'] = self.abunData[
                'OII_HII_3279A'] + self.abunData['OIII_HII']
        elif set(self.abunData.index) >= {'OII_HII_7319A', 'OIII_HII'}:
            self.abunData['OII_HII'] = self.abunData['OII_HII_7319A']
            self.abunData['OI_HI'] = self.abunData[
                'OII_HII_7319A'] + self.abunData['OIII_HII']

        if set(self.abunData.index) >= {'OII_HII_ffO2', 'OIII_HII'}:
            if set(self.abunData.index) >= {'OII_HII_3279A'}:
                self.abunData['OI_HI_ff02'] = self.abunData[
                    'OII_HII_3279A'] + self.abunData['OIII_HII']
            else:
                self.abunData['OI_HI_ff02'] = self.abunData[
                    'OII_HII_ffO2'] + self.abunData['OIII_HII']

        return

    def nitrogen_abundance_scheme(self, Tlow, ne):

        #Calculate TNII temperature from the CHAOS relation
        T_NII = Tlow  #self.m_TNII_correction * Tlow + self.n_TNII_correction

        #Calculate the N+1 abundance
        N2_lines = ['N2_6548A', 'N2_6584A']
        diagnos_eval, diagnos_mag = self.check_obsLines(N2_lines)
        self.determine_ionic_abundance('NII_HII', self.N2_atom, diagnos_eval,
                                       diagnos_mag, T_NII, ne)

        #Calculate NI_HI using the OI_HI
        if set(self.abunData.index) >= {'NII_HII', 'OI_HI'}:

            #Compute  NI_OI
            self.abunData[
                'NI_OI'] = self.abunData['NII_HII'] / self.abunData['OII_HII']
            self.abunData[
                'NI_HI'] = self.abunData['NI_OI'] * self.abunData['OI_HI']

#             #Repeat calculation if 5755 line was observed to include the recombination contribution
#             if self.lines_dict.viewkeys() >= {'N2_5755A'}:
#
#                 NIII_HI             = self.abunData.NI_HI - self.abunData['NII_HII']
#                 Lines_Correction    = 3.19 * power((Thigh/10000), 0.30) * NIII_HI * self.Hbeta_flux
#                 self.abunData['TNII'], nSII = self.diags.getCrossTemDen(diag_tem = '[NII] 5755/6584+',
#                                                                         diag_den  = '[SII] 6731/6716',
#                                                                         value_tem = (self.lines_dict['N2_5755A'] - Lines_Correction)/(self.lines_dict['N2_6548A'] + self.lines_dict['N2_6584A']),
#                                                                         value_den = self.lines_dict['S2_6731A']/self.lines_dict['S2_6716A'])
#
#                 Ratio = self.lines_dict['N2_6548A'] + self.lines_dict['N2_6584A']
#                 self.determine_ionic_abundance('NII_HII', self.N2_atom, Ratio, diagnos_mag, self.abunData['TNII'], ne)
#
#                 self.abunData['NI_OI'] = self.abunData['NII_HII'] / self.abunData['OII_HII']
#                 self.abunData['NI_HI'] = self.abunData['NI_OI'] * self.abunData['OI_HI']

        return

    def sulfur_abundance_scheme(self, Tlow, ne, SIII_lines_to_use):

        print 'Putting this in:', SIII_lines_to_use

        #Calculate the S+1 abundance
        S2_lines = ['S2_6716A', 'S2_6731A']
        diagnos_eval, diagnos_mag = self.check_obsLines(S2_lines)
        self.determine_ionic_abundance('SII_HII', self.S2_atom, diagnos_eval,
                                       diagnos_mag, Tlow, ne)

        #Calculate the S+2 abundance
        S3_lines = ['S3_9069A', 'S3_9531A'
                    ] if SIII_lines_to_use == 'BOTH' else [SIII_lines_to_use]
        diagnos_eval, diagnos_mag = self.check_obsLines(S3_lines)
        if set(S3_lines) != set(['S3_9069A', 'S3_9531A']):
            print '-- Using SIII lines', diagnos_eval

        self.determine_ionic_abundance('SIII_HII', self.S3_atom, diagnos_eval,
                                       diagnos_mag, Tlow, ne)

        #Calculate the total sulfur abundance
        if set(self.abunData.index) >= {'SII_HII', 'SIII_HII'}:

            self.abunData[
                'SI_HI'] = self.abunData['SII_HII'] + self.abunData['SIII_HII']

            #Add the S+3 component if the argon correction is found
            if set(self.abunData.index) >= {'ArIII_HII', 'ArIV_HII'}:
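                #S+3 is estimated from the Ar ionisation ratio: log(S+3/H+) = log(S+2/H+) - (log(Ar+2/Ar+3) - n)/m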

                logAr2Ar3 = log10(self.abunData['ArIII_HII'] /
                                  self.abunData['ArIV_HII'])
                logSIV = log10(self.abunData['SIII_HII']) - (
                    logAr2Ar3 - self.n_SIV_correction) / self.m_SIV_correction
                SIV_HII = power(10, logSIV)

                # Evaluate the nan array
                nan_idcs = isnan(SIV_HII)
                nan_count = np_sum(nan_idcs)

                # Directly save if not nan
                if nan_count == 0:
                    self.abunData['SIV_HII'] = SIV_HII

                # Remove the nan entries performing a normal distribution
                elif nan_count < 0.90 * self.MC_array_len:
                    mag, error = nanmean(SIV_HII), nanstd(SIV_HII)

                    # Generate truncated array to store the data
                    a, b = (0 - mag) / error, (1000 * mag - mag) / error
                    new_samples = truncnorm(a, b, loc=mag,
                                            scale=error).rvs(size=nan_count)

                    # Replace nan entries
                    SIV_HII[nan_idcs] = new_samples
                    self.abunData['SIV_HII'] = SIV_HII

                    if nan_count > self.MC_warning_limit:
                        print '-- {} calculated with {}'.format(
                            'SIV_HII', nan_count)

                self.abunData[
                    'SI_HI'] = self.abunData['SII_HII'] + self.abunData[
                        'SIII_HII'] + self.abunData['SIV_HII']
                self.abunData['ICF_SIV'] = self.abunData['SI_HI'] / (
                    self.abunData['SII_HII'] + self.abunData['SIII_HII'])

        return

    def helium_abundance_elementalScheme(self,
                                         Te,
                                         ne,
                                         lineslog_frame,
                                         metal_ext=''):

        #Check temperatures are not nan before starting the treatment
        if (not isinstance(Te, float)) and (not isinstance(ne, float)):

            #HeI_indices = (lineslog_frame.Ion.str.contains('HeI_')) & (lineslog_frame.index != 'He1_8446A')  & (lineslog_frame.index != 'He1_7818A') & (lineslog_frame.index != 'He1_5016A')
            HeI_indices = (lineslog_frame.Ion.str.contains('HeI_')) & (
                lineslog_frame.index.isin(
                    ['He1_4472A', 'He1_5876A', 'He1_6678A']))
            HeI_labels = lineslog_frame.loc[HeI_indices].index.values
            HeI_ions = lineslog_frame.loc[HeI_indices].Ion.values

            Emis_Hbeta = self.H1_atom.getEmissivity(tem=Te,
                                                    den=ne,
                                                    label='4_2',
                                                    product=False)

            #--Generating matrices with fluxes and emissivities
            for i in range(len(HeI_labels)):

                pyneb_code = float(HeI_ions[i][HeI_ions[i].find('_') +
                                               1:len(HeI_ions[i])])
                line_relative_Flux = self.lines_dict[
                    HeI_labels[i]] / self.Hbeta_flux
                line_relative_emissivity = self.He1_atom.getEmissivity(
                    tem=Te, den=ne, wave=pyneb_code,
                    product=False) / Emis_Hbeta
                line_relative_emissivity = self.check_nan_entries(
                    line_relative_emissivity)

                if i == 0:
                    matrix_HeI_fluxes = copy(line_relative_Flux)
                    matrix_HeI_emis = copy(line_relative_emissivity)
                else:
                    matrix_HeI_fluxes = vstack(
                        (matrix_HeI_fluxes, line_relative_Flux))
                    matrix_HeI_emis = vstack(
                        (matrix_HeI_emis, line_relative_emissivity))

            matrix_HeI_fluxes = transpose(matrix_HeI_fluxes)
            matrix_HeI_emis = transpose(matrix_HeI_emis)

            #Perform the fit
            params = Parameters()
            params.add('Y', value=0.01)
            HeII_HII_array = zeros(len(matrix_HeI_fluxes))
            HeII_HII_error = zeros(len(matrix_HeI_fluxes))
            for i in range(len(matrix_HeI_fluxes)):
                fit_Output = lmfit_minimmize(residual_Y_v3,
                                             params,
                                             args=(matrix_HeI_emis[i],
                                                   matrix_HeI_fluxes[i]))
                HeII_HII_array[i] = fit_Output.params['Y'].value
                HeII_HII_error[i] = fit_Output.params['Y'].stderr

            #NOT ADDING UP THE CORRECT ERRORS?
            #self.abunData['HeII_HII_from_' + metal_ext] = random.normal(mean(HeII_HII_array), mean(HeII_HII_error), size = self.MC_array_len)
            ionic_abund = random.normal(mean(HeII_HII_array),
                                        mean(HeII_HII_error),
                                        size=self.MC_array_len)

            #Evaluate the nan array
            nan_count = np_sum(isnan(ionic_abund))
            if nan_count == 0:
                self.abunData['HeII_HII_from_' + metal_ext] = ionic_abund
            #Remove the nan entries performing a normal distribution
            elif nan_count < 0.90 * self.MC_array_len:
                mag, error = nanmean(ionic_abund), nanstd(ionic_abund)
                self.abunData['HeII_HII_from_' + metal_ext] = random.normal(
                    mag, error, size=self.MC_array_len)
                if nan_count > self.MC_warning_limit:
                    print '-- {} calculated with {}'.format(
                        'HeII_HII_from_' + metal_ext, nan_count)

            #Calculate the He+2 abundance
            if self.lines_dict.viewkeys() >= {'He2_4686A'}:
                #self.abunData['HeIII_HII_from_' + metal_ext] = self.He2_atom.getIonAbundance(int_ratio = self.lines_dict['He2_4686A'], tem=Te, den=ne, wave = 4685.6, Hbeta = self.Hbeta_flux)
                self.determine_ionic_abundance('HeIII_HII_from_' + metal_ext,
                                               self.He2_atom, 'L(4685)',
                                               self.lines_dict['He2_4686A'],
                                               Te, ne)

            #Calculate elemental abundance
            Helium_element_keys = [
                'HeII_HII_from_' + metal_ext, 'HeIII_HII_from_' + metal_ext
            ]
            if set(self.abunData.index) >= set(Helium_element_keys):
                self.abunData['HeI_HI_from_' +
                              metal_ext] = self.abunData[Helium_element_keys[
                                  0]] + self.abunData[Helium_element_keys[1]]
            else:
                self.abunData['HeI_HI_from_' + metal_ext] = self.abunData[
                    Helium_element_keys[0]]

            #Proceed to get the Helium mass fraction Y
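            #Y = 4y(1 - 20*(X/H)) / (1 + 4y), with y = He/H and X/H the metal abundance (O or S)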
            Element_abund = metal_ext + 'I_HI'
            Y_fraction, Helium_abund = 'Ymass_' + metal_ext, 'HeI_HI_from_' + metal_ext
            if set(self.abunData.index) >= {Helium_abund, Element_abund}:
                self.abunData[Y_fraction] = (
                    4 * self.abunData[Helium_abund] *
                    (1 - 20 * self.abunData[Element_abund])) / (
                        1 + 4 * self.abunData[Helium_abund])

    def store_abundances_excel(self, objCode, catalogue_df, extension=''):

        #Store the values using the mean and the std from the array
        for parameter in self.abunData.index:

            mean_value, std_value = mean(self.abunData[parameter]), std(
                self.abunData[parameter])

            if (~isnan(mean_value)) & (~isnan(std_value)):
                catalogue_df.loc[objCode, parameter + extension] = ufloat(
                    mean_value, std_value)
            else:
                catalogue_df.loc[objCode, parameter + extension] = np_nan

            print parameter, mean_value, std_value

        return

    def generate_nan_array(self):

        nan_array = empty(self.MC_array_len)
        nan_array[:] = np_nan

        return nan_array

    def check_nan_entries(self, input_array):

        nan_count = np_sum(isnan(input_array))

        if nan_count > 0:
            mag, error = nanmean(input_array), nanstd(input_array)
            new_distr = random.normal(mag, error, size=self.MC_array_len)
            if nan_count > 0.1 * self.MC_array_len:
                print '--Helium issue with {} nans'.format(nan_count)
        else:
            new_distr = input_array

        return new_distr
Beispiel #21
0
class Maps(MutableMapping):
    """
    Converts a dictionary of key:value pairs into a dotted
    representation of those keys. Normal string representation of
    keys is still accessible via normal dictionary indexing.

    Note:
        If a key contains non-alphanumeric characters
        (!@#$%, etc, including spaces), they will be replaced with
        an underscore (_).

    Examples:
        >>> # Normal usage
        >>> test = {"hello": "world"}
        >>> print(Maps(test))

        Output: Maps(hello="world")

        >>> test = {"hello": "world"}
        >>> maps = Maps(test)
        >>> print(maps.hello)

        Output: "world"

        >>> test = {"hello": "world"}
        >>> maps = Maps(test)
        >>> print(maps["hello"])

        Output: "world"

        >>> # If a dictionary key has non-alphanumeric characters
        >>> # Notice how a series of special characters is replaced
        >>> # by only a single underscore
        >>> test = {"hello joh*&^n": "hi computer"}
        >>> maps = Maps(test)
        >>> print(maps)

        Output: Maps(hello_joh_n="hi computer")

    Raises:
        ValueError:
            An argument is of a legal type but is, or contains, an
            illegal value.
    """

    # Class-level variables
    _dynamic: bool
    _map: OrderedDict

    def __init__(self, *args, **kwargs) -> None:
        super().__init__()

        self._dynamic = True
        self._map = OrderedDict()

        if kwargs:
            for key, value in self._get_items(kwargs):
                key = re.sub('[^0-9a-zA-Z]+', '_', key)
                if key != '_dynamic':
                    self._map[key] = value
                else:
                    self._dynamic = value

        if args:
            dictionary = args[0]

            if not isinstance(dictionary, dict):
                raise ValueError(
                    "object passed to constructor must be of type 'dict': "
                    f"'{type(dictionary).__name__}'")

            # Recursive handling
            tracked_ids = {id(dictionary): self}
            for key, value in self._get_items(dictionary):
                if isinstance(key, str):
                    key = re.sub('[^0-9a-zA-Z]+', '_', key)

                value_id = id(value)
                if isinstance(value, dict):
                    if value_id in tracked_ids:
                        value = tracked_ids[value_id]
                    else:
                        value = self.__class__(value, _dynamic=self._dynamic)
                        tracked_ids[value_id] = value

                if isinstance(value, list):
                    listed_items = []

                    for item in value:
                        temp_item = item
                        if isinstance(item, dict):
                            temp_item = self.__class__(item,
                                                       _dynamic=self._dynamic)
                        listed_items.append(temp_item)

                    value = listed_items
                try:
                    self._map[key] = ast.literal_eval(value)
                except NameError:
                    if value.lower() == "false":
                        self._map[key] = False
                    elif value.lower() == "true":
                        self._map[key] = True
                    else:
                        self._map[key] = value
                except (SyntaxError, ValueError):
                    # Cannot eval this value
                    self._map[key] = value

    # Dunder methods

    def __add__(self, value: object) -> Union[Any, NoReturn]:
        if self.empty():
            return value
        else:
            self_type = type(self).__name__
            value_type = type(value).__name__
            raise TypeError(
                f"unsupported operand type(s) for +: '{self_type}' and '{value_type}'"
            )

    def __cmp__(self, value: object) -> Any:
        value = Maps.parse_value(value)
        return self._map.__cmp__(value)

    def __contains__(self, name: str) -> bool:
        return self._map.__contains__(name)

    def __copy__(self) -> Maps:
        return self.__class__(self)

    def __deepcopy__(self, memo: Optional[dict] = None) -> Maps:
        return self.copy()

    def __delitem__(self, key: str) -> None:
        # OrderedDict.__delitem__ in Python 3 does not accept a dict_delitem argument
        return self._map.__delitem__(key)

    def __dir__(self) -> Iterable:
        return self.keys()

    def __eq__(self, value: Any) -> bool:
        value = Maps.parse_value(value)
        if not isinstance(value, dict):
            return False
        return self._map.__eq__(value)

    def __ge__(self, value: Any) -> bool:
        value = Maps.parse_value(value)
        return self._map.__ge__(value)

    def __gt__(self, value: Any) -> bool:
        value = Maps.parse_value(value)
        return self._map.__gt__(value)

    def __iter__(self) -> Iterable:
        return self._map.__iter__()

    def __le__(self, value: Any) -> bool:
        value = Maps.parse_value(value)
        return self._map.__le__(value)

    def __len__(self) -> int:
        return self._map.__len__()

    def __lt__(self, value: Any) -> bool:
        value = Maps.parse_value(value)
        return self._map.__lt__(value)

    def __ne__(self, value: Any) -> bool:
        value = Maps.parse_value(value)
        return self._map.__ne__(value)

    def __repr__(self) -> str:
        return str(self)

    def __str__(self) -> str:
        items = []

        for key, value in self._get_items(self._map):

            # Recursive assignment case
            if id(value) == id(self):
                items.append("{0}={1}(...)".format(key,
                                                   self.__class__.__name__))
            else:
                items.append("{0}={1}".format(key, repr(value)))

        joined = ", ".join(items)
        return "{0}({1})".format(self.__class__.__name__, joined)

    def __delattr__(self, name: str) -> None:
        self._map.__delitem__(name)

    def __getattr__(self, name: str) -> Any:
        if name in ('_map', '_dynamic',
                    "_ipython_canary_method_should_not_exist_"):
            return super().__getattr__(name)

        try:
            return super(self.__class__, self).__getattribute__(name)
        except AttributeError:
            pass

        return self[name]

    def __setattr__(self, name: str, value: Any) -> None:
        if name in ('_map', '_dynamic',
                    "_ipython_canary_method_should_not_exist_"):
            super().__setattr__(name, value)
        else:
            self[name] = value

    def __getitem__(self, name: str) -> Union[Any, Maps]:
        if (name not in self._map and self._dynamic
                and name != "_ipython_canary_method_should_not_exist_"):
            self[name] = self.__class__()

        return self._map[name]

    def __setitem__(self, name: str, value: Any) -> None:
        self._map[name] = value

    def __getstate__(self) -> dict:
        return self.__dict__

    def __setstate__(self, value: dict) -> None:
        self.__dict__.update(value)

    # Internal methods

    def _get_items(self, item: Any) -> Iterable:
        if hasattr(item, 'iteritems') and ismethod(getattr(item, 'iteritems')):
            return item.iteritems()
        else:
            return item.items()

    # Public methods

    def clear(self) -> None:
        """Remove all items from the Maps object."""

        self._map.clear()

    def copy(self) -> Maps:
        """Makes a copy of the Maps object in memory."""

        return self.__copy__()

    def empty(self) -> bool:
        """Returns whether the Maps object is empty."""

        return (not any(self))

    @classmethod
    def fromkeys(cls,
                 iterable: Iterable,
                 value: Optional[Any] = None) -> Maps:
        """Returns a new :obj:`Maps` object with keys supplied from an
        iterable setting each key in the object with :term:`value`.

        Args:
            iterable (:obj:`Iterable`):
                Any iterable.
            value (:obj:`obj`, optional):
                The value to set for the keys.
                Default is :obj:`None`.

        Returns:
            Maps:
                The :obj:`Maps` object.
        """

        maps = cls()
        maps._map = OrderedDict.fromkeys(iterable, value)

        return maps

    def get(self, key: str, default: Optional[Any] = None) -> Any:
        """
        Returns the value of 'key'.

        If :term:`key` does not exist, :term:`default` is returned
        instead.

        Args:
            key (:obj:`str`):
                The key to get the value needed from the dict.
            default (:obj:`obj`, optional):
                The value to return if :term:`key` does not exist.

        Returns:
            Any:
                The value at :term:`key` or :term:`default`.
        """

        return self._map.get(key, default)

    def has_key(self, key: str) -> bool:
        return key in self._map

    def items(self) -> Iterable:
        """Returns an iterable of the Maps object's (key, value) pairs."""

        return self._get_items(self._map)

    def iteritems(self) -> Iterator:
        """
        Returns an iterator over the Maps object's (key, value)
        pairs.
        """

        return self.items()

    def iterkeys(self) -> Iterator:
        """Returns an iterator over the Maps object's keys."""

        return iter(self._map.keys())

    def itervalues(self) -> Iterator:
        """Returns an iterator over the Maps object's values."""

        return iter(self._map.values())

    def keys(self) -> Iterable:
        """Returns the keys of the Maps object."""

        return self._map.keys()

    def next(self) -> str:
        """Returns the first key in the Maps object."""

        return next(iter(self._map))

    @classmethod
    def parse_ini(cls,
                  ini_dict: ConfigParser,
                  to_maps: bool = False) -> Union[dict, Maps]:
        """
        Converts the values from an INI file from all strings to their
        actual Python base-types (i.e. int, float, bool, etc).

        If the value cannot be converted, it is kept as a string.
        If a value of the key:value pairs is not a string, its type is
        maintained.

        Note:
            Any meant-to-be-bool values in the key:value pairs that are
            not exactly 'False' or 'True', but are similar like 'false'
            or 'tRue' for example, will be converted to bools.

        Args:
            ini_dict (:obj:`ConfigParser`):
                The dictionary returned by configparser when an INI file
                is loaded.
            to_maps (:obj:`bool`):
                Return a :obj:`Maps` object instead of a :obj:`dict`.

        Returns:
            dict or Maps:
                A dictionary maintaining the same key:value pairs as the
                input dictionary; however, the values are their Python
                base-types. If :obj:`to_maps` is :obj:`True`, return a
                :obj:`Maps` object.

        Raises:
            TypeError:
                An argument is of an illegal type.
        """

        # Check for dict because of recursion; ini_dict is only meant
        # to be a dict when the function recursively converts the values
        # from a ConfigParser
        if not isinstance(ini_dict, (dict, ConfigParser)):
            raise TypeError(
                "argument 'ini_dict' must be of type 'ConfigParser': "
                f"{type(ini_dict).__name__}")
        if isinstance(ini_dict, ConfigParser):
            ini_dict_ = {}
            for section in ini_dict.sections():
                ini_dict_[section] = {}
                for option in ini_dict.options(section):
                    # Parse using configparser
                    option_value = ini_dict.get(section, option)

                    # Parse using os environ
                    matches = [(m.start(0), m.end(0))
                               for m in re.finditer("&", option_value)]
                    if len(matches) > 0 and len(matches) % 2 == 0:
                        i = 0
                        while True:
                            try:
                                index_end = matches.pop(i + 1)[1]
                                index_start = matches.pop(i)[0]
                                sub = option_value[index_start:index_end]
                                sub_replace = os.environ[sub[1:-1]]
                                option_value = option_value.replace(
                                    sub, sub_replace)
                            except IndexError:
                                break
                            except KeyError:
                                pass
                    ini_dict_[section][option] = option_value
            ini_dict = ini_dict_

        for key, value in ini_dict.items():
            if isinstance(value, dict):
                # Recursively parse dict
                ini_dict[key] = Maps.parse_ini(value, to_maps=to_maps)
            else:
                if not isinstance(value, str):
                    continue
                try:
                    ini_dict[key] = ast.literal_eval(value)
                except (NameError, SyntaxError, ValueError):
                    # literal_eval cannot parse this value; fall back to a
                    # case-insensitive bool check, otherwise keep the string
                    if value.lower() == "false":
                        ini_dict[key] = False
                    elif value.lower() == "true":
                        ini_dict[key] = True
                    else:
                        ini_dict[key] = value
        return Maps(ini_dict) if to_maps else ini_dict

    @classmethod
    def parse_value(cls, value: Any) -> Any:
        """
        Checks if :term:`value` subclasses :obj:`Maps`. If so, it
        returns the :obj:`Maps` object; otherwise the :term:`value`
        itself.

        Args:
            value (:obj:`Any`):
                The value to parse.

        Returns:
            Any:
                :obj:`OrderedDict` if :term:`value` subclasses
                :obj:`Maps`, otherwise :term:`value`.
        """

        if issubclass(type(value), Maps):
            return value.map
        else:
            return value

    def pop(self,
            key: str,
            default: Optional[Any] = None) -> Union[Any, NoReturn]:
        """
        Removes and returns the value in the Maps object at 'key'. If
        'key' does not exist, then 'default' is returned.

        Args:
            key (:obj:`str`):
                The key to use to remove a value from the Maps object.
            default (:obj:`obj`, optional):
                The value to return if :term:`key` does not exist in the
                :obj:`Maps` object.

        Returns:
            Any:
                The value at :term:`key`, otherwise :term:`default`.
        """

        return self._map.pop(key, default)

    def popitem(self) -> Any:
        """Removes and returns an arbitrary (key, value) pair from the
        :obj:`Maps` object.

        Returns:
            Any:
                The arbitrary (key, value) pair.

        Raises:
            KeyError:
                The :obj:`Maps` object is empty.
        """

        return self._map.popitem()

    def setdefault(self, key: str, default=None) -> Any:
        """
        Returns the value of 'key' in the Maps object.

        If 'key' is not found, 'default' is inserted at 'key' into
        the Maps object and that value is returned.

        Args:
            key: The key to return the value of.
            default (:obj:`obj`, optional): The value to insert if 'key'
                                            does not exist. Defaults to
                                            None.

        Returns:
            object: The object at 'key' in the Maps object, 'default'
                    otherwise.
        """

        return self._map.setdefault(key, default)

    def to_dict(self) -> Union[dict, NoReturn]:
        """Converts the :obj:`Maps` object to a stdlib dictionary.

        Returns:
            dict:
                The converted :obj:`Maps` object as a dictionary.
        """

        new_dict = {}

        for key, value in self.items():
            if issubclass(type(value), Maps):
                if id(value) == id(self):
                    value = new_dict
                else:
                    value = value.to_dict()
            elif isinstance(value, (tuple, list)):
                new_list = []

                for item in value:
                    temp_item = item
                    if issubclass(type(item), Maps):
                        temp_item = item.to_dict()
                    new_list.append(temp_item)

                if isinstance(value, tuple):
                    value = tuple(new_list)
                else:
                    value = new_list

            new_dict[key] = value
        return new_dict

    def update(self, *args, **kwargs) -> None:
        """Adds or changes existing values using a dictionary or
        iterator of key:value pairs."""

        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def values(self) -> Any:
        """Returns the values of the :obj:`Maps` object."""

        return self._map.values()

    def viewitems(self) -> Any:
        """Returns a new view of the :obj:`Maps` object's items
        (key:value pairs)."""

        return self._map.items()

    def viewkeys(self) -> Any:
        """Returns a new view of the :obj:`Maps` object's keys."""

        return self._map.keys()

    def viewvalues(self) -> Any:
        """Returns a new view of the :obj:`Maps` object's values."""

        return self._map.values()
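
A minimal usage sketch of the Maps class above (added for illustration, not part of the original example). It assumes Maps accepts a plain mapping in its constructor, as implied by parse_ini returning Maps(ini_dict); the keys and values here are made up.

# Hypothetical usage of Maps -- attribute-style access plus INI coercion
from configparser import ConfigParser

cfg = Maps({"db": {"host": "localhost", "port": 5432}})
cfg.cache = Maps()                    # routed through __setattr__ -> __setitem__
cfg.cache.ttl = 300
assert cfg["db"]["port"] == 5432
assert cfg.to_dict()["cache"] == {"ttl": 300}

# parse_ini coerces INI string values to Python types where it can
parser = ConfigParser()
parser.read_string("[server]\nport = 8080\ndebug = true\n")
parsed = Maps.parse_ini(parser, to_maps=True)
assert parsed["server"]["port"] == 8080    # "8080" becomes an int
assert parsed["server"]["debug"] is True   # "true" becomes a bool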
Beispiel #22
0
class Config(MutableMapping, OrderedDict):
    @classmethod
    def load(cls, file_path):
        with open(file_path) as f:
            params = yaml.load(f.read(), Loader=yaml.FullLoader)

        # We expand ~ in those yaml entries with `path`
        # on their keys for making
        # config files more platform-independent
        params = {
            key: (os.path.expanduser(value)
                  if "path" in key and value is not None else value)
            for key, value in params.items()
        }

        return cls(params)

    def dump(self, file_path):
        with open(file_path, "w") as f:
            d = self.to_dict()
            f.write(yaml.dump(d))

    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()

        if args:
            d = args[0]
            # for recursive assignment handling
            trackedIDs = {id(d): self}
            if isinstance(d, dict):
                for k, v in self.__call_items(d):
                    if isinstance(v, dict):
                        if id(v) in trackedIDs:
                            v = trackedIDs[id(v)]
                        else:
                            v = self.__class__(v)
                            trackedIDs[id(v)] = v
                    if type(v) is list:
                        l = []
                        for i in v:
                            n = i
                            if isinstance(i, dict):
                                n = self.__class__(i)
                            l.append(n)
                        v = l
                    self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                self._map[k] = v

    _path_state = list()

    def __call_items(self, obj):
        if hasattr(obj, "iteritems") and ismethod(getattr(obj, "iteritems")):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        # print('Called __setitem__')

        if (k in self._map and not self._map[k] is None
                and not isinstance(v, type(self._map[k]))):
            if v is not None:
                raise ValueError(
                    f"Updating existing value {type(self._map[k])} "
                    f"with different type ({type(v)}).")
        split_path = k.split(".")
        current_option = self._map
        for p in split_path[:-1]:
            current_option = current_option[p]
        current_option[split_path[-1]] = v

    def __getitem__(self, k):
        split_path = k.split(".")
        current_option = self._map
        for p in split_path:
            if p not in current_option:
                raise KeyError(p)
            current_option = current_option[p]
        return current_option

    def __setattr__(self, k, v):
        if k in {"_map", "_ipython_canary_method_should_not_exist_"}:
            super(Config, self).__setattr__(k, v)
        else:
            self[k].update(v)

    def __getattr__(self, k):
        if k in {"_map", "_ipython_canary_method_should_not_exist_"}:
            return super(Config, self).__getattr__(k)

        try:
            v = super(self.__class__, self).__getattribute__(k)
            return v
        except AttributeError:
            self._path_state.append(k)

        return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __add__(self, other):
        if self.empty():
            return other
        else:
            self_type = type(self).__name__
            other_type = type(other).__name__
            msg = "unsupported operand type(s) for +: '{}' and '{}'"
            raise TypeError(msg.format(self_type, other_type))

    def __str__(self):
        items = []
        for k, v in self.__call_items(self._map):
            # recursive assignment case
            if id(v) == id(self):
                items.append("{0}={1}(...)".format(k, self.__class__.__name__))
            else:
                items.append("{0}={1}".format(k, repr(v)))
        joined = ", ".join(items)
        out = "{0}({1})".format(self.__class__.__name__, joined)
        return out

    def __repr__(self):
        return str(self)

    def to_dict(self, flatten=False, parent_key="", sep="."):
        d = {}
        for k, v in self.items():
            if issubclass(type(v), Config):
                # bizarre recursive assignment support
                if id(v) == id(self):
                    v = d
                else:
                    v = v.to_dict()
            elif type(v) in (list, tuple):
                l = []
                for i in v:
                    n = i
                    if issubclass(type(i), Config):
                        n = i.to_dict()
                    l.append(n)
                if type(v) is tuple:
                    v = tuple(l)
                else:
                    v = l
            d[k] = v

        if flatten:
            d = flatten_dict(d, parent_key=parent_key, sep=sep)

        return d

    def pprint(self):
        pprint(self.to_dict())

    def empty(self):
        return not any(self)

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return list(self.keys())

    def _ipython_key_completions_(self):
        return list(self.keys())

    @classmethod
    def parseOther(cls, other):
        if issubclass(type(other), Config):
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = Config.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = Config.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = Config.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = Config.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = Config.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = Config.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = Config.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return self.__class__(self)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) == 1:
            for key, value in args[0].items():
                if key in self and isinstance(self[key], dict):
                    if value is None:
                        self[key] = value
                    else:
                        self[key].update(value)
                else:
                    raise ValueError(
                        "update() only merges into existing dict-valued "
                        "keys; '{}' is missing or not a mapping".format(key))
        elif len(args) > 1:
            raise NotImplementedError("update() accepts a single mapping argument")
            # self._map.update(*args)
        else:
            raise NotImplementedError("update() requires a mapping argument")

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = cls()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    # bannerStr

    def _getListStr(self, items):
        out = "["
        mid = ""
        for i in items:
            mid += "  {}\n".format(i)
        if mid != "":
            mid = "\n" + mid
        out += mid
        out += "]"
        return out

    def _getValueStr(self, k, v):
        outV = v
        multiLine = len(str(v).split("\n")) > 1
        if multiLine:
            # push to next line
            outV = "\n" + v
        if type(v) is list:
            outV = self._getListStr(v)
        out = "{} {}".format(k, outV)
        return out

    def _getSubMapDotList(self, pre, name, subMap):
        outList = []
        if pre == "":
            pre = name
        else:
            pre = "{}.{}".format(pre, name)

        def stamp(pre, k, v):
            valStr = self._getValueStr(k, v)
            return "{}.{}".format(pre, valStr)

        for k, v in subMap.items():
            if isinstance(v, Config) and v != Config():
                subList = self._getSubMapDotList(pre, k, v)
                outList.extend(subList)
            else:
                outList.append(stamp(pre, k, v))
        return outList

    def _getSubMapStr(self, name, subMap):
        outList = ["== {} ==".format(name)]
        for k, v in subMap.items():
            if isinstance(v, self.__class__) and v != self.__class__():
                # break down to dots
                subList = self._getSubMapDotList("", k, v)
                # add the divit
                # subList = ['> {}'.format(i) for i in subList]
                outList.extend(subList)
            else:
                out = self._getValueStr(k, v)
                # out = '> {}'.format(out)
                out = "{}".format(out)
                outList.append(out)
        finalOut = "\n".join(outList)
        return finalOut

    def bannerStr(self):
        lines = []
        previous = None
        for k, v in self.items():
            if previous == self.__class__.__name__:
                lines.append("-")
            out = ""
            if isinstance(v, self.__class__):
                name = k
                subMap = v
                out = self._getSubMapStr(name, subMap)
                lines.append(out)
                previous = self.__class__.__name__
            else:
                out = self._getValueStr(k, v)
                lines.append(out)
                previous = "other"
        lines.append("--")
        s = "\n".join(lines)
        return s
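
A short, hypothetical sketch of the Config class above (added for illustration, not from the original example); the keys are invented.

# Hypothetical usage of Config -- nested dicts become Config objects and
# dotted paths address nested options
cfg = Config({"training": {"lr": 0.001, "epochs": 10}, "seed": 42})
assert cfg["training.lr"] == 0.001     # dotted-path lookup walks the nesting
assert cfg.training["epochs"] == 10    # attribute access returns the nested Config
cfg["training.lr"] = 0.01              # allowed: same type as the existing value
assert cfg.to_dict() == {"training": {"lr": 0.01, "epochs": 10}, "seed": 42}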
Beispiel #23
0
class DotMap(MutableMapping, OrderedDict):
    def __init__(self, *args, **kwargs):
        self._map = OrderedDict()
        self._dynamic = kwargs.pop('_dynamic', True)
        self._prevent_method_masking = kwargs.pop('_prevent_method_masking',
                                                  False)
        trackedIDs = kwargs.pop('_trackedIDs', {})

        if args:
            d = args[0]
            # for recursive assignment handling
            trackedIDs[id(d)] = self

            src = []
            if isinstance(d, MutableMapping):
                src = self.__call_items(d)
            elif isinstance(d, Iterable):
                src = d

            for k, v in src:
                if self._prevent_method_masking and k in reserved_keys:
                    raise KeyError('"{}" is reserved'.format(k))
                if isinstance(v, dict):
                    idv = id(v)
                    if idv in trackedIDs:
                        v = trackedIDs[idv]
                    else:
                        trackedIDs[idv] = v
                        v = self.__class__(v,
                                           _dynamic=self._dynamic,
                                           _prevent_method_masking=self.
                                           _prevent_method_masking,
                                           _trackedIDs=trackedIDs)
                if type(v) is list:
                    l = []
                    for i in v:
                        n = i
                        if isinstance(i, dict):
                            idi = id(i)
                            if idi in trackedIDs:
                                n = trackedIDs[idi]
                            else:
                                trackedIDs[idi] = i
                                n = self.__class__(
                                    i,
                                    _dynamic=self._dynamic,
                                    _prevent_method_masking=self.
                                    _prevent_method_masking)
                        l.append(n)
                    v = l
                self._map[k] = v
        if kwargs:
            for k, v in self.__call_items(kwargs):
                if self._prevent_method_masking and k in reserved_keys:
                    raise KeyError('"{}" is reserved'.format(k))
                self._map[k] = v

    def __call_items(self, obj):
        if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
            return obj.iteritems()
        else:
            return obj.items()

    def items(self):
        return self.iteritems()

    def iteritems(self):
        return self.__call_items(self._map)

    def __iter__(self):
        return self._map.__iter__()

    def next(self):
        return self._map.next()

    def __setitem__(self, k, v):
        self._map[k] = v

    def __getitem__(self, k):
        if k not in self._map and self._dynamic and k != '_ipython_canary_method_should_not_exist_':
            # automatically extend to new DotMap
            self[k] = self.__class__()
        return self._map[k]

    def __setattr__(self, k, v):
        if k in {
                '_map', '_dynamic', '_ipython_canary_method_should_not_exist_',
                '_prevent_method_masking'
        }:
            super(DotMap, self).__setattr__(k, v)
        elif self._prevent_method_masking and k in reserved_keys:
            raise KeyError('"{}" is reserved'.format(k))
        else:
            self[k] = v

    def __getattr__(self, k):
        if k.startswith('__') and k.endswith('__'):
            raise AttributeError(k)

        if k in {
                '_map', '_dynamic', '_ipython_canary_method_should_not_exist_'
        }:
            return super(DotMap, self).__getattr__(k)

        try:
            v = super(self.__class__, self).__getattribute__(k)
            return v
        except AttributeError:
            pass

        return self[k]

    def __delattr__(self, key):
        return self._map.__delitem__(key)

    def __contains__(self, k):
        return self._map.__contains__(k)

    def __add__(self, other):
        if self.empty():
            return other
        else:
            self_type = type(self).__name__
            other_type = type(other).__name__
            msg = "unsupported operand type(s) for +: '{}' and '{}'"
            raise TypeError(msg.format(self_type, other_type))

    def __str__(self, seen=None):
        items = []
        seen = {id(self)} if seen is None else seen
        for k, v in self.__call_items(self._map):
            # circular assignment case
            if isinstance(v, self.__class__):
                if id(v) in seen:
                    items.append('{0}={1}(...)'.format(
                        k, self.__class__.__name__))
                else:
                    seen.add(id(v))
                    items.append('{0}={1}'.format(k, v.__str__(seen)))
            else:
                items.append('{0}={1}'.format(k, repr(v)))
        joined = ', '.join(items)
        out = '{0}({1})'.format(self.__class__.__name__, joined)
        return out

    def __repr__(self):
        return str(self)

    def toDict(self, seen=None):
        if seen is None:
            seen = {}

        d = {}

        seen[id(self)] = d

        for k, v in self.items():
            if issubclass(type(v), DotMap):
                idv = id(v)
                if idv in seen:
                    v = seen[idv]
                else:
                    v = v.toDict(seen=seen)
            elif type(v) in (list, tuple):
                l = []
                for i in v:
                    n = i
                    if issubclass(type(i), DotMap):
                        idv = id(n)
                        if idv in seen:
                            n = seen[idv]
                        else:
                            n = i.toDict(seen=seen)
                    l.append(n)
                if type(v) is tuple:
                    v = tuple(l)
                else:
                    v = l
            d[k] = v
        return d

    def pprint(self, pformat='dict'):
        if pformat == 'json':
            print(dumps(self.toDict(), indent=4, sort_keys=True))
        else:
            pprint(self.toDict())

    def empty(self):
        return (not any(self))

    # proper dict subclassing
    def values(self):
        return self._map.values()

    # ipython support
    def __dir__(self):
        return self.keys()

    @classmethod
    def parseOther(self, other):
        if issubclass(type(other), DotMap):
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parseOther(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parseOther(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def clear(self):
        self._map.clear()

    def copy(self):
        return self.__class__(self)

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo=None):
        return self.copy()

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def popitem(self):
        return self._map.popitem()

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def update(self, *args, **kwargs):
        if len(args) != 0:
            self._map.update(*args)
        self._map.update(kwargs)

    def viewitems(self):
        return self._map.viewitems()

    def viewkeys(self):
        return self._map.viewkeys()

    def viewvalues(self):
        return self._map.viewvalues()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = cls()
        d._map = OrderedDict.fromkeys(seq, value)
        return d

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)

    # bannerStr
    def _getListStr(self, items):
        out = '['
        mid = ''
        for i in items:
            mid += '  {}\n'.format(i)
        if mid != '':
            mid = '\n' + mid
        out += mid
        out += ']'
        return out

    def _getValueStr(self, k, v):
        outV = v
        multiLine = len(str(v).split('\n')) > 1
        if multiLine:
            # push to next line
            outV = '\n' + v
        if type(v) is list:
            outV = self._getListStr(v)
        out = '{} {}'.format(k, outV)
        return out

    def _getSubMapDotList(self, pre, name, subMap):
        outList = []
        if pre == '':
            pre = name
        else:
            pre = '{}.{}'.format(pre, name)

        def stamp(pre, k, v):
            valStr = self._getValueStr(k, v)
            return '{}.{}'.format(pre, valStr)

        for k, v in subMap.items():
            if isinstance(v, DotMap) and v != DotMap():
                subList = self._getSubMapDotList(pre, k, v)
                outList.extend(subList)
            else:
                outList.append(stamp(pre, k, v))
        return outList

    def _getSubMapStr(self, name, subMap):
        outList = ['== {} =='.format(name)]
        for k, v in subMap.items():
            if isinstance(v, self.__class__) and v != self.__class__():
                # break down to dots
                subList = self._getSubMapDotList('', k, v)
                # add the divit
                # subList = ['> {}'.format(i) for i in subList]
                outList.extend(subList)
            else:
                out = self._getValueStr(k, v)
                # out = '> {}'.format(out)
                out = '{}'.format(out)
                outList.append(out)
        finalOut = '\n'.join(outList)
        return finalOut

    def bannerStr(self):
        lines = []
        previous = None
        for k, v in self.items():
            if previous == self.__class__.__name__:
                lines.append('-')
            out = ''
            if isinstance(v, self.__class__):
                name = k
                subMap = v
                out = self._getSubMapStr(name, subMap)
                lines.append(out)
                previous = self.__class__.__name__
            else:
                out = self._getValueStr(k, v)
                lines.append(out)
                previous = 'other'
        lines.append('--')
        s = '\n'.join(lines)
        return s
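
A brief, hypothetical sketch of the DotMap class above (added for illustration, not from the original example); the names are made up and _dynamic keeps its default of True.

# Hypothetical usage of DotMap -- missing keys are created on attribute access
dm = DotMap({"model": {"layers": [64, 32]}})
dm.optimizer.name = "adam"             # 'optimizer' is auto-created as a DotMap
dm.optimizer.lr = 3e-4
assert dm.model.layers == [64, 32]
assert dm.toDict()["optimizer"] == {"name": "adam", "lr": 3e-4}

strict = DotMap(_dynamic=False)        # disable automatic expansion
try:
    strict.missing
except KeyError:
    pass                               # unknown keys now raise instead of growing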
Beispiel #24
0
class PartiePT(Partie):
    __tablename__ = "partie_pestuse"
    __mapper_args__ = {'polymorphic_identity': 'pestuse'}
    partie_id = Column(Integer, ForeignKey('parties.id'), primary_key=True)
    repetitions = relationship('RepetitionsPT')

    def __init__(self, le2mserv, joueur):
        super(PartiePT, self).__init__("pestuse", "PT")
        self._le2mserv = le2mserv
        self.joueur = joueur
        self._texte_recapitulatif = u""
        self._texte_final = u""
        # self.PT_gain_ecus = 0
        #        self.PT_gain_euros = 0
        self._histo_build = OrderedDict()
        self._histo_build[le2mtrans(u"Period")] = "PT_period"
        self._histo_build[le2mtrans(u"DecisionY")] = "PT_decisionY"
        self._histo_build[le2mtrans(u"DecisionZ")] = "PT_decisionZ"
        self._histo_build[le2mtrans(u"NbAteliersY")] = "PT_nbAteliersY"
        self._histo_build[le2mtrans(u"NbAteliersZ")] = "PT_nbAteliersZ"
        self._histo_build[le2mtrans(u"RendementY")] = "PT_rendementY"
        self._histo_build[le2mtrans(u"RendementZ")] = "PT_rendementZ"
        self._histo_build[le2mtrans(u"ProfitY")] = "PT_profitY"
        self._histo_build[le2mtrans(u"ProfitZ")] = "PT_profitZ"
        self._histo_build[le2mtrans(u"Tirage_de")] = "PT_tirage_de"
        self._histo_build[le2mtrans(u"GainY")] = "PT_gainY"
        self._histo_build[le2mtrans(u"GainZ")] = "PT_gainZ"
        self._histo_build[le2mtrans(u"Period\npayoff")] = "PT_periodpayoff"
        self._histo_build[le2mtrans(u"Cumulative\npayoff")] = "PT_cumulativepayoff"
        self._histo_content = [list(self._histo_build.viewkeys())]
        #        self.periods = {}
        self._currentperiod = None

    @defer.inlineCallbacks
    def display_QC(self, type_partie):
        """
        Display the comprehension questionnaire screen on the remote
        Get back the decision
        :return:
        """
        if type_partie == "BEN":
            QC = list(pms.QCBEN)
        elif type_partie == "WEX":
            QC = list(pms.QCWEX)
        elif type_partie == "WEA":
            QC = list(pms.QCWEA)
        elif type_partie == "WIN":
            QC = list(pms.QCWIN)
        elif type_partie == "WEI":
            QC = list(pms.QCWEI)
        elif type_partie == "WIE":
            QC = list(pms.QCWIE)
        QC_NbQuest = len(QC)
        reponses_fausse = []
        for i_QC_NbQuest in range(0, QC_NbQuest):
            logger.debug(u"{} Decision".format(self.joueur))
            debut_QC = datetime.now()
            self.PT_decision_QC = yield (self.remote.callRemote(
                "display_QC", i_QC_NbQuest, type_partie))
            self.PT_decisiontime_QC = (datetime.now() - debut_QC).seconds
            indice_bonne_reponse = QC[i_QC_NbQuest][1].index(QC[i_QC_NbQuest][2][0])
            if self.PT_decision_QC != indice_bonne_reponse:
                reponses_fausse.append(i_QC_NbQuest)
            # self.joueur.info(u"{}".format(self.PT_decision_QC))
            self.joueur.remove_waitmode()
        self.joueur.info(u"Faute(s) {}".format(reponses_fausse))

    @property
    def currentperiod(self):
        return self._currentperiod

    @defer.inlineCallbacks
    def configure(self):
        logger.debug(u"{} Configure".format(self.joueur))
        yield (self.remote.callRemote("configure", get_module_attributes(pms)))
        self.joueur.info(u"Ok")

    @defer.inlineCallbacks
    def newperiod(self, period):
        """
        Create a new period and inform the remote.
        If this is the first period, empty the history.
        :param period:
        :return:
        """
        logger.debug(u"{} New Period".format(self.joueur))
        if period == 1:
            del self._histo_content[1:]
        self._currentperiod = RepetitionsPT(period)
        self._le2mserv.gestionnaire_base.ajouter(self.currentperiod)
        self.repetitions.append(self.currentperiod)
        yield (self.remote.callRemote("newperiod", period))
        logger.info(u"{} Ready for period {}".format(self.joueur, period))

    @defer.inlineCallbacks
    def display_decision(self, type_partie):
        """
        Display the decision screen on the remote
        Get back the decision
        :return:
        """
        logger.debug(u"{} Decision".format(self.joueur))
        debut = datetime.now()
        les_decisions = yield (self.remote.callRemote(
            "display_decision", self._histo_content, type_partie))
        self.currentperiod.PT_decisionY = les_decisions[0]
        self.currentperiod.PT_decisionZ = les_decisions[1]
        self.currentperiod.PT_nbAteliersY = les_decisions[2]
        self.currentperiod.PT_nbAteliersZ = 10 - les_decisions[2]
        self.currentperiod.PT_type_partie = type_partie
        self.currentperiod.PT_decisiontime = (datetime.now() - debut).seconds
        self.joueur.info(u"{} {} {} {}".format(self.currentperiod.PT_nbAteliersY, self.currentperiod.PT_nbAteliersZ,
                                               self.currentperiod.PT_decisionY, self.currentperiod.PT_decisionZ))
        self.joueur.remove_waitmode()

    @defer.inlineCallbacks
    def lance_de(self, type_partie):
        """
        Roll of the die
        :return:
        """
        logger.debug(u"{} tirage du dé".format(self.joueur))
        debut_de = datetime.now()
        self.currentperiod.PT_tirage_de = yield (self.remote.callRemote(
            "tirage_de", type_partie))
        self.currentperiod.PT_decisiontime_de = (datetime.now() - debut_de).seconds
        self.joueur.info(u"{}".format(self.currentperiod.PT_tirage_de))
        self.joueur.remove_waitmode()

    @defer.inlineCallbacks
    def affichage_result(self, type_partie, indice_part_pestuse, tirage_part_pestuse_gain, tirage_periode_pestuse_gain):
        """
        Display the results of the period
        :return:
        """
        decision_pour_Y = self.currentperiod.PT_decisionY
        decision_pour_Z = self.currentperiod.PT_decisionZ
        nbAtelierY = self.currentperiod.PT_nbAteliersY
        nbAtelierZ = self.currentperiod.PT_nbAteliersZ
        tirage_du_de = self.currentperiod.PT_tirage_de
        logger.debug(u"{} Affichage des résultats de la période".format(self.joueur))
        debut_ar = datetime.now()
        les_retours = yield (self.remote.callRemote(
            "affichage_result", decision_pour_Y, decision_pour_Z, tirage_du_de, type_partie, nbAtelierY, nbAtelierZ))
        self.currentperiod.PT_rendementY = les_retours[0]
        self.currentperiod.PT_rendementZ = les_retours[1]
        self.currentperiod.PT_profitY = les_retours[2]
        self.currentperiod.PT_profitZ = les_retours[3]
        self.currentperiod.PT_tirage_de = les_retours[4]
        self.currentperiod.PT_gainY = les_retours[5]
        self.currentperiod.PT_gainZ = les_retours[6]
        # Set the payoff values only if this is the randomly drawn pestuse part and the drawn period
        self.currentperiod.PT_periodpayoff = 0
        # print "indice_part_pestuse = ",  indice_part_pestuse
        #        print "tirage_part_pestuse_gain = ",  tirage_part_pestuse_gain
        #        print "\n"
        #        print "self.currentperiod.PT_period = ", self.currentperiod.PT_period
        #        print "tirage_periode_pestuse_gain",  tirage_periode_pestuse_gain
        if indice_part_pestuse == tirage_part_pestuse_gain and self.currentperiod.PT_period == tirage_periode_pestuse_gain:
            self.currentperiod.PT_periodpayoff = les_retours[5] + les_retours[6]
            self.PT_gain_ecus = les_retours[5] + les_retours[6]
            self.PT_gain_euros = \
                float(self.PT_gain_ecus) * float(pms.TAUX_CONVERSION)
        #            print "JE PASSE et self.currentperiod.PT_periodpayoff = ", self.currentperiod.PT_periodpayoff
        #            print " Et self.PT_gain_ecus = ",  self.PT_gain_ecus
        resu_payoff = self.currentperiod.PT_periodpayoff
        self.currentperiod.PT_indice_part_pestuse = indice_part_pestuse
        self.currentperiod.PT_tirage_part_pestuse_gain = tirage_part_pestuse_gain
        self.currentperiod.PT_tirage_periode_pestuse_gain = tirage_periode_pestuse_gain
        # Fill in the history table
        self._histo_content.append(
            [getattr(self.currentperiod, e) for e
             in self._histo_build.viewvalues()])
        self.currentperiod.PT_decisiontime_ar = (datetime.now() - debut_ar).seconds
        self.joueur.info(u"{}".format(self.currentperiod.PT_tirage_ar))
        self.joueur.remove_waitmode()

        defer.returnValue(resu_payoff)

    def compute_periodpayoff(self):
        """
        Compute the payoff for the period
        :return:
        """
        logger.debug(u"{} Period Payoff".format(self.joueur))
        # self.currentperiod.PT_periodpayoff = 0

        # cumulative payoff since the first period
        if self.currentperiod.PT_period < 2:
            self.currentperiod.PT_cumulativepayoff = \
                self.currentperiod.PT_periodpayoff
        else:
            previousperiod = self.periods[self.currentperiod.PT_period - 1]
            self.currentperiod.PT_cumulativepayoff = \
                previousperiod.PT_cumulativepayoff + \
                self.currentperiod.PT_periodpayoff

        # we store the period in the self.periods dictionary
        self.periods[self.currentperiod.PT_period] = self.currentperiod

        logger.debug(u"{} Period Payoff {}".format(
            self.joueur,
            self.currentperiod.PT_periodpayoff))

    @defer.inlineCallbacks
    def display_summary(self, *args):
        """
        Create the summary (text and history) and then display it on the
        remote
        :param args:
        :return:
        """
        logger.debug(u"{} Summary".format(self.joueur))
        self._texte_recapitulatif = texts.get_recapitulatif(self.currentperiod)
        self._histo_content.append(
            [getattr(self.currentperiod, e) for e
             in self._histo_build.viewvalues()])
        yield (self.remote.callRemote(
            "display_summary", self._texte_recapitulatif, self._histo_content))
        self.joueur.info("Ok")
        self.joueur.remove_waitmode()

    def compute_partpayoff(self, tirage_part_pestuse_gain, tirage_periode_pestuse_gain):
        """
        Compute the payoff of the part
        :return:
        """
        logger.debug(u"{} Part Payoff".format(self.joueur))
        # gain partie
        self.PT_gain_ecus = self.currentperiod.PT_cumulativepayoff
        self.PT_gain_euros = \
            float(self.PT_gain_ecus) * float(pms.TAUX_CONVERSION)

        # texte final
        self._texte_final = texts.get_texte_final(
            self.PT_gain_ecus,
            self.PT_gain_euros,
            tirage_part_pestuse_gain,
            tirage_periode_pestuse_gain
        )

        logger.debug(u"{} Final text {}".format(self.joueur, self._texte_final))
        logger.info(u'{} Payoff ecus {} Payoff euros {:.2f}'.format(
            self.joueur, self.PT_gain_ecus, self.PT_gain_euros))