Example #1
1
def create_ordered_dict_and_sort(dictionary, key=None):
    """Return *dictionary* as an OrderedDict, optionally moving one key first.

    Args:
        dictionary: mapping (or iterable of key/value pairs) to copy.
        key: if given, this key is moved to the front of the result.

    Returns:
        OrderedDict with *key* (when supplied) as the first entry.

    Raises:
        KeyError: if *key* is supplied but absent from the mapping.
    """
    result = OrderedDict(dictionary)
    # BUG FIX: test against None instead of truthiness, so falsy-but-valid
    # keys such as 0 or "" are still moved to the front.  Also collapses the
    # redundant if/else that returned the same object on both branches.
    if key is not None:
        result.move_to_end(key, last=False)
    return result
def format_analyse(f_in, f_out, f_dic):
    """Parse an analyser dump from *f_in* into (grades, token lines) and hand
    the result to ``format_data`` (defined elsewhere in this file) together
    with an ordered vocabulary.

    File format (inferred from the parsing below — TODO confirm):
      - lines of "------------------------" are separators and are skipped;
      - a "++++++++++++++++++++++++" line closes the current record;
      - the first data line of each record carries the grade ("+…"/"-…")
        in its 3rd tab-separated field;
      - every other data line contributes its 3rd tab-separated field as a
        token of the record.
    """
    dic = OrderedDict()   # vocabulary: token -> 0, most recently seen last
    grades = []           # one grade string per record
    lines = []            # one token list per record
    l = []
    newline = True        # True while waiting for the next record's grade line
    with open(f_in, "r") as fin:
        for line in fin:
            if line.startswith("------------------------"):
                continue
            if line.startswith("++++++++++++++++++++++++"):
                lines.append(l)
                newline = True
                continue
            word = line.split("\t")[2]
            if newline:
                # First data line of a record: it is the grade, not a token.
                l = []
                grades.append(word)
                newline = False
                continue
            l.append(word)
            dic[word] = 0
            dic.move_to_end(word)  # repeated tokens are bumped to the end
    # Normalise each grade to "+1"/"-1" from its leading sign character.
    for i in range(len(grades)):
        grades[i] = "+1" if grades[i][0] == "+" else "-1"
    format_data(f_out, (grades, lines), f_dic, dic, True)
Example #3
1
class FilePool:
    """A bounded LRU pool of open file handles.

    ``size`` caps the number of simultaneously open files; when the pool is
    full the least-recently-used handle is closed and evicted.  ``hit`` and
    ``miss`` count pool lookups.
    """

    def __init__(self, size, compression=None):
        self.size = size
        self.compression = compression
        self.pool = OrderedDict()  # filename -> handle; LRU first, MRU last
        self.hit = 0
        self.miss = 0

    def open(self, filename, mode, encoding=None, file_type=None, compression=None):
        """Return a handle for *filename*, reusing a pooled one if present."""
        if filename in self.pool:
            # BUG FIX: a hit must mark the entry most-recently-used (move it
            # to the END), because eviction below pops from the FRONT.  The
            # original moved hits to the front, so the hottest file was the
            # first one evicted.
            self.pool.move_to_end(filename)
            self.hit += 1
            return self.pool[filename]

        # NOTE(review): five positional arguments would hand ``encoding`` to
        # the builtin ``open`` as ``buffering``.  Presumably a project-level
        # ``open`` helper shadows the builtin here — confirm.
        f = open(filename, mode, encoding, file_type, compression)

        if len(self.pool) >= self.size:
            # Evict the least-recently-used handle (front of the dict).
            _, old_f = self.pool.popitem(last=False)
            old_f.close()
        self.pool[filename] = f

        self.miss += 1
        return f

    def close(self):
        """Close every pooled handle."""
        for f in self.pool.values():
            f.close()
 def _rearrange(self, table, cols):
     """Reorder every row of *table* so the columns in *cols* come first
     (in their given order) and all remaining columns follow in reverse
     of their original order."""
     result = []
     for row in table:
         ordered = OrderedDict(row)
         # Push each requested column to the back in reverse order, then
         # flip the whole dict: `cols` end up leading, in order.
         for col in reversed(cols):
             ordered.move_to_end(col)
         flipped = OrderedDict(reversed(list(ordered.items())))
         result.append(flipped)
     return result
def read_dic(f_dict):
    """Read a dictionary file with one entry (word or phrase) per line.

    Only lines whose whitespace-separated tokens are all alphabetic are
    kept.  Returns an OrderedDict mapping each kept line (newlines removed)
    to 0, in file order; repeated entries are bumped to the end.
    """
    dic = OrderedDict()
    with open(f_dict, "r") as f:
        for line in f:
            w = line.replace("\n", "")
            tokens = w.split()
            # BUG FIX: the original tested ``if map(str.isalpha, ...)`` — a
            # map object is always truthy, so the alphabetic filter never
            # filtered anything (blank lines included).
            if tokens and all(map(str.isalpha, tokens)):
                dic[w] = 0
                dic.move_to_end(w)  # re-seen entries move to the end
    return dic
class memoize_mask(object):
    """
    Decorator. Caches wktToMask keyed to model_id and the WKT string

    The cache is an OrderedDict used as an LRU: a hit moves its key to the
    end, and entries are purged from the front while the cache's reported
    size exceeds ``maxsize`` megabytes.

    NOTE(review): relies on ``log``, ``cache_lock`` and ``getsize`` defined
    elsewhere in this module.
    """

    def __init__(self, func, maxsize=50):
        """
        Args:
            func: the function to wrap
            maxsize (int): Max size of cache (in MB)
        """

        self.hits = self.misses = 0
        self.func = func
        self.maxsize = maxsize
        self.cache = OrderedDict()  # (model_id, wkt) -> result, LRU-ordered

    def __call__(self, *args):
        # Expects exactly (nc, fname, wkt, varname) positional arguments.
        nc, fname, wkt, varname = args
        # If we have no key, automatic cache miss
        if not hasattr(nc, "model_id"):
            log.debug("Cache MISS (attribute 'model_id' not found)")
            return self.func(*args)

        # Set key to model_id and wkt polygon
        key = (nc.model_id, wkt)
        log.debug("Checking cache for key %s", key)

        with cache_lock:
            try:
                result = self.cache[key]
                self.cache.move_to_end(key)  # record recent use of this key
                self.hits += 1
                log.debug("Cache HIT")
                return result
            except KeyError:
                pass

        # Computed outside the lock, so two threads may duplicate work for
        # the same key; the later result simply overwrites the earlier one.
        log.debug("Cache MISS")
        result = self.func(*args)

        with cache_lock:
            self.cache[key] = result
            self.misses += 1
            # popitem(0) is popitem(last=False): purge from the front (LRU).
            while getsize(self.cache) > self.maxsize * 1024 * 1024:  # convert to MB
                self.cache.popitem(0)  # Purge least recently used cache entry

        return result

    def cache_clear(self):
        """Empty the cache and reset the hit/miss counters."""
        with cache_lock:
            self.cache.clear()
            self.hits = 0
            self.misses = 0
    def _querystring(self, params):
        """Generate the querystring to be posted to the MWS endpoint

        Required parameters for every API call.

        AWSAccessKeyId: Your Amazon MWS account is identified by your access key,
            which Amazon MWS uses to look up your secret key.

        SignatureMethod: The HMAC hash algorithm you are using to calculate your
            signature. Both HmacSHA256 and HmacSHA1 are supported hash algorithms,
            but Amazon recommends using HmacSHA256.

        SignatureVersion: Which signature version is being used. This is Amazon
            MWS-specific information that tells Amazon MWS the algorithm you used
            to form the string that is the basis of the signature. For Amazon MWS,
            this value is currently SignatureVersion=2.

        Version: The version of the API section being called.

        Timestamp: Each request must contain the timestamp of the request. The
            Timestamp attribute must contain the client's machine time in
            ISO8601 format; requests with a timestamp significantly different
            (15 minutes) than the receiving machine's clock will be rejected to
            help prevent replay attacks.

        SellerId: Your seller or merchant identifier.

        Returns:
            The URL-encoded, signed parameter string as UTF-8 bytes, with
            the Signature parameter placed last.
        """
        parameters = {
            "AWSAccessKeyId": self.mws_access_key,
            "SignatureMethod": "HmacSHA256",
            "SignatureVersion": "2",
            "Version": self._api_version,
            "Timestamp": datetime.datetime.utcnow().replace(microsecond=0).isoformat(sep="T") + "Z",
        }

        # Only default the SellerId when the caller did not supply one.
        if "SellerId" not in params:
            parameters["SellerId"] = self.merchant_id

        # Caller-supplied params override the defaults above.
        parameters.update({k: v for (k, v) in params.items()})
        parse_results = parse.urlparse(self._mws_endpoint)

        # Canonical string to sign: METHOD \n host (netloc) \n path \n the
        # sorted query string, with the quoting quirks required by MWS
        # signature version 2 (space as %20, literal * and ~ handling).
        string_to_sign = "POST\n{}\n{}\n{}".format(
            parse_results[1],
            parse_results[2],
            parse.urlencode(sorted(parameters.items())).replace("+", "%20").replace("*", "%2A").replace("%7E", "~"),
        )

        parameters["Signature"] = self._sign(string_to_sign)

        # Emit parameters sorted by name, with the signature as the final field.
        ordered_parameters = OrderedDict(sorted(parameters.items()))
        ordered_parameters.move_to_end("Signature")
        return parse.urlencode(ordered_parameters).encode(encoding="utf_8")
Example #8
0
def result_generator(count):
    """Yield OrderedDicts of restaurant metadata plus inspection scores for
    the first *count* listings parsed from a saved inspection-results page.

    NOTE(review): depends on helpers defined elsewhere in this file
    (``load_inspection_page``, ``parse_source``, ``restaurant_data_generator``,
    ``extract_restaurant_metadata``, ``get_score_data``, ``check_sorting``).
    """
    # NOTE(review): unused — the live fetch below is commented out; also
    # "201/26/2016" looks like a typo for "01/26/2016".
    use_params = {"Inspection_Start": "01/26/2015", "Inspection_End": "201/26/2016", "Zip_Code": "98101"}
    # html = get_inspection_page(**use_params)
    html = load_inspection_page("inspection_page.html")
    parsed = parse_source(html)
    content_col = parsed.find("td", id="contentcol")
    data_list = restaurant_data_generator(content_col)
    for data_div in data_list[: int(count)]:
        metadata = OrderedDict(extract_restaurant_metadata(data_div))
        inspection_data = get_score_data(data_div)
        metadata.update(inspection_data)
        # Move the key selected by check_sorting() to the front of the dict.
        metadata.move_to_end(check_sorting(), last=False)
        print(metadata)
        yield metadata
Example #9
0
    def load(self, base_settings):
        """Merge local settings from file with ``base_settings``.

        Returns a new OrderedDict containing the base settings and the
        loaded settings. Ordering is:

            - base settings
            - settings from extended file(s), if any
            - settings from file

        When a setting is overridden, it gets moved to the end.

        NOTE(review): returns None (not a dict) when the settings file is
        missing — callers must handle that case.
        """
        if not os.path.exists(self.file_name):
            self.print_warning("Local settings file `{0}` not found".format(self.file_name))
            return
        is_upper = lambda k: k == k.upper()
        # Seed the result with only the ALL-CAPS entries of the base settings.
        settings = OrderedDict((k, v) for (k, v) in base_settings.items() if is_upper(k))
        for k, v in self.read_file().items():
            # Keys may be dotted paths (e.g. "A.b.0") into nested dicts/lists.
            names = k.split(".")
            v = self._parse_setting(v, expand_vars=True)
            obj = settings
            # Walk (and create as needed) intermediate containers for every
            # path segment except the last.
            for name, next_name in zip(names[:-1], names[1:]):
                next_name = self._convert_name(next_name)
                next_is_seq = isinstance(next_name, int)
                # An integer segment means the next container is a list,
                # padded with PLACEHOLDER up to that index.
                default = [PLACEHOLDER] * (next_name + 1) if next_is_seq else {}
                if isinstance(obj, Mapping):
                    if name not in obj:
                        obj[name] = default
                elif isinstance(obj, Sequence):
                    name = int(name)
                    while name >= len(obj):
                        obj.append(PLACEHOLDER)
                    if obj[name] is PLACEHOLDER:
                        obj[name] = default
                obj = obj[name]
            name = self._convert_name(names[-1])
            try:
                curr_v = obj[name]
            except (KeyError, IndexError):
                pass
            else:
                # An existing LocalSetting captures the new value and is
                # recorded in the registry before being replaced below.
                if isinstance(curr_v, LocalSetting):
                    curr_v.value = v
                    self.registry[curr_v] = name
            obj[name] = v
            # Overridden top-level settings move to the end (see docstring).
            settings.move_to_end(names[0])
        settings.pop("extends", None)
        self._do_interpolation(settings, settings)
        return settings
Example #10
0
class LRUCache:
    """Fixed-capacity least-recently-used cache.

    ``get`` returns -1 for missing keys (the conventional LRU-cache
    contract); ``set`` inserts or updates a key and evicts the least
    recently used entry when the capacity would be exceeded.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()  # LRU first, MRU last

    def get(self, key):
        """Return the cached value, marking the key recently used; -1 if absent."""
        # BUG FIX: the original used ``self.cache.get(key, -1)`` with -1 as a
        # sentinel, which mishandles a legitimately stored value of -1 (the
        # entry was never marked recently used).  Test membership instead.
        if key not in self.cache:
            return -1
        self.cache.move_to_end(key, last=True)
        return self.cache[key]

    def set(self, key, value):
        """Insert/update *key*; evict the LRU entry when over capacity."""
        if key in self.cache:
            # Remove first so re-insertion below refreshes recency.
            self.cache.pop(key)
        if len(self.cache) == self.capacity:
            self.cache.popitem(last=False)

        self.cache[key] = value
def build_dic(lines):
    """Build an ordered word dictionary from tab-separated *lines*.

    Each line is expected to be "<id>\\t<text>"; lines without a tab field
    are removed from *lines* in place (preserving the original side
    effect).  Returns ``(dic, final_lines)`` where ``dic`` maps every
    bigram and word to 0 in first-seen order (repeats bumped to the end)
    and ``final_lines`` holds the token list produced for each kept line.

    NOTE(review): relies on ``clean_string``, ``bigrams`` and ``done``
    defined elsewhere in this file.
    """
    dic = OrderedDict()
    final_lines = []
    print("Building dictionnary...")
    # BUG FIX: the original removed items from ``lines`` while iterating
    # over it, which silently skips the element following every removal.
    # Iterate over a snapshot; removals still mutate the caller's list.
    for line in list(lines):
        parts = line.split("\t")
        if len(parts) == 1:
            lines.remove(line)
            continue
        text = clean_string(parts[1])
        words = bigrams(text) + text.split()
        final_lines.append(words)
        for w in words:
            dic[w] = 0
            dic.move_to_end(w)  # repeated tokens move to the end
    done()
    return (dic, final_lines)
Example #12
0
def cleanup(raw_dict):
    """Translate and clean a raw record into a reordered OrderedDict.

    Keys are mapped through ``trans`` and values through ``fix`` (both
    defined elsewhere in this file); keys whose translation appears in
    ``irrelevant`` are dropped.  The result is reordered so "points" (if
    present) and then "id" come first, and "site" (if present) and then
    "syllabus" come last.  An empty record yields ``{}``.

    NOTE(review): "id" and "syllabus" are moved unconditionally — a record
    missing either raises KeyError, unlike the guarded "points"/"site".
    Presumably they are always present; confirm with callers.
    """
    od = OrderedDict((trans[k], fix(trans[k], v)) for k, v in raw_dict.items() if trans[k] not in irrelevant)
    if not od:
        return {}
    if "points" in od:
        od.move_to_end("points", last=False)
    od.move_to_end("id", last=False)
    if "site" in od:
        od.move_to_end("site")
    od.move_to_end("syllabus")
    return od
Example #13
0
class GladiatorBuilder:
    """Interactively builds a gladiator's attribute set from stdin input.

    NOTE(review): the ``MAX_ATTRIUBE_SUM`` / ``checkAtrributeSum`` spellings
    are kept as-is because external code may reference these names.
    """

    MAX_ATTRIUBE_SUM = 20  # required total of the "battle" attributes

    def __init__(self):
        self.attributes = {}

    def readAttributes(self, attributesMetainfo):
        """Prompt for every attribute in *attributesMetainfo*, validate the
        battle-attribute sum, then return the attributes sorted by code
        with the "nm" (name) entry moved to the front."""
        self.attributes = {
            attribute["code"]: self.inputAttribute(attribute["name"], attribute["converter"])
            for attribute in attributesMetainfo
        }
        self.checkAtrributeSum()
        self.attributes = OrderedDict(sorted(self.attributes.items()))
        self.attributes.move_to_end("nm", last=False)
        return self.attributes

    def inputAttribute(self, attributeName, converter):
        """Read one attribute from stdin, re-prompting (recursively) until
        the converter accepts the value."""
        try:
            return converter.convertValue(input("Input gladiator %s: " % (attributeName)))
        except Exception as error:
            print(error)
            return self.inputAttribute(attributeName, converter)

    def getGladiator(self):
        """Construct a Gladiator from the previously read attributes."""
        if len(self.attributes) < 1:
            raise Exception("Can't read it. Gladiator data not specified.")
        return Gladiator(self.attributes)

    def checkAtrributeSum(self):
        """Verify the battle attributes sum to MAX_ATTRIUBE_SUM; on mismatch
        offer to restart the whole input dialogue, else clear the data."""
        summ = 0
        # Sum only attributes whose metadata marks them as battle attributes.
        for attributeMeta in AttributeLibrary.BASE_SET:
            if attributeMeta["AttrType"] == "battle":
                for attribute in self.attributes:
                    if attributeMeta["code"] == attribute:
                        summ += self.attributes[attribute]
        if summ != GladiatorBuilder.MAX_ATTRIUBE_SUM:
            print("Attribute sum must be equal to %i" % GladiatorBuilder.MAX_ATTRIUBE_SUM)
            # Prompt (Russian): "Do you want to re-create the gladiator? [y/n]"
            a = input("Хотите пересоздать гладиатора? [y/n]").lower()
            if a == "y":
                self.readAttributes(AttributeLibrary.BASE_SET)
            else:
                self.attributes = {}
    def rules(self, node):
        """Return the list of split rules on the path from the root to *node*.

        Rules are accumulated recursively from the parent's rules.  Rules on
        the same continuous attribute are merged into one entry; discrete
        splits are deliberately kept separate (see the in-line comment).
        Returns an empty list for the root.
        """
        if node != self.root:
            parent = self.parent(node)
            # Convert the parent list of rules into an ordered dict
            pr = OrderedDict([(r.attr_name, r) for r in self.rules(parent)])

            parent_attr = self.attribute(parent)
            # Get the parent attribute type
            parent_attr_cv = parent_attr.compute_value

            is_left_child = self.__left_child(parent) == node

            # The parent split variable is discrete
            if isinstance(parent_attr_cv, Indicator) and hasattr(parent_attr_cv.variable, "values"):
                values = parent_attr_cv.variable.values
                attr_name = parent_attr_cv.variable.name
                # Whether the rule is an equality depends on which child we
                # are and on whether the variable is binary.
                eq = not is_left_child * (len(values) != 2)
                value = values[abs(parent_attr_cv.value - is_left_child * (len(values) == 2))]
                new_rule = DiscreteRule(attr_name, eq, value)
                # Since discrete variables should appear in their own lines
                # they must not be merged, so the dict key is set with the
                # value, so the same keys can exist with different values
                # e.g. #legs ≠ 2 and #legs ≠ 4
                attr_name = attr_name + "_" + value
            # The parent split variable is continuous
            else:
                attr_name = parent_attr.name
                sign = not is_left_child
                value = self._tree.threshold[self.parent(node)]
                new_rule = ContinuousRule(attr_name, sign, value, inclusive=is_left_child)

            # Check if a rule with that attribute exists
            if attr_name in pr:
                # Merge with the existing rule and mark it most recent.
                pr[attr_name] = pr[attr_name].merge_with(new_rule)
                pr.move_to_end(attr_name)
            else:
                pr[attr_name] = new_rule

            return list(pr.values())
        else:
            return []
Example #15
0
def generate_schema():
    """Assemble the sensor JSON schema: per-unit-type sensor definitions
    plus enumerations for only the unit types actually used.

    NOTE(review): relies on ``load_json`` and ``load_schema`` helpers
    defined elsewhere in this file.
    """
    gendef = load_json("general_definition")
    unit_types = set()
    sensor_types = []
    # Boolean sensors have no unit; group them under the empty unit type.
    keyfunc = lambda s: "" if s["data_type"] == "boolean" else s["unit_type"]
    sensors = sorted(gendef["sensors"].values(), key=keyfunc)
    for unit_type, it in itertools.groupby(sensors, key=keyfunc):
        unit_types.add(unit_type)
        unit = {"$ref": "#/definitions/units/{}".format(unit_type)} if unit_type else {"not": {}}
        names = sorted(s["sensor_name"] for s in it)
        props = OrderedDict([("type", {"enum": names}), ("unit", unit)])
        # The slice drops the ("required", ["unit"]) entry for unit-less
        # (boolean) sensor groups: int(not unit_type) is 1 in that case.
        obj = OrderedDict([("required", ["unit"]), ("properties", props)][int(not unit_type) :])
        sensor_types.append(obj)
    all_units = load_json("units")
    units = OrderedDict((k, {"enum": sorted(all_units[k])}) for k in sorted(all_units) if k in unit_types)
    schema = load_schema()
    # Carry over the original title and keep it as the first key.
    units["title"] = schema["definitions"]["units"]["title"]
    units.move_to_end("title", last=False)
    schema["definitions"]["units"] = units
    schema["definitions"]["sensor"]["oneOf"] = sensor_types
    return schema
def signature():
    """HTTP endpoint: compute an Amazon Pay button signature.

    Reads payment parameters from the request query string and credentials
    from the session, builds the canonical POST string, signs it with
    HMAC-SHA256 using the MWS secret key, and returns the sorted parameter
    dict (signature last) serialised as JSON.
    """
    amount = request.args.get("amount")
    currency_code = request.args.get("currencyCode")
    seller_note = request.args.get("sellerNote")
    seller_order_id = request.args.get("sellerOrderId")

    parameters = {
        "accessKey": session["mws_access_key"],
        "amount": amount,
        "sellerId": session["merchant_id"],
        "returnURL": session["return_url"],
        "lwaClientId": session["client_id"],
        "sellerNote": seller_note,
        "sellerOrderId": seller_order_id,
        "currencyCode": currency_code,
        "shippingAddressRequired": "true",
        "paymentAction": "AuthorizeAndCapture",
    }

    # create querystring to sign
    # NOTE(review): host "payments.amazon.com" and path "/" are hard-coded;
    # the replace() calls apply the quoting quirks the signature scheme
    # expects (space as %20, literal * and ~ handling).
    string_to_sign = "POST\npayments.amazon.com\n/\n{}".format(
        parse.urlencode(sorted(parameters.items())).replace("+", "%20").replace("*", "%2A").replace("%7E", "~")
    )

    # generate signature
    signature = hmac.new(
        session["mws_secret_key"].encode("utf_8"), msg=string_to_sign.encode("utf_8"), digestmod=hashlib.sha256
    ).digest()
    signature = base64.b64encode(signature).decode()

    # add signature to parameter list
    parameters["signature"] = parse.quote_plus(signature)

    # order the parameters and move signature to the end
    ordered_parameters = OrderedDict(sorted(parameters.items()))
    ordered_parameters.move_to_end("signature")

    # return it
    # return json.dumps(parse.urlencode(ordered_parameters).encode(encoding='utf_8'))
    return json.dumps(ordered_parameters)
Example #17
0
    def __new__(metacls, classname, bases, class_dict, **kargs):
        """Build a dobject class: collect DAttribute descriptors, split them
        into key attributes and value attributes (honouring inheritance and
        an optional ``__dobject_key__`` declaration), and install the
        bookkeeping attributes on the newly created class.
        """
        pkey_attrs = OrderedDict()   # attributes that form the object key
        value_attrs = OrderedDict()  # ordinary (non-key) attributes
        for base_cls in reversed(bases):  # overwriting priority, keep the first.
            attrs = getattr(base_cls, "__dobject_key__", None)
            if attrs is not None:
                for attr_name, attr in attrs.items():
                    pkey_attrs[attr_name] = attr.copy()

            attrs = getattr(base_cls, "__dobject_att__", None)
            if attrs is not None:
                for attr_name, attr in attrs.items():
                    # BUG FIX: these are the base's *value* attributes; the
                    # original stored them into ``pkey_attrs``, silently
                    # promoting every inherited value attribute to the key.
                    value_attrs[attr_name] = attr.copy()

        attributes = OrderedDict()
        for attr_name, descriptor in class_dict.items():
            if attr_name.startswith("_"):
                # Underscore names must be one of the recognised keywords.
                if attr_name not in _keywords:
                    err = "Unknown preserved attribute in %s: %s"
                    err %= (classname, attr_name)
                    raise ValueError(err)

                continue

            if isinstance(descriptor, DAttribute):
                descriptor.name = attr_name
                attributes[attr_name] = descriptor
            else:
                err = "Unknown attribute declaration in %s: %s"
                err %= (classname, attr_name)
                raise TypeError(err)

        primary_key = class_dict.pop("__dobject_key__", None)
        primary_key = parse_attr_value_many(primary_key, "__dobject_key__")
        pkey_names = set(primary_key.keys())

        # ----------------------------------------------------------------------
        if pkey_names:
            # If available, the primary key declaration overrides parent's
            for attr_name, attr in tuple(reversed(pkey_attrs.items())):
                if attr_name not in pkey_names:
                    # the pk attribute of child is not primary key
                    del pkey_attrs[attr_name]
                    value_attrs[attr_name] = attr
                    value_attrs.move_to_end(attr_name, last=False)

        # Attributes declared on this class win over inherited ones.
        for attr_name, attr in attributes.items():
            if attr.name in pkey_names:
                pkey_attrs[attr_name] = attr
            else:
                value_attrs[attr_name] = attr

        class_dict["__dobject_key__"] = pkey_attrs
        class_dict["__dobject_att__"] = value_attrs
        class_dict["__dobject_origin_class__"] = None
        class_dict["__dobject_mapping__"] = OrderedDict()
        class_dict["_re"] = ReshapeDescriptor()

        cls = type.__new__(metacls, classname, bases, class_dict)

        # After the class exists, wire each attribute to its owner class
        # (the dset aggregate requires the complete class info).
        for attr_name, attr in attributes.items():
            attr.setup(cls, attr_name)  # set owner and other something...

        setattr(cls, "__dobject_key_class__", _make_pkey_class(cls))

        return cls
Example #18
0
class BufferPool:
    """
  A buffer pool implementation.

  Since the buffer pool is a cache, we do not provide any serialization methods.

  >>> schema = DBSchema('employee', [('id', 'int'), ('age', 'int')])
  >>> bp = BufferPool()
  >>> fm = Storage.FileManager.FileManager(bufferPool=bp)
  >>> bp.setFileManager(fm)

  # Check initial buffer pool size
  >>> len(bp.pool.getbuffer()) == bp.poolSize
  True

  """

    defaultPoolSize = 128 * (1 << 20)  # 128 MiB

    def __init__(self, **kwargs):
        # Copy-construct from another pool when 'other' is supplied.
        # NOTE(review): fromOther() only accepts (self, other), yet it is
        # called here with **kwargs (which still contains 'other') — this
        # path raises TypeError as written; confirm intended signature.
        other = kwargs.get("other", None)
        if other:
            self.fromOther(other, **kwargs)

        else:
            self.pageSize = kwargs.get("pageSize", io.DEFAULT_BUFFER_SIZE)
            self.poolSize = kwargs.get("poolSize", BufferPool.defaultPoolSize)

            # One contiguous byte buffer, carved into pageSize-sized frames.
            self.pool = io.BytesIO(b"\x00" * self.poolSize)
            self.pageMap = OrderedDict()  # pageId -> (offset, page, pinCount)
            self.freeList = list(range(0, self.poolSize, self.pageSize))
            self.freeListLen = len(self.freeList)

            self.fileMgr = None

    def fromOther(self, other):
        # Shallow copy: shares the underlying buffer and maps with `other`.
        self.pageSize = other.pageSize
        self.poolSize = other.poolSize
        self.pool = other.pool
        self.pageMap = other.pageMap
        self.freeList = other.freeList
        self.freeListLen = other.freeListLen
        self.fileMgr = other.fileMgr

    def setFileManager(self, fileMgr):
        self.fileMgr = fileMgr

    # Basic statistics

    def numPages(self):
        return math.floor(self.poolSize / self.pageSize)

    def numFreePages(self):
        return self.freeListLen

    def size(self):
        return self.poolSize

    def freeSpace(self):
        return self.numFreePages() * self.pageSize

    def usedSpace(self):
        return self.size() - self.freeSpace()

    # Buffer pool operations

    def hasPage(self, pageId):
        return pageId in self.pageMap

    # Gets a page from the buffer pool if present, otherwise reads it from a heap file.
    # This method returns both the page, as well as a boolean to indicate whether
    # there was a cache hit.
    def getPageWithHit(self, pageId, pinned=False):
        if self.fileMgr:
            if self.hasPage(pageId):
                # NOTE(review): the hit path does not move_to_end(pageId),
                # so cache hits do not refresh LRU recency despite the
                # class comment above evictPage() — confirm intent.
                return (self.getCachedPage(pageId, pinned)[1], True)

            else:
                # Fetch the page from the file system, adding it to the buffer pool
                if not self.freeList:
                    self.evictPage()

                self.freeListLen -= 1
                offset = self.freeList.pop(0)
                # Zero-copy view of this page's frame within the pool buffer.
                pageBuffer = self.pool.getbuffer()[offset : offset + self.pageSize]
                page = self.fileMgr.readPage(pageId, pageBuffer)

                self.pageMap[pageId] = (offset, page, 1 if pinned else 0)
                self.pageMap.move_to_end(pageId)
                return (page, False)

        else:
            raise ValueError("Uninitalized buffer pool, no file manager found")

    # Wrapper for getPageWithHit, returning only the page.
    def getPage(self, pageId, pinned=False):
        return self.getPageWithHit(pageId, pinned)[0]

    # Returns a triple of offset, page object, and pin count
    # for pages present in the buffer pool.
    def getCachedPage(self, pageId, pinned=False):
        if self.hasPage(pageId):
            if pinned:
                self.incrementPinCount(pageId, 1)
            return self.pageMap[pageId]
        else:
            return (None, None, None)

    # Pins a page.
    def pinPage(self, pageId):
        if self.hasPage(pageId):
            self.incrementPinCount(pageId, 1)

    # Unpins a page.
    def unpinPage(self, pageId):
        if self.hasPage(pageId):
            self.incrementPinCount(pageId, -1)

    # Returns the pin count for a page.
    def pagePinCount(self, pageId):
        if self.hasPage(pageId):
            return self.pageMap[pageId][2]

    # Update the pin counter for a cached page.
    def incrementPinCount(self, pageId, delta):
        (offset, page, pinCount) = self.pageMap[pageId]
        self.pageMap[pageId] = (offset, page, pinCount + delta)

    # Removes a page from the page map, returning it to the free
    # page list without flushing the page to the disk.
    def discardPage(self, pageId):
        if self.hasPage(pageId):
            (offset, _, pinCount) = self.pageMap[pageId]
            # Only unpinned pages may be discarded.
            if pinCount == 0:
                self.freeList.append(offset)
                self.freeListLen += 1
                del self.pageMap[pageId]

    # Removes a page from the page map, returning it to the free
    # page list. This method also flushes the page to disk.
    def flushPage(self, pageId):
        if self.fileMgr:
            (offset, page, pinCount) = self.getCachedPage(pageId)
            if all(map(lambda x: x is not None, [offset, page, pinCount])):
                # NOTE(review): the frame is freed before the dirty write
                # below, so the frame could in principle be reused before
                # the page is written — confirm this is safe here.
                if pinCount == 0:
                    self.freeList.append(offset)
                    self.freeListLen += 1
                    del self.pageMap[pageId]

                if page.isDirty():
                    self.fileMgr.writePage(page)
        else:
            raise ValueError("Uninitalized buffer pool, no file manager found")

    # Evict using LRU policy, considering only unpinned pages.
    # We implement LRU through the use of an OrderedDict, and by moving pages
    # to the end of the ordering every time it is accessed through getPage()
    def evictPage(self):
        if self.pageMap:
            # Find an unpinned page to evict.
            pageToEvict = None
            for (pageId, (_, _, pinCount)) in self.pageMap.items():
                if pinCount == 0:
                    pageToEvict = pageId
                    break

            # NOTE(review): truthiness test — a falsy-but-valid pageId
            # (e.g. 0) would be treated as "none found"; confirm pageId type.
            if pageToEvict:
                self.flushPage(pageToEvict)

            else:
                raise ValueError("Could not find a page to evict in the buffer pool")

    def clear(self):
        # NOTE(review): flushPage() deletes entries from pageMap while this
        # loop iterates over it — this raises RuntimeError for dirty,
        # unpinned pages; iterate over a snapshot to be safe.
        for (pageId, (offset, page, _)) in self.pageMap.items():
            if page.isDirty():
                self.flushPage(pageId)
Example #19
0
class GPGDatabase(object):
    """Registry of keyring resources with key lookup across all of them.

    Public (non-secret) keys are routed to the first non-secret resource,
    secret keys to the first secret resource.  A "primary" resource is kept
    at the front of the registry so it is consulted first.
    """

    _resources = None

    def __init__(self):
        self._resources = OrderedDict()  # name -> resource, primary first

    def load_default_resources(self, force=False, read_only=False):
        """Register every resource listed in DEFAULT_RESOURCES."""
        for (filename, secret) in DEFAULT_RESOURCES:
            self.add_resource(filename, force=force, primary=False, default=True, read_only=read_only, secret=secret)

    def add_resource(self, filename, force=False, primary=False, default=False, read_only=False, secret=False):
        """Open (or create) a resource for *filename* and register it."""
        resource = get_resource(filename, force, secret, read_only, default)
        resource = self.register_resource(resource.filename, resource, primary)
        return resource

    def _apply_key_op(self, key, op_name):
        """Dispatch *op_name* (add/delete/update_transferrable_key) to the
        first resource whose secret flag matches the key type.

        Consolidates the three previously identical add/delete/update loops.

        Raises:
            TypeError: if *key* is neither a TransferablePublicKey nor a
                TransferableSecretKey.
        """
        if isinstance(key, TransferablePublicKey):
            want_secret = False
        elif isinstance(key, TransferableSecretKey):
            want_secret = True
        else:
            raise TypeError
        for resource in self._resources.values():
            if bool(resource.secret) == want_secret:
                getattr(resource, op_name)(key)
                break

    def add_key(self, key):
        """Add *key* to the first matching resource."""
        self._apply_key_op(key, "add_transferrable_key")

    def delete_key(self, key):
        """Delete *key* from the first matching resource."""
        self._apply_key_op(key, "delete_transferrable_key")

    def update_key(self, key):
        """Update *key* in the first matching resource."""
        self._apply_key_op(key, "update_transferrable_key")

    def register_resource(self, name, resource, primary):
        """Register *resource* under *name*; an existing registration wins.

        A primary resource is moved to the front so it is searched first.
        """
        resource = self._resources.setdefault(name, resource)
        if primary:
            self._resources.move_to_end(name, last=False)
        return resource

    def _matches_user_id(self, key, user_id):
        """True if *user_id* is a case-insensitive substring of any uid."""
        match = False
        for uid in key.user_ids:
            if user_id.lower() in uid.user_id.lower():
                match = True
                break
        return match

    def keys(self):
        """Iterate over every key in every registered resource."""
        for resource in self._resources.values():
            yield from resource.keys()

    def search(self, fingerprint=None, key_id=None, user_id=None):
        """Search all resources by fingerprint/key id and/or user id."""
        results = []
        if fingerprint is None and key_id is None and user_id is None:
            return results
        for resource in self._resources.values():
            if fingerprint or key_id:
                try:
                    key = resource.get_transferrable_key(fingerprint or key_id)
                except KeyError:
                    continue
                if user_id is not None:
                    # Both an id and a user id were given: require both.
                    if self._matches_user_id(key, user_id):
                        results.append(key)
                else:
                    results.append(key)
            else:
                # User ID only. Be really dumb and iterate.
                for key in resource.values():
                    if self._matches_user_id(key, user_id):
                        results.append(key)
        return results
Example #20
0
class ArgumentCache(object):
    """
    >>> cache = ArgumentCache()
    >>> "foo" in cache
    False
    >>> cache['foo']
    Traceback (most recent call last):
        ...
    KeyError: 'foo'
    >>> len(cache)
    0
    >>> key = cache.add("Hello, world!")
    >>> key
    'bea2c9d7fd040292e0424938af39f7d6334e8d8a'
    >>> cache[key]
    'Hello, world!'
    >>> key in cache
    True
    >>> len(cache)
    1
    >>> cache.get_missing([
    ...    ('bar', key),
    ...    ('baz', '1111111111111111111111111111111111111111'),
    ... ])
    ['baz']
    >>> cache.add_many(['value1', 'value2'])
    ['daf626c4ebd6bdd697e043111454304e5fb1459e', '849988af22dbd04d3e353caf77f9d81241ca9ee2']
    >>> cache['daf626c4ebd6bdd697e043111454304e5fb1459e']
    'value1'
    >>> cache['849988af22dbd04d3e353caf77f9d81241ca9ee2']
    'value2'
    >>> cache[key]
    'Hello, world!'
    >>> len(cache)
    3
    >>> cache.clear()
    >>> len(cache)
    0

    Size of ArgumentCache can be limited:

    >>> cache = ArgumentCache(0)
    Traceback (most recent call last):
        ...
    ValueError: maxsize must be greater than 0
    >>> cache = ArgumentCache(2)  # limit it to 2 elements
    >>> cache.add_many(['value1', 'value2'])
    ['daf626c4ebd6bdd697e043111454304e5fb1459e', '849988af22dbd04d3e353caf77f9d81241ca9ee2']
    >>> len(cache)
    2
    >>> cache.add("Hello, world!")
    'bea2c9d7fd040292e0424938af39f7d6334e8d8a'
    >>> len(cache)
    2
    >>> cache["bea2c9d7fd040292e0424938af39f7d6334e8d8a"]
    'Hello, world!'
    >>> cache['849988af22dbd04d3e353caf77f9d81241ca9ee2']
    'value2'
    >>> cache['daf626c4ebd6bdd697e043111454304e5fb1459e']
    Traceback (most recent call last):
        ...
    KeyError: 'daf626c4ebd6bdd697e043111454304e5fb1459e'
    >>> cache.add("foo")
    'd465e627f9946f2fa0d2dc0fc04e5385bc6cd46d'
    >>> len(cache)
    2
    >>> 'bea2c9d7fd040292e0424938af39f7d6334e8d8a' in cache
    False
    """

    def __init__(self, maxsize=None):
        # None means "unbounded"; model that as +infinity so the size
        # comparison in add() needs no special case.
        limit = float("+inf") if maxsize is None else maxsize
        if limit <= 0:
            raise ValueError("maxsize must be greater than 0")
        self.maxsize = limit
        self._values = OrderedDict()  # key -> value, LRU first

    def add(self, value):
        """Store *value* under its content hash and return the key."""
        key = self.get_key(value)
        values = self._values
        if key in values:
            # Re-adding an existing entry refreshes its recency.
            del values[key]
        else:
            # Make room for a brand-new entry before inserting it.
            while len(values) >= self.maxsize:
                values.popitem(last=False)
        values[key] = value
        return key

    def __getitem__(self, key):
        value = self._values[key]
        self._values.move_to_end(key)  # mark as most recently used
        return value

    def __contains__(self, key):
        return key in self._values

    def __len__(self):
        return len(self._values)

    def clear(self):
        """Drop every cached entry."""
        self._values.clear()

    def get_missing(self, items):
        """Return the names from (name, key) pairs whose key is not cached."""
        missing = []
        for name, key in items:
            if key not in self:
                missing.append(name)
        return missing

    def add_many(self, values):
        """
        Add all values from ``values`` list to cache. Return a list of keys.
        """
        keys = []
        for value in values:
            keys.append(self.add(value))
        return keys

    @classmethod
    def get_key(cls, value):
        """SHA-1 of the canonical (sorted-key) JSON encoding of *value*."""
        payload = json.dumps(value, sort_keys=True, ensure_ascii=False)
        return hashlib.sha1(payload.encode("utf8")).hexdigest()
class DBPBible:
    """
    Class to download and manipulate an online DBP (Digital Bible Platform) Bible.

    Downloads the list of available books on construction, then fetches
    individual verses on demand over HTTP, keeping up to MAX_CACHED_VERSES
    results in an LRU cache.
    """

    def __init__(self, damRoot):
        """
        Create the Digital Bible Platform Bible object.
            Accepts a 6-character code which is the initial part of the DAM:
                1-3: Language code, e.g., ENG
                4-6: Version code, e.g., ESV

        Raises ConnectionError if the platform cannot be contacted.
        """
        if BibleOrgSysGlobals.debugFlag and debuggingThisModule:
            print(exp("DBPBible.__init__( {!r} )").format(damRoot))
            assert damRoot and isinstance(damRoot, str) and len(damRoot) == 6
        self.damRoot = damRoot

        # Setup and initialise the base class first
        # InternalBible.__init__( self, givenFolderName, givenName, encoding )

        self.key = getSecurityKey()  # Our personal key
        self.URLFixedData = "?v={}&key={}".format(DPB_VERSION, self.key)

        # See if the site is online by making a small call to get the API version
        self.URLTest = "api/apiversion"
        self.onlineVersion = None
        result = self.getOnlineData(self.URLTest)
        if result:
            if "Version" in result:
                self.onlineVersion = result["Version"]
        else:
            # BUG FIX: the log message previously misspelt the class name as "DPBBible"
            logging.critical("DBPBible.__init__: Digital Bible Platform appears to be offline")
            raise ConnectionError  # What should this really be?

        self.bookList = None
        # BUG FIX: the local bookList was previously unbound (NameError at the
        # "if bookList:" test below) whenever onlineVersion remained falsy,
        # e.g. when the apiversion result had no "Version" field.
        bookList = None
        if self.onlineVersion:  # Check that this particular resource is available by getting a list of books
            bookList = self.getOnlineData(
                "library/book", "dam_id=" + self.damRoot
            )  # Get an ordered list of dictionaries -- one for each book
            if BibleOrgSysGlobals.debugFlag and debuggingThisModule:
                print("DBPBible.__init__: bookList", len(bookList))  # , bookList )

            # if 0:# Get all book codes and English names
            # bookCodeDictList = self.getOnlineData( "library/bookname", "language_code=ENG" )
            ## Not sure why it comes back as a dictionary in a one-element list
            # assert isinstance( bookCodeDictList, list ) and len(bookCodeDictList)==1
            # bookCodeDict = bookCodeDictList[0]
            # assert isinstance( bookCodeDict, dict )
            # print( "bookCodeDict", len(bookCodeDict), bookCodeDict )

        # Index the downloaded book metadata by BBB (3-letter internal book code)
        self.books = OrderedDict()
        if bookList:  # Convert to a form that's easier for us to use later
            for bookDict in bookList:
                OSISCode = bookDict["book_id"]
                BBB = BibleOrgSysGlobals.BibleBooksCodes.getBBBFromOSIS(OSISCode)
                if isinstance(BBB, list):
                    BBB = BBB[0]  # Take the first one if we get something like ['EZR','EZN']
                self.books[BBB] = bookDict
            del bookList

        # LRU verse cache: str(verse key) -> list of verse entries (see getVerseData)
        self.cache = OrderedDict()

    # end of DBPBible.__init__

    def __str__(self):
        """
        Create a string representation of the Bible object.
        """
        indent = 2
        result = "DBP online Bible object"
        if self.onlineVersion:
            result += ("\n" if result else "") + " " * indent + _("Online version: {}").format(self.onlineVersion)
        result += ("\n" if result else "") + " " * indent + _("DAM root: {}").format(self.damRoot)
        if self.books:
            result += ("\n" if result else "") + " " * indent + _("Books: {}").format(len(self.books))
        return result

    # end of DBPBible.__str__

    def __len__(self):
        """
        This method returns the number of books in the Bible.
        """
        return len(self.books)

    # end of DBPBible.__len__

    def __contains__(self, BBB):
        """
        This method checks whether the Bible contains the BBB book.
        Returns True or False.
        """
        if BibleOrgSysGlobals.debugFlag:
            assert isinstance(BBB, str) and len(BBB) == 3

        return BBB in self.books

    # end of DBPBible.__contains__

    def __getitem__(self, keyIndex):
        """
        Given an index, return the book object (or raise an IndexError)
        """
        if BibleOrgSysGlobals.debugFlag and debuggingThisModule:
            print(exp("DBPBible.__getitem__( {!r} )").format(keyIndex))

        return list(self.books.items())[keyIndex][1]  # element 0 is BBB, element 1 is the book object

    # end of DBPBible.__getitem__

    def getOnlineData(self, fieldREST, additionalParameters=None):
        """
        Given a string, e.g., "api/apiversion"
            Does an HTTP GET to our site.
            Receives the JSON result (hopefully)
            Converts the JSON bytes to a JSON string
            Loads the JSON string into a dictionary
            Returns the dictionary.
        Returns None if the data cannot be fetched.
        """
        if BibleOrgSysGlobals.debugFlag and debuggingThisModule:
            print(exp("DBPBible.getOnlineData( {!r} {!r} )").format(fieldREST, additionalParameters))

        if BibleOrgSysGlobals.verbosityLevel > 2:
            print("Requesting data from {} for {}…".format(URL_BASE, self.damRoot))
        requestString = "{}{}{}{}".format(
            URL_BASE, fieldREST, self.URLFixedData, "&" + additionalParameters if additionalParameters else ""
        )
        try:
            responseJSON = urllib.request.urlopen(requestString)
        except urllib.error.URLError:
            if BibleOrgSysGlobals.debugFlag:
                logging.critical(
                    "DBPBible.getOnlineData: error fetching {} {}".format(repr(fieldREST), repr(additionalParameters))
                )
            return None
        responseSTR = responseJSON.read().decode("utf-8")
        return json.loads(responseSTR)

    # end of DBPBible.getOnlineData

    def getVerseData(self, key):
        """
        Given a verse key (an object with getBBB/getChapterNumber/getVerseNumber
        methods), fetch that verse and return it as a list of 5-tuples of the
        form (marker, originalMarker, text, text, []) -- e.g. ('v~', 'v~',
        verseText, verseText, []).  Results are cached (LRU, bounded by
        MAX_CACHED_VERSES).  Returns None if this version lacks the book.
        """
        if BibleOrgSysGlobals.debugFlag and debuggingThisModule:
            print(exp("DBPBible.getVerseData( {!r} ) for {!r}").format(key, self.damRoot))

        cacheKey = str(key)  # hoisted: previously recomputed three times
        if cacheKey in self.cache:
            if BibleOrgSysGlobals.debugFlag and debuggingThisModule:
                print("  " + exp("Retrieved from cache"))
            self.cache.move_to_end(cacheKey)  # refresh LRU recency
            return self.cache[cacheKey]
        BBB = key.getBBB()
        if BBB in self.books:
            info = self.books[BBB]
            rawData = self.getOnlineData(
                "text/verse",
                "dam_id={}&book_id={}&chapter_id={}&verse_start={}".format(
                    info["dam_id"] + "2ET", info["book_id"], key.getChapterNumber(), key.getVerseNumber()
                ),
            )
            resultList = []
            if isinstance(rawData, list) and len(rawData) == 1:
                rawDataDict = rawData[0]
                assert len(rawDataDict) == 8 and isinstance(rawDataDict, dict)
                resultList.append(
                    ("p#", "p#", rawDataDict["paragraph_number"], rawDataDict["paragraph_number"], [])
                )  # Must be first for Biblelator
                if key.getVerseNumber() == "1":
                    resultList.append(("c#", "c#", rawDataDict["chapter_id"], rawDataDict["chapter_id"], []))
                resultList.append(("v", "v", rawDataDict["verse_id"], rawDataDict["verse_id"], []))
                resultList.append(
                    ("v~", "v~", rawDataDict["verse_text"].strip(), rawDataDict["verse_text"].strip(), [])
                )
                self.cache[cacheKey] = resultList
                if len(self.cache) > MAX_CACHED_VERSES:
                    # Remove the oldest (least recently used) cached entry
                    self.cache.popitem(last=False)
            return resultList
        else:  # This version doesn't have this book
            if debuggingThisModule or BibleOrgSysGlobals.verbosityLevel > 2:
                print("  getVerseData: {} not in {} {}".format(BBB, self.damRoot, self.books.keys()))

    # end of DBPBible.getVerseData

    def getContextVerseData(self, key):
        """
        Given a BCV key, get the verse data.

        (The Digital Bible Platform doesn't provide the context so an empty list is always returned.)
        """
        if BibleOrgSysGlobals.debugFlag and debuggingThisModule:
            print(exp("DBPBible.getContextVerseData( {!r} ) for {!r}").format(key, self.damRoot))

        return self.getVerseData(key), []  # No context
Example #22
0
class BufferPool:
    """
  A buffer pool implementation.

  Since the buffer pool is a cache, we do not provide any serialization methods.

  >>> schema = DBSchema('employee', [('id', 'int'), ('age', 'int')])
  >>> bp = BufferPool()
  >>> fm = Storage.FileManager.FileManager(bufferPool=bp)
  >>> bp.setFileManager(fm)

  # Check initial buffer pool size
  >>> len(bp.pool.getbuffer()) == bp.poolSize
  True

  """

    # Default to a 10 MB buffer pool.
    defaultPoolSize = 10 * (1 << 20)

    def __init__(self, **kwargs):
        """
        Keyword arguments, with defaults if not present:
          pageSize : the page size to be used with this buffer pool
          poolSize : the size of the buffer pool in bytes
        """
        self.pageSize = kwargs.get("pageSize", io.DEFAULT_BUFFER_SIZE)
        self.poolSize = kwargs.get("poolSize", BufferPool.defaultPoolSize)
        self.pool = io.BytesIO(b"\x00" * self.poolSize)
        self.fileMgr = None

        # freeList holds the BYTE offsets of unused page frames.
        # BUG FIX: previously it held page indices (0, 1, 2, ...) although
        # readFreePage()/discardPage() use the entries as byte offsets into
        # the pool, so all frames overlapped within the first pages.
        self.freeList = [i * self.pageSize for i in range(self.numPages())]
        # pageDict maps pageId -> (page, frame byte offset) and doubles as the
        # LRU order: least recently used first; getPage() moves hits to the end.
        self.pageDict = OrderedDict()

    def setFileManager(self, fileMgr):
        """Attach the file manager used to read and write pages."""
        self.fileMgr = fileMgr

    # Basic statistics

    def numPages(self):
        """Total number of page frames in the pool."""
        return math.floor(self.poolSize / self.pageSize)

    def numFreePages(self):
        """Number of currently unused page frames."""
        return len(self.freeList)

    def size(self):
        """Pool capacity in bytes."""
        return self.poolSize

    def freeSpace(self):
        """Unused capacity in bytes."""
        return self.numFreePages() * self.pageSize

    def usedSpace(self):
        """Occupied capacity in bytes."""
        return self.size() - self.freeSpace()

    # Buffer pool operations

    def hasPage(self, pageId):
        """Return True if pageId is currently resident in the pool."""
        return pageId in self.pageDict

    def getPage(self, pageId):
        """
        Return the page for pageId, reading it from disk on a miss (evicting
        the LRU page first if no frame is free).  Any access marks the page
        most recently used.
        """
        if not self.hasPage(pageId):
            if not self.freeList:
                self.evictPage()  # make room: flush + free the LRU victim
            self.readFreePage(pageId)
        self.pageDict.move_to_end(pageId)  # mark as most recently used
        (page, _) = self.pageDict[pageId]
        return page

    # Removes a page from the page map, returning it to the free
    # page list without flushing the page to the disk.
    def discardPage(self, pageId):
        if self.hasPage(pageId):
            (page, offset) = self.pageDict[pageId]
            self.pageDict.pop(pageId, None)
            self.freeList.append(offset)  # recycle the frame
            return page
        else:
            return None

    def flushPage(self, pageId):
        """Write the page to disk and return its frame to the free list."""
        page = self.discardPage(pageId)
        if page is not None:
            self.fileMgr.writePage(page)

    # Evict using LRU policy.
    # We implement LRU through the use of an OrderedDict, and by moving pages
    # to the end of the ordering every time it is accessed through getPage()
    def evictPage(self):
        (page, _) = next(iter(self.pageDict.values()))  # first entry == LRU
        self.flushPage(page.pageId)

    def readFreePage(self, pageId):
        """Load pageId from disk into a free frame and register it in the pool."""
        offset = self.freeList.pop()
        buffer = self.pool.getbuffer()[offset : offset + self.pageSize]
        page = self.fileMgr.readPage(pageId, buffer)
        self.pageDict[pageId] = (page, offset)
        return page

    def clear(self):
        """
        Flush every dirty resident page.
        BUG FIX: iterate over a snapshot -- flushPage() removes entries from
        pageDict, so iterating the live dict raised
        "dictionary changed size during iteration".
        """
        for key, value in list(self.pageDict.items()):
            if value[0].isDirty():
                self.flushPage(key)
Example #23
0
# More views of d1 (built earlier in the file): its values, sorted
list2 = sorted(d1.values())
print(list1, list2)

# Values ordered by key
list3 = [d1[name] for name in sorted(d1)]
print(list3)


# --------------------------------------------------------------
# using an OrderedDict
from collections import OrderedDict

d4 = OrderedDict([("Smith", 43), ("James", 32), ("Edwards", 36), ("Cramer", 29)])
for age in d4.values():
    print(age, end=" ")
print()

# move_to_end shifts "Smith" to the last position; iteration order reflects it
d4.move_to_end("Smith")
for age in d4.values():
    print(age, end=" ")
print()


# --------------------------------------------------------------
# using a defaultdict
from collections import defaultdict

d5 = defaultdict(str)
d5["greet1"] = "hello"
print(d5["greet1"], d5["greet2"])
Example #24
0
class Task(object):
    """
    A class that represents the one task on a task list. Tasks contain a name,
    a parent task (defaults to None), a list of subtasks (aka child tasks), and
    a dictionary of properties. Default properties include notes, completed,
    deadline, time_needed, and location. Other properties can be added to the
    property dictionary by the user.
    @author: Keien Ohta
    @version: 0.0
    last edited: 10-07-2011 00:28 GMT-7
    last edited: 12-05-2011 16:00 GMT-8
    """

    # Lazily-built class-wide template of default properties.
    # Each instance receives its own copy (see _init_properties).
    _default_props = None

    def _default_properties(self):
        """
        Return the class-wide template OrderedDict of default
        <code>TaskProperty</code>s, building it on first use.
        Callers must copy it before mutating.
        """
        if Task._default_props is None:
            Task._default_props = OrderedDict(
                {
                    "notes": TaskProperty("notes", ""),
                    "completed": TaskProperty("completed", "False"),
                    "deadline": TaskProperty("deadline", "", "time"),
                    "time_needed": TaskProperty("time_needed", 0, "num"),
                    "location": TaskProperty("location", ""),
                }
            )
        return Task._default_props

    def __init__(self, name="", parent=None, properties=None, subtasks=None):
        """
        Construct a task with the given name, parent, properties and subtasks.
        """
        self.name = name
        self._init_properties(properties)  # Initialize default properties
        # BUG FIX: the subtasks argument was previously accepted but ignored.
        self._init_subtasks(subtasks)
        self._parent = None
        if parent is not None:
            # BUG FIX: was parent.add(self) -- Task has no add() method.
            # add_subtask() appends us and sets self._parent via set_parent().
            parent.add_subtask(self)

    def _init_properties(self, props=None):
        """
        Initialzes the properties to a copy of the given dictionary of
        properties, or <code>_default_properties()</code> if the given is
        <code>None</code>.
        """
        if isinstance(props, OrderedDict):
            self._task_properties = props.copy()
        elif isinstance(props, dict):
            self._task_properties = OrderedDict(props)
        else:
            # BUG FIX: previously aliased the shared class-level template, so
            # adding/removing a property on one defaulted task changed ALL
            # defaulted tasks.  (The TaskProperty objects themselves are still
            # shared -- a shallow copy.)
            self._task_properties = self._default_properties().copy()

    def _init_subtasks(self, sub=None):
        """
        Initialzes the subtasks to a copy of the given list of subtasks, or
        an empty list if the given is <code>None</code>.
        """
        if sub is not None and type(sub) == list:
            self.subtasks = list(sub)
        else:
            self.subtasks = list()

    def rename(self, name):
        """
        Renames this task to the given name.
        """
        self.name = name

    @property
    def parent(self):
        """
        The parent Task of this Task.
        """
        return self._parent

    def set_parent(self, parent):
        """
        Changes the parent of this Task to a new Task, detaching this Task
        from its previous parent's subtask list.  Note: does NOT append to
        the new parent's subtasks -- use add_subtask() for that.
        Returns its old parent.
        """
        old_parent = self._parent
        if old_parent is not None:
            old_parent.remove_subtask(self)
        self._parent = parent
        return old_parent

    def remove_parent(self):
        """
        Changes the parent of this Task to its old parent's parent
        (top level when there is no grandparent).
        Returns the old parent.
        """
        old_parent = self._parent
        if old_parent is None:
            return None
        # BUG FIX: was old_parent.remove(self) -- Task has no remove() method.
        old_parent.remove_subtask(self)
        # Clear first so add_subtask()/set_parent() won't try to detach again
        # (the old code re-detached and raised ValueError).
        self._parent = None
        grandparent = old_parent.parent
        if grandparent is not None:
            grandparent.add_subtask(self)
        return old_parent

    def to_top_level(self):
        """
        Makes this Task parentless (top level).
        Returns the old parent.
        """
        return self.set_parent(None)

    def get_subtask(self, index):
        """
        Returns the subtask at the given index.
        """
        return self.subtasks[index]

    def get_subtasks(self):
        """
        Returns the list of subtasks of this Task.
        """
        return self.subtasks

    def add_subtask(self, subtask):
        """
        Adds the given Task to the list of subtasks and makes this Task
        its parent (detaching it from any previous parent).
        """
        assert isinstance(subtask, Task), "You are not adding a Task!"
        self.subtasks.append(subtask)
        subtask.set_parent(self)

    def remove_subtask(self, subtask):
        """
        Removes the given Task from the list of subtasks.
        """
        assert isinstance(subtask, Task), "You are not removing a Task!"
        self.subtasks.remove(subtask)

    def swap_subtasks(self, subtask1, subtask2):
        """
        Swaps the subtasks at the given indices (no-op unless both are ints).
        """
        if type(subtask1) is int and type(subtask2) is int:
            self.subtasks[subtask1], self.subtasks[subtask2] = (
                self.subtasks[subtask2],
                self.subtasks[subtask1],
            )

    def move_subtask_to_end(self, index):
        """
        Moves the subtask at the given index to the end.
        """
        self.subtasks.append(self.subtasks.pop(index))

    def add_property(self, task_property, value="", category=""):
        """
        Adds the given TaskProperty. If a TaskProperty with the same name
        already exists, overwrites the previous TaskProperty with the new one.
        If the given value for task_property is not a TaskProperty, creates
        a TaskProperty with the given value and category to add.
        """
        if isinstance(task_property, TaskProperty):
            self._task_properties[task_property.name] = task_property
        else:
            tp = TaskProperty(task_property, value, category)
            self._task_properties[str(task_property)] = tp

    def remove_property(self, task_property):
        """
        Removes the given <code>TaskProperty</code> or the
        <code>TaskProperty</code> with a given name from the dictionary of
        <code>TaskProperty</code>s.
        """
        if isinstance(task_property, TaskProperty):
            name = task_property.name
        else:
            name = task_property
        del self._task_properties[name]

    def get_properties(self):
        """
        Returns the OrderedDict of <code>TaskProperty</code>s
        """
        return self._task_properties

    def get_property(self, name):
        """
        Returns the <code>TaskProperty</code> with the given name.
        """
        return self._task_properties[name]

    def move_property_to_end(self, prop):
        """
        Moves the <code>TaskProperty</code> with the given key to the end.
        """
        self._task_properties.move_to_end(prop)

    def swap_properties(self, prop1, prop2):
        """
        Swaps the order of the <code>TaskProperty</code>s with the given keys.
        If either of the given keys is not found, raises a key error.
        Note: This algorithm is inefficient and can be made more efficient.
        """
        posits = [-1, -1]
        keys = list(self.get_properties().keys())
        for i in range(len(keys)):
            if keys[i] == prop1:
                posits[0] = i
            if keys[i] == prop2:
                posits[1] = i
        if posits[0] < 0:
            raise KeyError("Key " + str(prop1) + " not found.")
        if posits[1] < 0:
            raise KeyError("Key " + str(prop2) + " not found.")
        # Rebuild the tail of the ordering with the two keys exchanged,
        # then rotate it to the end in the new order.
        new_key_order = list()
        for i in range(min(posits[0], posits[1]), len(keys)):
            if i == posits[0]:
                new_key_order.append(keys[posits[1]])
            elif i == posits[1]:
                new_key_order.append(keys[posits[0]])
            else:
                new_key_order.append(keys[i])
        for key in new_key_order:
            self._task_properties.move_to_end(key)

    def __str__(self):
        """
        Returns a string representation of this Task in the form:
        name(number_of_subtasks)
        """
        return str(self.name) + "(" + str(len(self.subtasks)) + ")"
Example #25
0
# Greedy pairing pass: repeatedly pick the most-constrained player and pair
# them with their most-constrained remaining friend, removing both from play.
# NOTE(review): ``ordered_pairs`` (apparently an OrderedDict mapping a player
# to the set of players they can be paired with), ``count``, ``last_element``
# and ``fine_sort`` are all defined earlier in the file -- the comments below
# infer their semantics from usage; confirm against their definitions.
while len(ordered_pairs) > 0:
    count += 1
    # Fewest remaining partners among the entries; presumably last_element()
    # returns the partner set of the final (least-connected) entry -- confirm.
    min_count = len(last_element(ordered_pairs))
    candidates_queue = [value for value in ordered_pairs if len(ordered_pairs[value]) == min_count]
    # print("Candidates:")
    # print(candidates_queue)
    # fine_sort() appears to reorder ordered_pairs so the best candidate ends
    # up last (popitem() below takes it) -- TODO confirm.
    fine_sort(ordered_pairs, candidates_queue)
    # print("Best candidate:")
    # print(list(ordered_pairs.keys())[-1])
    player_one = ordered_pairs.popitem()  # (player, partner set) of first pick
    # Friends sorted by how few options they have left (most constrained first)
    player_one_friends = sorted([friend for friend in player_one[1]], key=lambda item: len(ordered_pairs[item]))
    for friend in player_one[1]:
        # remove player_one from his friends' sets
        ordered_pairs[friend].remove(player_one[0])

    # Pair player_one with the friend who has the fewest remaining options.
    ordered_pairs.move_to_end(player_one_friends[0])
    player_two = ordered_pairs.popitem()

    for friend in player_two[1]:
        # remove player_two from his friends' sets
        ordered_pairs[friend].remove(player_two[0])

    # Re-sort: most partners first, so the least-connected entry is last again.
    ordered_pairs = OrderedDict(sorted(ordered_pairs.items(), key=lambda item: len(item[1]), reverse=True))

    while len(ordered_pairs) > 0:
        # clear orphan players: pop entries whose partner set is empty, and
        # push back / stop at the first player who still has partners
        tmp = ordered_pairs.popitem()
        if len(tmp[1]) != 0:
            ordered_pairs[tmp[0]] = tmp[1]
            break