Example #1
def __init__(self, dbdir=None, baseiri=None, clear=False):
    '''
    Versa connection object built from DiskCache collection object
    '''
    self._dbdir = dbdir
    self._db = Index(dbdir)
    if clear: self._db.clear()
    self._ensure_abbreviations()
    #self.create_model()
    self._baseiri = baseiri
    self._abbr_index = 0
    return
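The snippets in this listing all build on diskcache.Index, a persistent, dict-like mapping backed by SQLite. A minimal sketch of the behavior they rely on (the directory path here is hypothetical):

from diskcache import Index

db = Index("/tmp/example-index")  # directory is created if missing
db["key"] = {"nested": [1, 2]}    # values are pickled transparently
assert "key" in db
db.clear()                        # remove all entries, as clear=True does above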
Example #2
File: index.py Project: efiop/dvc
    def __init__(
        self,
        tmp_dir: "StrPath",
        name: str,
    ):  # pylint: disable=super-init-not-called
        from diskcache import Index

        from dvc.fs.local import LocalFileSystem
        from dvc.utils.fs import makedirs

        self.index_dir = os.path.join(tmp_dir, self.INDEX_DIR, name)
        makedirs(self.index_dir, exist_ok=True)
        self.fs = LocalFileSystem()
        self.index = Index(self.index_dir)
Example #3
    def __init__(self,
                 tmp_dir: "StrPath",
                 name: str,
                 dir_suffix: Optional[str] = None):  # pylint: disable=super-init-not-called
        from diskcache import Index

        from dvc.fs.local import LocalFileSystem
        from dvc.utils.fs import makedirs

        self.index_dir = os.path.join(tmp_dir, self.INDEX_DIR, name)
        makedirs(self.index_dir, exist_ok=True)
        self.fs = LocalFileSystem()
        self.index = Index(self.index_dir)

        if not dir_suffix:
            dir_suffix = self.fs.CHECKSUM_DIR_SUFFIX
        self.dir_suffix = dir_suffix
Example #4
def create_uuid(self, directory: Optional[str] = './cache'):
    """Create a UUID for each sample so the actual name can be recovered later"""
    print("START caching file names.")
    if osp.exists(directory):
        self.cache_names = Index(directory)
    else:
        self.cache_names = Index(
            directory, {
                str(index): str(uuid.uuid5(uuid.NAMESPACE_X500, str(index)))
                for index, _ in enumerate(self.dataset)
            })
        # use values as keys
        # self.cache_names.update({
        #         value: key for key, value in self.cache_names.items()
        #     })
    print("END caching file names.")
Example #5
def db_client():
    """Return the cached db connection"""
    global DB_CLIENT
    if DB_CLIENT is None:
        DB_CLIENT = Index(iscc_registry.settings.db_dir)
        log.debug(
            f"Initialized ISCC state DB in {iscc_registry.settings.db_dir}")
    return DB_CLIENT
Example #6
def get_comp_order_book():
    print("查找comp挂单大于100的")
    futures = ftx.public_get_futures()["result"]
    msg = {}
    name = "comp_alarm"
    ALARM_SIZE = 100
    # msg["COMP-PERP"]["asks"][[187.1, 1.0471], [187.1, 1.0471]]
    comp_pd = [i["name"] for i in futures if "COMP" in i["name"]]
    for comp in comp_pd:
        orderbook = ftx.fetch_order_book(comp, 1)
        for i in orderbook["asks"]:
            price, size = i
            if size >= ALARM_SIZE:
                if comp in msg:
                    msg[comp]["asks"].append(i)
                else:
                    msg[comp] = {"asks": [], "bids": []}
                    msg[comp]["asks"].append(i)
        for i in orderbook["bids"]:
            price, size = i
            if size >= ALARM_SIZE:
                if comp in msg:
                    msg[comp]["bids"].append(i)
                else:
                    msg[comp] = {"asks": [], "bids": []}
                    msg[comp]["bids"].append(i)
    if msg:
        new_msg = {}
        for k, v in msg.items():
            if v["asks"] and v["bids"]:
                new_msg[k] = v
        result = Index("data/result")
        send_txt = ""
        msg = new_msg
        if msg:
            for k, v in msg.items():
                send_txt += k
                send_txt += "\n\n"
                send_txt += json.dumps(v)
                send_txt += "\n\n"

            if name in result:
                before_data = result[name]
                if msg != before_data:
                    sendMail(
                        f"{k} 有挂单超过100了",
                        send_txt,
                        ["*****@*****.**", "*****@*****.**"],
                    )
                    result[name] = msg
            else:
                sendMail("COMP有挂单超过50了", send_txt,
                         ["*****@*****.**", "*****@*****.**"])
                result[name] = msg
Example #7
def _get_colour_variant(self, key):
    global m2
    m = m2
    m.parse(key)
    xforms = m.get_colour_transforms()
    for xform in xforms:
        tstate = m.transformed_state(xform)
        try:
            value = Index.__getitem__(self, tstate)
            return xform.invert(value)
        except KeyError:
            pass
    return None
Example #8
def __getitem__(self, key):
    try:
        return Index.__getitem__(self, key)
    except KeyError:
        pass
    value = self._get_colour_variant(key)
    if value is not None:
        return value
    value = self._get_transformed_variant(key)
    if value is not None:
        return value
    raise KeyError('No matching entry %s' % key)
Example #9
class CacheSet:
    """
    A Set-like Cache that wraps :class:`diskcache.Index`
    """
    def __init__(self, iterable=(), directory=None):
        self.index = Index(directory)
        self.update(*iterable)

    def add(self, obj: object):
        if not isinstance(obj, Hashable):
            raise TypeError(f'{type(obj)} is not Hashable',
                            f'{str(obj)[:100]}...')
        self.index[hash(obj)] = obj

    def remove(self, obj):
        try:
            self.index.pop(hash(obj))
        except KeyError:
            raise KeyError(obj)

    def pop(self):
        return self.index.popitem()[1]

    def update(self, *obj):
        for o in obj:
            self.add(o)

    def clear(self):
        self.index.clear()

    def difference(self, other):
        return self.__sub__(other)

    def copy(self):
        return set(self).copy()

    def __iter__(self):
        return iter(self.index.values())

    def __contains__(self, item):
        item_hash = hash(item)
        return item_hash in self.index

    def __sub__(self, other: Iterable):
        return set(self) - set(other)

    def __len__(self):
        return len(self.index)

    def __str__(self):
        return f'CacheSet({", ".join(map(str, self))})'

    def __repr__(self):
        return str(self)

    @property
    def directory(self):
        return self.index.directory
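A minimal usage sketch of the CacheSet wrapper above (the cache directory is hypothetical):

s = CacheSet([1, 2, 3], directory="./set_cache")
s.add("hello")
assert "hello" in s
assert s - {1, 2} == {3, "hello"}
s.remove(1)
print(len(s))  # 3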
Example #10
File: index.py Project: efiop/dvc
class ObjectDBIndex(ObjectDBIndexBase):
    """Class for indexing hashes in an ODB."""

    INDEX_SUFFIX = ".idx"
    INDEX_DIR = "index"

    def __init__(
        self,
        tmp_dir: "StrPath",
        name: str,
    ):  # pylint: disable=super-init-not-called
        from diskcache import Index

        from dvc.fs.local import LocalFileSystem
        from dvc.utils.fs import makedirs

        self.index_dir = os.path.join(tmp_dir, self.INDEX_DIR, name)
        makedirs(self.index_dir, exist_ok=True)
        self.fs = LocalFileSystem()
        self.index = Index(self.index_dir)

    def __iter__(self):
        return iter(self.index)

    def __contains__(self, hash_):
        return hash_ in self.index

    def dir_hashes(self):
        """Iterate over .dir hashes stored in the index."""
        yield from (hash_ for hash_, is_dir in self.index.items() if is_dir)

    def clear(self):
        """Clear this index (to force re-indexing later)."""
        from diskcache import Timeout

        try:
            self.index.clear()
        except Timeout as exc:
            raise ObjectDBError("Failed to clear ODB index") from exc

    def update(self, dir_hashes: Iterable[str], file_hashes: Iterable[str]):
        """Update this index, adding the specified hashes."""
        from diskcache import Timeout

        try:
            with self.index.transact():
                for hash_ in dir_hashes:
                    self.index[hash_] = True
            with self.index.transact():
                for hash_ in file_hashes:
                    self.index[hash_] = False
        except Timeout as exc:
            raise ObjectDBError("Failed to update ODB index") from exc

    def intersection(self, hashes: Set[str]):
        """Iterate over values from `hashes` which exist in the index."""
        yield from hashes.intersection(self.index.keys())
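A minimal usage sketch of ObjectDBIndex (the temp directory and hash values are hypothetical; dvc and diskcache must be importable):

idx = ObjectDBIndex("/tmp/dvc-tmp", "myodb")
idx.update(dir_hashes=["abc123.dir"], file_hashes=["def456"])
assert "def456" in idx
print(list(idx.dir_hashes()))                     # ['abc123.dir']
print(list(idx.intersection({"def456", "zzz"})))  # ['def456']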
Example #11
    def __init__(
        self,
        tmp_dir: "StrPath",
        name: str,
    ):  # pylint: disable=super-init-not-called
        from diskcache import Cache, Index

        from dvc.fs.local import LocalFileSystem
        from dvc.utils.fs import makedirs

        self.index_dir = os.path.join(tmp_dir, self.INDEX_DIR, name)
        makedirs(self.index_dir, exist_ok=True)
        self.fs = LocalFileSystem()
        self.index = Index.fromcache(
            Cache(
                self.index_dir,
                disk_pickle_protocol=4,
                eviction_policy="none",
            ))
Example #12
def get_btc_move_diff(futures):
    "Get the price difference for each BTC MOVE contract"
    perpetuals = [i for i in futures if i["type"] == "move"]
    perpetual_names = [{"future_name": i["name"]} for i in perpetuals]
    strikePrices = get_future_stats(perpetual_names)
    strikePrices = {i["name"]: i for i in strikePrices}
    btc_moves = []
    for i in perpetuals:
        name = i["name"]
        if strikePrices[name].get("strikePrice", False):
            c = round(i["index"], 4)  # average price of the index constituent markets
            mark = i["mark"]  # futures mark price
            strikePrice = round(strikePrices[name]["strikePrice"],
                                4)  # underlying price at the start of the expiry day
            diff = round(abs(abs(c - strikePrice) - mark), 4)
            c1 = round(abs(c - strikePrice), 4)  # expected settlement price
            print(
                f"{name}: strike: {strikePrice}, BTC index: {c}, MOVE price: {mark}, diff: {diff}"
            )

            _append = {
                "index": c,
                "mark": mark,
                "strikePrice": strikePrice,
                "diff": diff,
                "name": name,
                "c1": c1,
            }
            btc_moves.append(_append)
            if diff > 3000:
                result = Index("data/result")
                if name in result:
                    t = result[name]  # time the last alert email was sent
                    if int(time.time()) - t > 60 * 60:  # more than an hour ago
                        sendMail("FTX MOVE price difference exceeded 3000",
                                 json.dumps(_append), ["*****@*****.**"])
                        result[name] = int(time.time())
                else:
                    sendMail("FTX MOVE price difference exceeded 3000",
                             json.dumps(_append), ["*****@*****.**"])
                    result[name] = int(time.time())
    return sorted(btc_moves, key=lambda k: k["diff"], reverse=True)
Example #13
                            logger.error('HEALTH: network is unreachable!!')
                            put_state_msg('ERROR')
                    else:
                        logger.debug('HEALTH: wait_for_nets is {}'.format(wait_for_nets))

            elif NODE_SETTINGS['mode'] == 'adhoc':
                if not NODE_SETTINGS['nwid']:
                    logger.warning('ADHOC: network ID not set {}'.format(NODE_SETTINGS['nwid']))
                else:
                    logger.debug('ADHOC: found network ID {}'.format(NODE_SETTINGS['nwid']))
                if netStatus != []:
                    nwid = netStatus[0]['identity']
                    addr = netStatus[0]['ztaddress']
                    nwstat = netStatus[0]['status']
                    logger.debug('ADHOC: found network with ID {}'.format(nwid))
                    logger.debug('ADHOC: network status is {}'.format(nwstat))
                    if addr:
                        res = do_peer_check(addr)

                # elif NODE_SETTINGS['nwid']:
                #     run_ztcli_cmd(action='join', extra=NODE_SETTINGS['nwid'])

        except Exception as exc:
            logger.error('nodestate exception was: {}'.format(exc))
            raise exc


cache = Index(get_cachedir())
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
Example #14
class connection(connection_base):
    def __init__(self, dbdir=None, baseiri=None, clear=False):
        '''
        Versa connection object built from DiskCache collection object
        '''
        self._dbdir = dbdir
        self._db = Index(dbdir)
        if clear: self._db.clear()
        self._ensure_abbreviations()
        #self.create_model()
        self._baseiri = baseiri
        self._abbr_index = 0
        return

    def copy(self, contents=True):
        '''Create a copy of this model, optionally without contents (i.e. just configuration)'''
        cp = connection(dbdir=self._dbdir, baseiri=self._baseiri)
        # FIXME!!!!!
        if contents: cp.add_many(self._relationships)
        return cp

    def query(self, expr):
        '''Execute a Versa query'''
        raise NotImplementedError

    def size(self):
        '''Return the number of links in the model'''
        count = 0
        for origin in self._db:
            if origin.startswith('@'):
                continue
            for rel, targetplus in self._db[origin].items():
                count += len(targetplus)
        return count
        #return  self._db_coll.count() - connection.META_ITEM_COUNT

    def __iter__(self):
        abbrevs = self._abbreviations()
        index = 0
        for origin in self._db:
            if origin.startswith('@'):
                continue
            for rel, targetplus in self._db[origin].items():
                try:
                    rel = rel.format(**abbrevs)
                except (KeyError, ValueError):
                    pass
                for target, attribs in targetplus:
                    try:
                        target = target.format(**abbrevs)
                    except (AttributeError, KeyError, ValueError):
                        # non-string targets (bool, float) have no .format
                        pass
                    yield index, (origin, rel, target, attribs)
                    index += 1

    # FIXME: Statement indices don't work sensibly without some inefficient additions. Use e.g. match for delete instead
    def match(self,
              origin=None,
              rel=None,
              target=None,
              attrs=None,
              include_ids=False):
        '''
        Iterator over relationship IDs that match a pattern of components

        origin - (optional) origin of the relationship (similar to an RDF subject). If omitted any origin will be matched.
        rel - (optional) type IRI of the relationship (similar to an RDF predicate). If omitted any relationship will be matched.
        target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object. If omitted any target will be matched.
        attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
        include_ids - If true include statement IDs with yield values
        '''
        abbrevs = self._abbreviations()
        index = 0
        if origin is None:
            extent = self._db
        else:
            extent = [origin]

        for origin in extent:
            if origin.startswith('@'):
                continue
            for xrel, xtargetplus in self._db.get(origin, {}).items():
                try:
                    xrel = xrel.format(**abbrevs)
                except (KeyError, ValueError):
                    pass
                if rel and rel != xrel:
                    continue
                for xtarget, xattrs in xtargetplus:
                    index += 1
                    # FIXME: only expand target abbrevs if of resource type?
                    try:
                        xtarget = xtarget.format(**abbrevs)
                    except (AttributeError, KeyError, ValueError):
                        # non-string targets (bool, float) have no .format
                        pass
                    if target and target != xtarget:
                        continue
                    matches = True
                    if attrs:
                        for k, v in attrs.items():
                            if k not in xattrs or xattrs.get(k) != v:
                                matches = False
                    if matches:
                        if include_ids:
                            yield index, (origin, xrel, xtarget, xattrs)
                        else:
                            yield origin, xrel, xtarget, xattrs

        return

    def multimatch(self,
                   origin=None,
                   rel=None,
                   target=None,
                   attrs=None,
                   include_ids=False):
        '''
        Iterator over relationship IDs that match a pattern of components, with multiple options provided for each component

        origin - (optional) origin of the relationship (similar to an RDF subject), or set of values. If omitted any origin will be matched.
        rel - (optional) type IRI of the relationship (similar to an RDF predicate), or set of values. If omitted any relationship will be matched.
        target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object, or set of values. If omitted any target will be matched.
        attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
        include_ids - If true include statement IDs with yield values
        '''
        raise NotImplementedError
        origin = origin if origin is None or isinstance(origin, set) else set(
            [origin])
        rel = rel if rel is None or isinstance(rel, set) else set([rel])
        target = target if target is None or isinstance(target, set) else set(
            [target])
        for index, curr_rel in enumerate(self._relationships):
            matches = True
            if origin and curr_rel[ORIGIN] not in origin:
                matches = False
            if rel and curr_rel[RELATIONSHIP] not in rel:
                matches = False
            if target and curr_rel[TARGET] not in target:
                matches = False
            if attrs:
                for k, v in attrs.items():
                    if k not in curr_rel[
                            ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:
                        matches = False
            if matches:
                if include_ids:
                    yield index, (curr_rel[0], curr_rel[1], curr_rel[2],
                                  curr_rel[3].copy())
                else:
                    yield (curr_rel[0], curr_rel[1], curr_rel[2],
                           curr_rel[3].copy())
        return

    def add(self, origin, rel, target, attrs=None):
        '''
        Add one relationship to the model

        origin - origin of the relationship (similar to an RDF subject)
        rel - type IRI of the relationship (similar to an RDF predicate)
        target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
        attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
        '''
        if not origin:
            raise ValueError('Relationship origin cannot be null')
        if not rel:
            raise ValueError('Relationship ID cannot be null')

        attrs = attrs or {}

        origin_obj = self._db.get(origin)
        rel = self._abbreviate(rel)
        target = self._abbreviate(target)

        if origin_obj is None:
            self._db[origin] = {rel: [(target, attrs)]}
        else:
            origin_obj.setdefault(rel, []).append((target, attrs))
            self._db[origin] = origin_obj
        return

    def add_many(self, rels):
        '''
        Add a list of relationships to the extent

        rels - a list of 0 or more relationship tuples, e.g.:
        [
            (origin, rel, target, {attrname1: attrval1, attrname2: attrval2}),
        ]

        origin - origin of the relationship (similar to an RDF subject)
        rel - type IRI of the relationship (similar to an RDF predicate)
        target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
        attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
        '''
        for curr_rel in rels:
            attrs = {}
            if len(curr_rel) == 3:
                origin, rel, target = curr_rel
            elif len(curr_rel) == 4:
                origin, rel, target, attrs = curr_rel
            else:
                raise ValueError
            self.add(origin, rel, target, attrs)
        return

    #FIXME: Replace with a match_to_remove method
    def remove(self, index):
        '''
        Delete one or more relationship, by index, from the extent

        index - either a single index or a list of indices
        '''
        raise NotImplementedError
        if hasattr(index, '__iter__'):
            ind = set(index)
        else:
            ind = [index]

        # Rebuild relationships, excluding the provided indices
        self._relationships = [
            r for i, r in enumerate(self._relationships) if i not in ind
        ]

    def __getitem__(self, i):
        raise NotImplementedError

    def __eq__(self, other):
        return repr(other) == repr(self)

    def _abbreviations(self):
        abbrev_obj = self._db['@_abbreviations']
        return abbrev_obj

    def _abbreviate(self, rid):
        '''
        Abbreviate a relationship or resource ID target for efficient storage
        in the DB. Works only with a prefix/suffix split of hierarchical HTTP-like IRIs,
        e.g. 'http://example.org/spam/eggs' becomes something like '{a23}eggs'
        and afterward there will be an entry in the prefix map from 'a23' to 'http://example.org/spam/'
        The map can then easily be used with str.format
        '''
        if not isinstance(
                rid, str) or '/' not in rid or not iri.matches_uri_syntax(rid):
            return rid
        head, tail = rid.rsplit('/', 1)
        head += '/'
        pmap = self._db['@_abbreviations']
        assert pmap is not None
        #FIXME: probably called too often to do this every time
        inv_pmap = {v: k for k, v in pmap.items()}
        if head in inv_pmap:
            prefix = inv_pmap[head]
        else:
            prefix = f'a{self._abbr_index}'
            pmap[prefix] = head
            self._abbr_index += 1
            self._db['@_abbreviations'] = pmap
        post_rid = '{' + prefix + '}' + tail.replace('{', '{{').replace(
            '}', '}}')
        return post_rid

    def _ensure_abbreviations(self):
        if '@_abbreviations' not in self._db:
            self._db['@_abbreviations'] = {}
        return
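The prefix-abbreviation scheme that _abbreviate documents can be illustrated on its own; this sketch mirrors the docstring's example rather than the class internals:

pmap = {'a23': 'http://example.org/spam/'}
abbreviated = '{a23}eggs'
# str.format expands the stored prefix back into the full IRI
assert abbreviated.format(**pmap) == 'http://example.org/spam/eggs'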
Example #15
class connection(connection_base):
    def __init__(self, dbdir=None, baseiri=None, clear=False):
        '''
        Versa connection object built from DiskCache collection object
        '''
        self._dbdir = dbdir
        self._db = Index(dbdir)
        if clear: self._db.clear()
        self._ensure_abbreviations()
        #self.create_model()
        self._baseiri = baseiri
        self._abbr_index = 0
        return

    def copy(self, contents=True):
        '''Create a copy of this model, optionally without contents (i.e. just configuration)'''
        cp = connection(dbdir=self._dbdir, baseiri=self._baseiri)
        # FIXME!!!!!
        if contents: cp.add_many(self._relationships)
        return cp

    def query(self, expr):
        '''Execute a Versa query'''
        raise NotImplementedError

    def size(self):
        '''Return the number of links in the model'''
        count = 0
        for origin in self._db:
            if origin.startswith('@'):
                continue
            for rel, targetplus in self._db[origin].items():
                count += len(targetplus)
        return count
        #return  self._db_coll.count() - connection.META_ITEM_COUNT

    def __iter__(self):
        abbrevs = self._abbreviations()
        index = 0
        for origin in self._db:
            if origin.startswith('@'):
                continue
            for rel, targetplus in self._db[origin].items():
                for target, attribs in targetplus:
                    yield index, (origin, rel.format(**abbrevs), target.format(**abbrevs), attribs)
                    index += 1

    # FIXME: Statement indices don't work sensibly without some inefficient additions. Use e.g. match for delete instead
    def match(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
        '''
        Iterator over relationship IDs that match a pattern of components

        origin - (optional) origin of the relationship (similar to an RDF subject). If omitted any origin will be matched.
        rel - (optional) type IRI of the relationship (similar to an RDF predicate). If omitted any relationship will be matched.
        target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object. If omitted any target will be matched.
        attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
        include_ids - If true include statement IDs with yield values
        '''
        abbrevs = self._abbreviations()
        index = 0
        if origin is None:
            extent = self._db
        else:
            extent = [origin]

        for origin in extent:
            if origin.startswith('@'):
                continue
            for xrel, xtargetplus in self._db.get(origin, {}).items():
                xrel = xrel.format(**abbrevs)
                if rel and rel != xrel:
                    continue
                for xtarget, xattrs in xtargetplus:
                    index += 1
                    # FIXME: only expand target abbrevs if of resource type?
                    xtarget = xtarget.format(**abbrevs)
                    if target and target != xtarget:
                        continue
                    matches = True
                    if attrs:
                        for k, v in attrs.items():
                            if k not in xattrs or xattrs.get(k) != v:
                                matches = False
                    if matches:
                        if include_ids:
                            yield index, (origin, xrel, xtarget, xattrs)
                        else:
                            yield origin, xrel, xtarget, xattrs

        return

    def multimatch(self, origin=None, rel=None, target=None, attrs=None, include_ids=False):
        '''
        Iterator over relationship IDs that match a pattern of components, with multiple options provided for each component

        origin - (optional) origin of the relationship (similar to an RDF subject), or set of values. If omitted any origin will be matched.
        rel - (optional) type IRI of the relationship (similar to an RDF predicate), or set of values. If omitted any relationship will be matched.
        target - (optional) target of the relationship (similar to an RDF object), a boolean, floating point or unicode object, or set of values. If omitted any target will be matched.
        attrs - (optional) attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}. If any attribute is specified, an exact match is made (i.e. the attribute name and value must match).
        include_ids - If true include statement IDs with yield values
        '''
        raise NotImplementedError
        origin = origin if origin is None or isinstance(origin, set) else set([origin])
        rel = rel if rel is None or isinstance(rel, set) else set([rel])
        target = target if target is None or isinstance(target, set) else set([target])
        for index, curr_rel in enumerate(self._relationships):
            matches = True
            if origin and curr_rel[ORIGIN] not in origin:
                matches = False
            if rel and curr_rel[RELATIONSHIP] not in rel:
                matches = False
            if target and curr_rel[TARGET] not in target:
                matches = False
            if attrs:
                for k, v in attrs.items():
                    if k not in curr_rel[ATTRIBUTES] or curr_rel[ATTRIBUTES].get(k) != v:
                        matches = False
            if matches:
                if include_ids:
                    yield index, (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
                else:
                    yield (curr_rel[0], curr_rel[1], curr_rel[2], curr_rel[3].copy())
        return

    def add(self, origin, rel, target, attrs=None):
        '''
        Add one relationship to the model

        origin - origin of the relationship (similar to an RDF subject)
        rel - type IRI of the relationship (similar to an RDF predicate)
        target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
        attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
        '''
        if not origin:
            raise ValueError('Relationship origin cannot be null')
        if not rel:
            raise ValueError('Relationship ID cannot be null')

        attrs = attrs or {}

        origin_obj = self._db.get(origin)
        rel = self._abbreviate(rel)
        target = self._abbreviate(target)

        if origin_obj is None:
            self._db[origin] = {rel: [(target, attrs)]}
        else:
            origin_obj.setdefault(rel, []).append((target, attrs))
            self._db[origin] = origin_obj
        return

    def add_many(self, rels):
        '''
        Add a list of relationships to the extent

        rels - a list of 0 or more relationship tuples, e.g.:
        [
            (origin, rel, target, {attrname1: attrval1, attrname2: attrval2}),
        ]

        origin - origin of the relationship (similar to an RDF subject)
        rel - type IRI of the relationship (similar to an RDF predicate)
        target - target of the relationship (similar to an RDF object), a boolean, floating point or unicode object
        attrs - optional attribute mapping of relationship metadata, i.e. {attrname1: attrval1, attrname2: attrval2}
        '''
        for curr_rel in rels:
            attrs = {}
            if len(curr_rel) == 3:
                origin, rel, target = curr_rel
            elif len(curr_rel) == 4:
                origin, rel, target, attrs = curr_rel
            else:
                raise ValueError
            self.add(origin, rel, target, attrs)
        return

    #FIXME: Replace with a match_to_remove method
    def remove(self, index):
        '''
        Delete one or more relationship, by index, from the extent

        index - either a single index or a list of indices
        '''
        raise NotImplementedError
        if hasattr(index, '__iter__'):
            ind = set(index)
        else:
            ind = [index]

        # Rebuild relationships, excluding the provided indices
        self._relationships = [r for i, r in enumerate(self._relationships) if i not in ind]

    def __getitem__(self, i):
        raise NotImplementedError

    def __eq__(self, other):
        return repr(other) == repr(self)

    def _abbreviations(self):
        abbrev_obj = self._db['@_abbreviations']
        return abbrev_obj
        
    def _abbreviate(self, rid):
        '''
        Abbreviate a relationship or resource ID target for efficient storage
        in the DB. Works only with a prefix/suffix split of hierarchical HTTP-like IRIs,
        e.g. 'http://example.org/spam/eggs' becomes something like '{a23}eggs'
        and afterward there will be an entry in the prefix map from 'a23' to 'http://example.org/spam/'
        The map can then easily be used with str.format
        '''
        if not isinstance(rid, str) or '/' not in rid or not iri.matches_uri_syntax(rid):
            return rid
        head, tail = rid.rsplit('/', 1)
        head += '/'
        pmap = self._db['@_abbreviations']
        assert pmap is not None
        #FIXME: probably called too often to do this every time
        inv_pmap = {v: k for k, v in pmap.items()}
        if head in inv_pmap:
            prefix = inv_pmap[head]
        else:
            prefix = f'a{self._abbr_index}'
            pmap[prefix] = head
            self._abbr_index += 1
            self._db['@_abbreviations'] = pmap
        post_rid = '{' + prefix + '}' + tail.replace('{', '{{').replace('}', '}}')
        return post_rid
        
    def _ensure_abbreviations(self):
        if '@_abbreviations' not in self._db:
            self._db['@_abbreviations'] = {}
        return
Example #16
from ..config import index_path
from diskcache import Index

index = Index(index_path.get())


Example #17
from web3 import Web3, WebsocketProvider
import json
from sendMail import sendMail
from diskcache import Index

result = Index("data/result")

import os, sys, time

w3 = Web3(
    WebsocketProvider(
        "wss://mainnet.infura.io/ws/v3/cd42b3642f1441629f66000f8e544d5d",
        websocket_timeout=30,
    ))

with open("erc20.json") as f:
    erc20abi = json.loads(f.read())

comp = w3.eth.contract(address="0xc00e94Cb662C3520282E6f5717214004A7f26888",
                       abi=erc20abi)


def go():
    a1 = comp.events.Transfer.createFilter(fromBlock="latest",
                                           toBlock="pending")
    print("开始检测大于500的comp转账")
    while True:
        c = a1.get_new_entries()
        for i in c:
            amount = i["args"]["amount"]
            amount = w3.fromWei(amount, "ether")
Example #18
PREDICTION_LOOP_SLEEP = float(os.getenv("PREDICTION_LOOP_SLEEP", "0.06"))
BATCH_COLLECTION_SLEEP_IF_EMPTY_FOR = float(
    os.getenv("BATCH_COLLECTION_SLEEP_IF_EMPTY_FOR", "60")
)
BATCH_COLLECTION_SLEEP_FOR_IF_EMPTY = float(
    os.getenv("BATCH_COLLECTION_SLEEP_FOR_IF_EMPTY", "1")
)
MANAGER_LOOP_SLEEP = float(os.getenv("MANAGER_LOOP_SLEEP", "8"))

_request_index = os.path.join(QUEUE_DIR, f"{QUEUE_NAME}.request_index")
_results_cache = os.path.join(QUEUE_DIR, f"{QUEUE_NAME}.results_cache")
_metrics_cache = os.path.join(QUEUE_DIR, f"{QUEUE_NAME}.metrics_cache")
_meta_index = os.path.join(QUEUE_DIR, f"{QUEUE_NAME}.META_INDEX")

REQUEST_INDEX = Index(_request_index)
RESULTS_INDEX = Cache(_results_cache)
METRICS_CACHE = Cache(_metrics_cache)
META_INDEX = Index(_meta_index)

META_INDEX["IS_FILE_INPUT"] = IS_FILE_INPUT
META_INDEX["PREDICTION_LOOP_SLEEP"] = PREDICTION_LOOP_SLEEP
META_INDEX["BATCH_COLLECTION_SLEEP_IF_EMPTY_FOR"] = BATCH_COLLECTION_SLEEP_IF_EMPTY_FOR
META_INDEX["BATCH_COLLECTION_SLEEP_FOR_IF_EMPTY"] = BATCH_COLLECTION_SLEEP_FOR_IF_EMPTY
META_INDEX["MANAGER_LOOP_SLEEP"] = MANAGER_LOOP_SLEEP
META_INDEX["TOTAL_REQUESTS"] = 0

FASTDEPLOY_UI_PATH = os.getenv(
    "FASTDEPLOYUI",
    os.path.join(os.path.split(os.path.abspath(__file__))[0], "fastdeploy-ui"),
)
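Because diskcache indexes are SQLite-backed, a second process can open the same directories and see these values; a minimal sketch (the queue path is hypothetical):

from diskcache import Index

meta = Index("/path/to/QUEUE_DIR/QUEUE_NAME.META_INDEX")
with meta.transact():  # read-modify-write under a transaction
    meta["TOTAL_REQUESTS"] = meta.get("TOTAL_REQUESTS", 0) + 1
print(meta["PREDICTION_LOOP_SLEEP"])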
Example #19
           'nwid': 'b6079f73ca8129ad',
           'objtype': 'network',
           'private': True,
           'remoteTraceLevel': 0,
           'remoteTraceTarget': None,
           'revision': 1,
           'routes': [],
           'rules': [{'not': False, 'or': False, 'type': 'ACTION_ACCEPT'}],
           'rulesSource': '',
           'tags': [],
           'v4AssignMode': {'zt': False},
           'v6AssignMode': {'6plane': False, 'rfc4193': False, 'zt': False}}


# has_aging = False
cache = Index(get_cachedir(dir_name='fpn_test', user_dirs=True))
net_q = Deque(get_cachedir(dir_name='net_queue', user_dirs=True))
max_age = NODE_SETTINGS['max_cache_age']
utc_stamp = datetime.datetime.now(utc)  # use local time for console

client = mock_zt_api_client()


# special test cases
def json_check(data):
    import json

    json_dump = json.dumps(data, indent=4, separators=(',', ': '))
    json_load = json.loads(json_dump)
    assert data == json_load
Example #20
import ipywidgets as widgets  # type: ignore
import numpy as np  # type: ignore
import matplotlib.pyplot as plt  # type: ignore
import hashlib  # type: ignore

from pathlib import Path
from PIL import Image  # type: ignore
from wordcloud import ImageColorGenerator, WordCloud  # type: ignore
from diskcache import Index  # type: ignore

from ipybible import IMG_DATA_DIR

LOVE_MASK_IMG = IMG_DATA_DIR / "love.png"
CLOUD_INDEX = Index()


def hash_txt(text: str) -> str:
    hash_object = hashlib.sha256(text.encode("utf-8"))
    hex_dig = hash_object.hexdigest()
    return hex_dig


def generate_cloud(text: str, mask_img: Path = LOVE_MASK_IMG):
    hashed_text = hash_txt(text)
    out = widgets.Output()
    mask = np.array(Image.open(mask_img))
    with out:
        if hashed_text in CLOUD_INDEX:
            wordcloud_bible = CLOUD_INDEX[hashed_text]
        else:
            wordcloud_bible = WordCloud(
Example #21
from networkx import Graph
from .common import get_largest_element_sequence

# Occasionally users specify market orders on uniswap

# Orders are considered market orders if their limit price distance
# to the obtained price is greater than
IS_MARKET_ORDER_TOL = 0  # .001

# Fix market orders so that they are at this distance from the obtained price
LIMIT_PRICE_TOL = 0.1


uniswap = UniswapClient()
disk_cache = Index(".cache")

print(f"Cache size: {len(disk_cache)}")


def load_swaps(filename):
    """Parse a csv file to a list of dicts."""
    r = []
    with open(filename, "r") as f:
        reader = csv.reader(f)
        first = True
        for row in reader:
            # Skip header.
            if first:
                first = False
                continue
Example #22
    def __init__(self):
        self.cache_dir = resource_filename(__name__, '_cache')
        self.cache = Index(self.cache_dir)
        self.domain_names = []

        self._prepopulate()
Example #23
def __setitem__(self, key, value):
    return Index.__setitem__(self, key, value)
Example #24
import numpy as np  # type: ignore
import spacy  # type: ignore

from typing import List
from spacy.tokens.doc import Doc  # type: ignore
from sklearn.feature_extraction.text import CountVectorizer  # type: ignore
from sklearn.metrics.pairwise import cosine_similarity  # type: ignore
from diskcache import Cache, Index  # type: ignore
from dataclasses import dataclass
from hashlib import sha256

from ipybible import BIBLE_DATA_DIR

SIM_CACHE: Cache = Cache()
BIBLE_INDEX = Index(str(BIBLE_DATA_DIR))


@dataclass
class SpacyLangModel:
    nlp: spacy
    stop_words: List[str]


def normalize_text(text: str,
                   spacy_model: SpacyLangModel,
                   index_name: Index = BIBLE_INDEX):
    index_key = sha256(text.encode("utf-8")).hexdigest()
    if index_key in index_name:
        return index_name[index_key]
    else:
        doc: Doc = spacy_model.nlp(text.lower())
Example #25

def archivar_resultado(main_tex, main_name, problemas, filename, formato):
    """Vuelca el resultado a disco, a un archivo .zip o .tar.gz"""

    nombre = filename
    if formato == "zip":
        dump = dumper.ZipFile(nombre + ".zip")
    else:
        dump = dumper.TarFile(nombre + ".tar.gz")
    with dump as d:
        d.dump(main_tex, problemas)
    return d.name


cache = Index("/tmp/json2latex/cache")


def json2latex(data, main_name="examen.tex", formato="tgz"):
    progress = RQprogress()
    md5 = hashlib.md5("{}{}{}".format(data, main_name,
                                      formato).encode("utf8")).hexdigest()
    if md5 in cache:
        return cache[md5]
    datos = json.loads(data)
    main_tex, problemas = (converter.Examen2Tex(
        to_latex=True, progress=progress).convert_to_exam(datos, skip=False))
    progress.update("Volcando resultados a fichero")
    f = archivar_resultado(main_tex=main_tex,
                           main_name=main_name,
                           problemas=problemas,
Example #26
# Target:   Python 3.6

import datetime

from diskcache import Index
from node_tools import update_state, get_cachedir
from node_tools import ENODATA, NODE_SETTINGS

try:
    from datetime import timezone
    utc = timezone.utc
except ImportError:
    from daemon.timezone import UTC
    utc = UTC()

cache = Index(get_cachedir())
max_age = NODE_SETTINGS['max_cache_age']
utc_stamp = datetime.datetime.now(utc)  # use local time for console

# reset timestamp if needed
if 'utc-time' in cache:
    stamp = cache['utc-time']
    cache_age = utc_stamp - stamp  # this is a timedelta
    print('Cache age is: {} sec'.format(cache_age.seconds))
    print('Maximum cache age: {} sec'.format(max_age))
    if cache_age.seconds > max_age:
        print('Cache data is too old!!')
        print('Stale data will be removed!!')
        cache.clear()

size = len(cache)
Example #27
            peer_keys = find_keys(cache, 'peer')
            print('Returned peer keys: {}'.format(peer_keys))
            load_cache_by_type(cache, peer_data, 'peer')

            # get/display all available network data
            await client.get_data('controller/network')
            print('{} networks found'.format(len(client.data)))
            net_list = client.data
            net_data = []
            for net_id in net_list:
                # print(net_id)
                # Get details about each network
                await client.get_data('controller/network/{}'.format(net_id))
                # pprint(client.data)
                net_data.append(client.data)

            # load_cache_by_type(cache, net_data, 'net')
            # net_keys = find_keys(cache, 'net')
            # print('{} network keys found'.format(len(net_list)))
            # pprint(net_data)

        except Exception as exc:
            # print(str(exc))
            raise exc


cache = Index(get_cachedir(dir_name='ctlr_data'))
# cache.clear()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())