Example #1
    def test_stats(self):
        for size in SIZES:
            l = LRU(size)
            for i in range(size):
                l[i] = str(i)

            self.assertTrue(l.get_stats() == (0, 0))

            val = l[0]
            self.assertTrue(l.get_stats() == (1, 0))

            val = l.get(0, None)
            self.assertTrue(l.get_stats() == (2, 0))

            val = l.get(-1, None)
            self.assertTrue(l.get_stats() == (2, 1))

            try:
                val = l[-1]
            except KeyError:
                # the failed lookup is expected and should be counted as a miss
                pass

            self.assertTrue(l.get_stats() == (2, 2))

            l.clear()
            self.assertTrue(len(l) == 0)
            self.assertTrue(l.get_stats() == (0, 0))
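
A quick usage sketch of the hit/miss counters the test above exercises, assuming the lru-dict package whose class is imported with "from lru import LRU" (as in these tests):

from lru import LRU

cache = LRU(3)
cache["a"] = 1           # stores do not touch the counters

cache["a"]               # successful lookup counts as a hit
cache.get("missing")     # failed lookup counts as a miss

hits, misses = cache.get_stats()   # (1, 1) at this point
cache.clear()                      # empties the cache and resets the counters to (0, 0)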
Example #2
class AnnoyanceFilters(commands.Cog):
    """ Filter out content which is poorly behaved on Discord """
    def __init__(self, bot):
        self.bot = bot
        self._settings_cache = LRU(1024)

    def get_guild_settings(self, guild_id: int) -> GuildSettings:

        if r := self._settings_cache.get(guild_id, None):
            return r

        cursor = self.bot._conn.cursor()

        res = cursor.execute(
            """
            SELECT mods_immune, admins_immune, remove_apngs, remove_excessive_html_elements
            FROM annoyance_filters
            WHERE guild_id = ?
            """,
            (guild_id, ),
        ).fetchone()

        cursor.close()

        self._settings_cache[guild_id] = settings = (GuildSettings(
            *res) if res else GuildSettings())
        return settings
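
When the underlying row changes, the cached entry needs to be dropped or get_guild_settings will keep serving stale data; a minimal sketch of that invalidation step, assuming a hypothetical helper on the same cog (not part of the original code):

    def invalidate_guild_settings(self, guild_id: int) -> None:
        # Hypothetical helper: call after writing to annoyance_filters so the
        # next get_guild_settings() re-reads the row from the database.
        try:
            del self._settings_cache[guild_id]
        except KeyError:
            pass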
Example #3
    def test_stats(self):
        for size in SIZES:
            l = LRU(size)
            for i in range(size):
                l[i] = str(i)

            self.assertTrue(l.get_stats() == (0, 0))

            val = l[0]
            self.assertTrue(l.get_stats() == (1, 0))

            val = l.get(0, None)
            self.assertTrue(l.get_stats() == (2, 0))

            val = l.get(-1, None)
            self.assertTrue(l.get_stats() == (2, 1))

            try:
                val = l[-1]
            except KeyError:
                # the failed lookup is expected and should be counted as a miss
                pass

            self.assertTrue(l.get_stats() == (2, 2))

            l.clear()
            self.assertTrue(len(l) == 0)
            self.assertTrue(l.get_stats() == (0, 0))
Example #4
    def test_hits(self):
        for size in SIZES:
            l = LRU(size)
            for i in range(size):
                l[i] = str(i)

            val = l[0]
            self.assertTrue(l.get_hits() == 1)
            self.assertTrue(l.get_misses() == 0)

            val = l.get(0, None)
            self.assertTrue(l.get_hits() == 2)
            self.assertTrue(l.get_misses() == 0)

            val = l.get(-1, None)
            self.assertTrue(l.get_hits() == 2)
            self.assertTrue(l.get_misses() == 1)

            try:
                val = l[-1]
            except KeyError:
                # the failed lookup is expected and should be counted as a miss
                pass

            self.assertTrue(l.get_hits() == 2)
            self.assertTrue(l.get_misses() == 2)

            l.clear()
            self.assertTrue(len(l) == 0)
            self.assertTrue(l.get_hits() == 0)
            self.assertTrue(l.get_misses() == 0)
Example #5
def testLRU():
    print ('######### LRU ########\n')
    lru = LRU(5)
    for i in range(8):
        lru.put(str(i), i)
        if i == 5:
            lru.get(str(i))
    lru.remove('3')
    lru.showNodes()
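
The test above drives a hand-rolled LRU with put/get/remove/showNodes rather than the lru-dict class used elsewhere on this page. A minimal sketch of a class with that interface, with the method names inferred from the call sites and everything else an assumption:

from collections import OrderedDict

class LRU:
    def __init__(self, capacity):
        self.capacity = capacity
        self._data = OrderedDict()   # oldest entry first, most recent last

    def put(self, key, value):
        if key in self._data:
            self._data.move_to_end(key)
        self._data[key] = value
        if len(self._data) > self.capacity:
            self._data.popitem(last=False)   # evict the least recently used entry

    def get(self, key):
        if key not in self._data:
            return None
        self._data.move_to_end(key)          # a hit refreshes recency
        return self._data[key]

    def remove(self, key):
        self._data.pop(key, None)

    def showNodes(self):
        # Print entries from most to least recently used.
        print(" -> ".join("{}:{}".format(k, v)
                          for k, v in reversed(list(self._data.items()))))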
Example #6
 def test_get_and_del(self):
     l = LRU(2)
     l[1] = '1'
     self.assertEqual('1', l.get(1))
     self.assertEqual('1', l.get(2, '1'))
     self.assertIsNone(l.get(2))
     self.assertEqual('1', l[1])
     self.assertRaises(KeyError, lambda: l['2'])
     with self.assertRaises(KeyError):
         del l['2']
Example #7
 def test_get_and_del(self):
     l = LRU(2)
     l[1] = "1"
     self.assertEqual("1", l.get(1))
     self.assertEqual("1", l.get(2, "1"))
     self.assertIsNone(l.get(2))
     self.assertEqual("1", l[1])
     self.assertRaises(KeyError, lambda: l["2"])
     with self.assertRaises(KeyError):
         del l["2"]
Example #8
 def test_get_and_del(self):
     l = LRU(2)
     l[1] = '1'
     self.assertEqual('1', l.get(1))
     self.assertEqual('1', l.get(2, '1'))
     self.assertIsNone(l.get(2))
     self.assertEqual('1', l[1])
     self.assertRaises(KeyError, lambda: l['2'])
     with self.assertRaises(KeyError):
         del l['2']
Example #9
class topic4:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count =1
        self.l1 = LRU(c_hash)
        self.l2 = LRU(c_user)

    def set_hashLRU(self,l):
        self.set(self.l1, l)

    def set_userLRU(self,l):
        self.set(self.l2, l)



    def set(self, lru, l):
        for k in l:
            v = lru.get(k,0)
            lru[k]=v+1

    def set_cluster(self, hashtags, users, words):
        for k in hashtags:
            self.l1[k]=self.l1.get(k,0)+1
        for k in users:
            self.l2[k]=self.l2.get(k,0)+1

        self.topic_count+=1

    def get_similarity(self,hashtags,users,words):
        h_sum = 1
        u_sum = 1
        w_sum = 1
        h_match =0
        h_ind =0
        u_ind =0
        w_ind =0
        c=0
        h1 = self.l1.get_size()
        u1 = self.l2.get_size()
        for h in hashtags:
            # l1_items=zip(*self.l1.items())
            h_sum+= self.l1.get(h,0)
            if(self.l1.has_key(h)):
                ind = self.l1.keys().index(h)
                h_ind+= h1 - ind
                h_match+= 1 if ind<250 else 0
        for u in users:
            u_sum+= self.l2.get(u,0)
            if(self.l2.has_key(u)):
                u_ind+= u1 - self.l2.keys().index(u)

        if(h_match !=0):
            c = h_match -1
        # print(h_ind,h1,u_ind,u1,w_ind,w1, h_sum,w_sum,)
        similarity = (h_ind/(h1+1))*(h_sum/sum(self.l1.values() +[1])) + (u_ind/(u1+1))*(u_sum/sum(self.l2.values()+[1]))  +c
        return similarity
Example #10
def random_read(lru_len=100,times=20000):
    key_list=get_random_int(0,1500,times)
    lru_obj=LRU(lru_len) 
    for key in key_list:
        lru_obj.put(key,key)
    beg_time=time.time()
    for key in key_list:
        lru_obj.get(key)
    end_time=time.time()
    print("测试随机读操作{}次,lru_len长度为{},情况下耗时{} ".format(times,lru_len,end_time-beg_time))
    time.time()
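
The two benchmarks on this page call a get_random_int helper that is not shown; a plausible sketch, with the signature inferred from the call site (low, high, count):

import random

def get_random_int(low, high, count):
    # Assumed helper: return `count` random integers in the range [low, high).
    return [random.randrange(low, high) for _ in range(count)]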
Example #11
 def test_update_item(self):
     lru = LRU(5)
     lru.set("name", "john")
     self.assertEqual(lru.cnt, 1)
     lru.set("age", 30)
     self.assertEqual(lru.get("name"), "john")
     lru.set("yob", 1990)
     self.assertEqual(lru.cnt, 3)
     lru.set("name", "jim")
     self.assertEqual(lru.get("name"), "jim")
     self.assertEqual(lru.cnt, 3)
Example #12
 def test_set_above_cap(self):
     lru = LRU(3)
     lru.set("name", "john")
     self.assertEqual(lru.cnt, 1)
     lru.set("age", 30)
     lru.set("yob", 1990)
     self.assertEqual(lru.get("name"), "john")
     self.assertEqual(lru.get("age"), 30)
     self.assertEqual(lru.cnt, 3)
     self.assertEqual(lru.tail.prev.key, "yob")
     lru.set("loc", "CA")
     self.assertEqual(lru.cnt, 3)
     self.assertEqual(lru.tail.prev.key, "name")
     with self.assertRaises(KeyError):
         lru.get("yob")
Example #13
def cached_mnubo_object_exists(device_id):
    """ Method to wrap the object existence checking in a cached object
    :param device_id: The device id of the object
    :return: True if the object exists, False if it doesn't
    """
    global global_cache
    global config
    now = int(time.time())

    if not isinstance(global_cache, LRU):
        if not isinstance(config['cache_max_entries'], int):
            raise ValueError('cache_max_entries must be an integer')
        global_cache = LRU(config['cache_max_entries'])

    if not isinstance(config['cache_validity_period'], int):
        raise ValueError('cache_validity_period must be an integer')

    found = global_cache.get(device_id, None)
    if found and found > now:
        rc = True
    else:
        rc = mnubo_object_exists(device_id)
        if rc:
            global_cache[device_id] = now + config['cache_validity_period']
    return rc
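
The function above leans on module-level config and global_cache objects; a minimal sketch of how they might be initialized (the two keys come from the code above, the concrete values are assumptions):

config = {
    "cache_max_entries": 10000,     # maximum number of device ids kept in the LRU
    "cache_validity_period": 300,   # seconds a positive existence check stays cached
}
global_cache = None                 # built lazily on the first call above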
Example #14
def random_read_write(lru_len=100,times=20000):
    """
    Random reads and writes
    """
    key_list=get_random_int(0,1500,times)
    lru_obj=LRU(lru_len) 

    beg_time=time.time()
    for index,key in  enumerate(key_list):
        if index%2==0:
            lru_obj.put(key,key)
        else:
            lru_obj.get(key)
    end_time=time.time()
    print("测试随机读写操作{}次,lru_len长度为{},情况下耗时{} ".format(times,lru_len,end_time-beg_time))
    time.time()
Example #15
 def test_access_within_size(self):
     for size in SIZES:
         l = LRU(size)
         for i in xrange(size):
             l[i] = str(i)
         for i in xrange(size):
             self.assertEquals(l[i], str(i))
             self.assertEquals(l.get(i,None), str(i))
Example #16
 def test_access_within_size(self):
     for size in SIZES:
         l = LRU(size)
         for i in range(size):
             l[i] = str(i)
         for i in range(size):
             self.assertEqual(l[i], str(i))
             self.assertEqual(l.get(i,None), str(i))
Example #17
class SocketCommunicator():
    def __init__(self, maxUnacceptConnections=5):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = ('', 10000)
        self.sock.bind(server_address)
        self.sock.listen(maxUnacceptConnections)

        self.theData = []
        self.theDataLock = Lock()
        self.shutdown_event = Event()
        self.theThread = None

        self.connections = LRU(10)

    def startServer(self):
        if self.theThread is not None:
            return
        def start():
            while not self.shutdown_event.is_set():
                print('waiting for connection...')
                connection, client_address = self.sock.accept()
                print('accepted connection from: {}'.format(client_address))
                data = connection.recv(1)
                if data:
                    data = data.decode()
                    print(data)
                    print()
                    if self.connections.get(client_address) is None:
                        self.connections[client_address] = data
                    with self.theDataLock:
                        self.theData.append(data)
            self.sock.close()
        servThread = Thread(target=start, daemon=True)
        servThread.start()
        self.theThread = servThread

    def sendMessage(self, addr, port, data):
        connection = socket.create_connection((addr, port))
        connection.sendall(data)
        connection.close()

    def stop(self):
        self.shutdown_event.set()
        if self.theThread is not None:
            self.theThread.join()

    def getTheData(self):
        with self.theDataLock:
            tmp = self.theData.copy()
            self.theData = []
            return tmp
    
    def whoSent(self, data):
        items = self.connections.items()
        for addr, msg in items:
            if msg==data:
                return addr
        return None
Example #18
 def test_access(self):
     for size in SIZES:
         l = LRU(size)
         n = size * 2
         for i in xrange(n):
             l[i] = str(i)
         self._check_kvi(range(n-1,size-1,-1), l)
         for i in xrange(size, n):
             self.assertEquals(l[i], str(i))
             self.assertEquals(l.get(i,None), str(i))
Example #19
 def test_access(self):
     for size in SIZES:
         l = LRU(size)
         n = size * 2
         for i in range(n):
             l[i] = str(i)
         self._check_kvi(range(n-1,size-1,-1), l)
         for i in range(size, n):
             self.assertEqual(l[i], str(i))
             self.assertEqual(l.get(i,None), str(i))
Example #20
class CertificateCache(object):
    def __init__(self, cache_size):
        self.certificates = LRU(cache_size)

    def get(self, cache_id):
        """
        Get the CachedCertificate for the given cache_id.
        """
        return self.certificates.get(cache_id)

    def lock(self, cache_id):
        """
        Lock the CachedCertificate for the given cache_id. If the id is not in
        the cache, create a CachedCertificate for the cache_id, add it to the
        cache, and lock it.
        """
        if cache_id in self.certificates:
            self.certificates[cache_id].lock = True
        else:
            self.certificates[cache_id] = CachedCertificate(
                lock=True,
                response=None,
            )

    def release(self, cache_id):
        if cache_id in self.certificates:
            self.certificates[cache_id].lock = False
        else:
            logging.warning(
                'Attempting to release a non-existent lock in the certificate'
                ' cache.')

    def set_response(self, cache_id, response):
        self.certificates[cache_id].response = response

    def get_cache_id(self, cn, validity, san):
        """
        Return a unique string from the provided arguments, for use in the
        certificate cache. The current day is included in the id, to ensure
        cache invalidation (minimum validity is 1 day).
        """
        date = datetime.datetime.today().strftime('%Y-%m-%d')
        return '{}{}{}{}'.format(cn, validity, san, date)
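
A hedged sketch of the lock / fill / release flow the class above implies; issue_certificate is a hypothetical stand-in for whatever actually requests the certificate:

cache = CertificateCache(cache_size=512)
cache_id = cache.get_cache_id("example.org", 30, "example.org")

cached = cache.get(cache_id)
if cached is None or cached.response is None:
    cache.lock(cache_id)                               # creates the entry if it is missing
    try:
        response = issue_certificate("example.org")    # hypothetical issuer call
        cache.set_response(cache_id, response)
    finally:
        cache.release(cache_id)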
Example #21
File: clusters.py (Project: nishucsd/thesis)
class topic:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count =1
        self.l1 = LRU(c_hash)
        self.l2 = LRU(c_user)
        self.l3 = LRU(c_words)

    def set_hashLRU(self,l):
        self.set(self.l1, l)

    def set_userLRU(self,l):
        self.set(self.l2, l)

    def set_wordLRU(self,l):
        self.set(self.l3, l)

    def set(self, lru, l):
        for k in l:
            v = lru.get(k,0)
            lru[k]=v+1

    def set_cluster(self, hashtags, users, words):
        for k in hashtags:
            v = self.l1.get(k,0)
            self.l1[k]=v+1
        for k in users:
            v = self.l2.get(k,0)
            self.l2[k]=v+1
        for k in words:
            v = self.l3.get(k,0)
            self.l3[k]=v+1
        self.topic_count+=1

    def get_similarity(self,hashtags,users,words):
        h_sum = 0
        u_sum = 0
        w_sum = 0

        for h in hashtags:
            h_sum+= self.l1.get(h,0)
        for u in users:
            u_sum+= self.l2.get(u,0)
        for w in words:
            w_sum+= self.l3.get(w,0)

        similarity = 0.5*h_sum + 0.2*u_sum + 0.3*w_sum
        return similarity
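
A small, hypothetical driver for the class above (capacities and tokens are arbitrary):

t = topic(c_hash=500, c_user=500, c_words=3500)
t.set_cluster(["#wine"], ["alice"], ["vino", "cellar"])
t.set_cluster(["#wine", "#food"], ["bob"], ["vino"])

# 0.5 * hashtag overlap + 0.2 * user overlap + 0.3 * word overlap
print(t.get_similarity(["#wine"], ["alice"], ["vino"]))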
Example #22
class EasyRequests:
    __slots__ = ("bot", "loop", "session", "lock", "cache")

    def __init__(self, bot, session):
        self.bot = bot
        self.loop = bot.loop
        self.session = session

        self.lock = asyncio.Lock(loop=bot.loop)
        self.cache = LRU(64)

    @classmethod
    async def start(cls, bot):
        session = aiohttp.ClientSession(loop=bot.loop,
                                        headers=bot.http_headers)
        LOG.info("Session opened.")
        return cls(bot, session)

    def fmt_cache(self, m, url, param):
        p = ":".join([f"{k}:{v}" for k, v in param.items()])
        return f"{m}:{url}:{p}"

    def clear_cache(self, new_size=64):
        self.cache = LRU(new_size)
        LOG.info("Cleared cache, size set to %d", new_size)

    async def request(self, __method, __url, *, cache=False, **params):
        async with CacheLock(self.lock, do_lock=cache):
            check = self.cache.get(self.fmt_cache(__method, __url, params),
                                   None)
            if check and cache:
                return check

            kwargs = dict()

            kwargs["headers"] = params.pop("headers", None)
            kwargs["data"] = params.pop("data", None)
            kwargs["json"] = params.pop("json", None)

            for tries in range(1, 6):
                async with self.session.request(__method,
                                                __url,
                                                params=params,
                                                **kwargs) as r:
                    if "application/json" in r.headers["Content-Type"]:
                        data = await r.json()
                    elif "text/" in r.headers["Content-Type"]:
                        data = await r.text("utf-8")
                    else:
                        data = await r.read()

                    request_fmt = f"{r.status} {r.method} {r.url}"

                    LOG.debug("%s returned %s", request_fmt, data)

                    #  f**k zerochan 200 = 404 apparently
                    if 300 > r.status >= 200 or __url == "https://www.zerochan.net/search":
                        LOG.info("%s succeeded", request_fmt)
                        if cache:
                            self.cache[self.fmt_cache(__method, __url,
                                                      params)] = data
                        return data

                    if r.status == 429:
                        time = tries + 1
                        LOG.warning("%s RATE LIMITED (retrying in: %d)",
                                    request_fmt, time)
                        await asyncio.sleep(time, loop=self.loop)
                        continue

                    if r.status in {500, 502}:
                        time = 1 + (tries * 2)
                        LOG.warning("%s INTERNAL ERROR (retrying in: %d)",
                                    request_fmt, time)
                        await asyncio.sleep(time, loop=self.loop)
                        continue

                    LOG.error("%s errored.", request_fmt)
                    raise WebException(r, data)

            LOG.fatal("%s out of tries.", request_fmt)
            raise WebException(r, data)

    async def close(self):
        LOG.info("Session closed.")
        await self.session.close()
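
A hypothetical call site for the wrapper above; the bot object, its loop and headers, and the URL are assumptions:

async def fetch_example(bot):
    requests = await EasyRequests.start(bot)
    try:
        # cache=True keys the parsed body on method, URL and query parameters
        return await requests.request("GET", "https://example.com/api",
                                      cache=True, q="lru")
    finally:
        await requests.close()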
Example #23
class PrefixManager(metaclass=MainThreadSingletonMeta):
    def __init__(self, bot: Salamander):
        self._bot: Salamander = bot
        self._cache = LRU(128)

    def get_guild_prefixes(self, guild_id: int) -> Sequence[str]:
        base = self._cache.get(guild_id, ())
        if base:
            return base

        cursor = self._bot._conn.cursor()
        res = tuple(
            pfx
            for (pfx,) in cursor.execute(
                """
                SELECT prefix FROM guild_prefixes
                WHERE guild_id=?
                ORDER BY prefix DESC
                """,
                (guild_id,),
            )
        )
        self._cache[guild_id] = res
        return res

    def add_guild_prefixes(self, guild_id: int, *prefixes: str):
        cursor = self._bot._conn.cursor()
        with self._bot._conn:

            cursor.execute(
                """
                INSERT INTO guild_settings (guild_id) VALUES (?)
                ON CONFLICT (guild_id) DO NOTHING
                """,
                (guild_id,),
            )

            cursor.executemany(
                """
                INSERT INTO guild_prefixes (guild_id, prefix)
                VALUES (?, ?)
                ON CONFLICT (guild_id, prefix) DO NOTHING
                """,
                tuple((guild_id, pfx) for pfx in prefixes),
            )

        # Extremely likely to be cached already
        try:
            del self._cache[guild_id]
        except KeyError:
            pass

    def remove_guild_prefixes(self, guild_id: int, *prefixes: str):
        cursor = self._bot._conn.cursor()
        with self._bot._conn:

            cursor.executemany(
                """
                DELETE FROM guild_prefixes WHERE guild_id=? AND prefix=?
                """,
                tuple((guild_id, pfx) for pfx in prefixes),
            )

        # Extremely likely to be cached already
        try:
            del self._cache[guild_id]
        except KeyError:
            pass
Example #24
File: DND.py (Project: tabzraz/RL)
class DND:
    def __init__(self, kernel, num_neighbors, max_memory, embedding_size):
        # self.dictionary = LRUCache(max_memory)
        # self.kd_tree = kdtree.create(dimensions=embedding_size)
        # rnd_projection = RandomBinaryProjections("RBP", 8)
        # distance = EuclideanDistance()
        # nearest = NearestFilter(num_neighbors)
        # self.nearpy = Engine(dim=embedding_size, lshashes=[rnd_projection], distance=distance, vector_filters=[nearest], fetch_vector_filters=[])

        self.kd_tree = None
        # self.data = []

        # self.lshash = LSHash(hash_size=embedding_size, input_dim=embedding_size, num_hashtables=10)
        self.lru = LRU(size=max_memory)

        self.num_neighbors = num_neighbors
        self.kernel = kernel
        self.max_memory = max_memory
        self.embedding_size = embedding_size
        # self.keys_added = []

    def is_present(self, key):
        return tuple(key) in self.lru  # self.lru.has_key(tuple(key))
        # return self.dictionary.get(tuple(key)) is not None
        # return self.dictionary.get(tuple(key.data.cpu().numpy()[0])) is not None

    def get_value(self, key):
        return self.lru[tuple(key)]
        # return self.dictionary.get(tuple(key))
        # return self.dictionary.get(tuple(key.data.cpu().numpy()[0]))

    def lookup(self, lookup_key):
        # TODO: Speed up search knn
        # keys = [key[0].data for key in self.kd_tree.search_knn(lookup_key, self.num_neighbors)]
        lookup_key_numpy = lookup_key.data[0].numpy()
        # lookup_key_tuple = tuple(lookup_key_numpy)
        # print(lookup_key)

        # keys = [key[0] for key in self.lshash.query_no_data(lookup_key_numpy, num_results=self.num_neighbors)]
        # keys = [key[1] for key in self.nearpy.neighbours(lookup_key_numpy)]
        if self.kd_tree is not None:
            # print(len(self.lru.keys()), lookup_key_numpy)
            # things_distances, things_index = self.kd_tree.query(lookup_key_numpy, k=self.num_neighbors, eps=1.0)
            things_index = self.kd_tree.query([lookup_key_numpy],
                                              k=min(self.num_neighbors,
                                                    len(self.kd_tree.data)),
                                              return_distance=False,
                                              sort_results=False)
            # print(things_index)
            keys = [self.lru.keys()[ii[0]] for ii in things_index]
            # print(keys)
        else:
            keys = []

        # print(keys)
        # print(keys)
        # output, kernel_sum = Variable(FloatTensor([0])), Variable(FloatTensor([0]))
        output, kernel_sum = 0, 0
        # if len(keys) != 0:
        # print(keys)
        # TODO: Speed this up since the kernel takes a significant amount of time
        for key in keys:
            # print("Key:",key, lookup_key)
            # if not np.allclose(key, lookup_key_numpy): #(key == lookup_key).data.all():
            if not np.all(key == lookup_key_numpy):
                # print("Here")
                # gg = Variable(FloatTensor(np.array(key)))
                # print(key)
                # gg = Variable(FloatTensor(key))
                gg = Variable(torch.from_numpy(np.array(key)))
                # print(tuple(key))
                # hh = lookup_key[0] - gg
                # print("Key:", gg, "Lookup key", lookup_key[0])
                # print(lookup_key[0] + gg)
                kernel_val = self.kernel(gg, lookup_key[0])
                # print("key:", self.lru.get(tuple(key)))
                # if not self.lru.has_key(tuple(key)):
                # print(keys)
                # print(tuple(key))
                # print(key in self.keys_added)
                # print(len(self.lru))
                # if tuple(key) not in self.lru:
                # print("NOT IN:", tuple(key))
                # print(len(keys))
                output += kernel_val * self.lru.get(tuple(key))
                # output += kernel_val * self.dictionary.get(tuple(key))
                # print("Key", key.requires_grad, key.volatile)
                # print("Kernel key", self.kernel(key, lookup_key).requires_grad)
                # print("Output in loop", output.requires_grad)
                kernel_sum += kernel_val  #self.kernel(key, lookup_key)
                # print(kernel_sum)
        # if len(keys) == 0:
        #     return (lookup_key * 0)[0][0]
        if isinstance(kernel_sum, int):
            return (lookup_key * 0)[0][0]
        # if kernel_sum == 0:
        # print("0 Kernel", kernel_sum)
        # if len(keys) == 0:
        # print("0 keys", len(keys))
        if kernel_sum.data[0] == 0 or len(keys) == 0:
            # print(lookup_key)
            # zeroed = (lookup_key * 0)[0][0]
            # print("Zero Lookup.", output.data, kernel_sum.data, len(keys))
            return (lookup_key * 0)[0][0]
        # print("lookup_key", lookup_key.requires_grad, lookup_key.volatile)
        # print("kernled", self.kernel(keys[0], lookup_key).requires_grad)
        # print("output", output.requires_grad, output.volatile)
        # print("ks", kernel_sum.requires_grad, kernel_sum.volatile)
        # print("Non-Zero Lookup for {}".format(lookup_key))
        output = output / kernel_sum
        # print(output)
        return output

    def upsert(self, key, value):
        # key = key.data[0].numpy()
        # print(key)
        # self.keys_added.append(key)
        # if not self.lru.has_key(tuple(key)):# self.is_present(key):
        # self.kd_tree.add(key)
        # print("Key going in", key)
        # self.lshash.index(input_point=key)
        # self.nearpy.store_vector(key, data=key)

        # print("Adding", tuple(key), key)
        # neighbours = self.nearpy.neighbours(key)
        # print(neighbours)

        self.lru[tuple(key)] = value
        # self.kd_tree = KDTree(data=self.lru.keys(), compact_nodes=False, copy_data=False, balanced_tree=False)
        self.kd_tree = KDTree(self.lru.keys())

        return
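        # NOTE: the early return above makes everything below unreachable; it
        # reads as legacy/experimental code kept only for reference.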
        if len(self.lru) == self.max_memory:
            # Expel least recently used key from self.dictionary and self.kd_tree if memory used is at capacity
            # deleted_key = self.dictionary.delete_least_recently_used()[0]
            # deleted_key = self.lru.peek_last_item()[0]
            # print("Deleted key:",deleted_key)
            # deleted_key = np.array(deleted_key)
            # thing = Variable(torch.from_numpy(deleted_key).float()).unsqueeze(0)
            # thing = Variable(FloatTensor(deleted_key)).unsqueeze(0)
            # print("Thing:",thing)
            # print(self.dictionary.cache.keys())
            key_to_delete = self.lru.peek_last_item()
            self.lru[tuple(key)] = value
            # self.kd_tree.remove(Variable(FloatTensor(deleted_key)).unsqueeze(0))
            # self.kd_tree.remove(deleted_key)

            # Remake the LSHASH with the deleted key
            # print("remaking")

            # self.lshash = LSHash(hash_size=self.embedding_size, input_dim=self.embedding_size)
            # for k in self.lru.keys():
            #     self.lshash.index(np.array(k))

            # print("Deleting", np.array(key_to_delete[0]))
            # self.nearpy.delete_vector(key_to_delete[0])
            # self.nearpy.clean_all_buckets()
            # for k in self.lru.keys():
            # self.nearpy.store_vector(np.array(k))

            # Checking that the lru keys are the same as the keys in the lshash
            # for key in self.lru.keys():
            #     keys_close = [key[0] for key in self.lshash.query(key, num_results=5)]
            #     # print(keys_close)
            #     for kk in keys_close:
            #         if kk not in self.lru:
            #             print("\n\nProblems! Key in LSHASH not in LRU\n\n")

            # Check length of all lru keys
            # all_lru_keys = self.lshash.query(key)
            # print("\n", len(all_lru_keys), "\n")
        else:
            self.lru[tuple(key)] = value

        self.kdtree = KDTree(self.data)
Example #25
class Network(Service, NetworkAPI):
    _bootnodes: Tuple[ENRAPI, ...]
    _talk_protocols: Dict[bytes, TalkProtocolAPI]

    def __init__(self, client: ClientAPI, bootnodes: Collection[ENRAPI],) -> None:
        self.logger = get_extended_debug_logger("ddht.Network")

        self.client = client

        self._bootnodes = tuple(bootnodes)
        self.routing_table = KademliaRoutingTable(
            self.client.enr_manager.enr.node_id, ROUTING_TABLE_BUCKET_SIZE,
        )
        self._routing_table_ready = trio.Event()
        self._last_pong_at = LRU(2048)

        self._talk_protocols = {}

        self._ping_handler_ready = trio.Event()
        self._find_nodes_handler_ready = trio.Event()

    async def ready(self) -> None:
        await self._ping_handler_ready.wait()
        await self._find_nodes_handler_ready.wait()

    #
    # Proxied ClientAPI properties
    #
    @property
    def local_node_id(self) -> NodeID:
        return self.client.local_node_id

    @property
    def events(self) -> EventsAPI:
        return self.client.events

    @property
    def dispatcher(self) -> DispatcherAPI:
        return self.client.dispatcher

    @property
    def enr_manager(self) -> ENRManagerAPI:
        return self.client.enr_manager

    @property
    def pool(self) -> PoolAPI:
        return self.client.pool

    @property
    def enr_db(self) -> QueryableENRDatabaseAPI:
        return self.client.enr_db

    #
    # TALK API
    #
    def add_talk_protocol(self, protocol: TalkProtocolAPI) -> None:
        if protocol.protocol_id in self._talk_protocols:
            raise DuplicateProtocol(
                f"A protocol is already registered for '{protocol.protocol_id!r}'"
            )
        self._talk_protocols[protocol.protocol_id] = protocol

    #
    # High Level API
    #
    async def bond(
        self, node_id: NodeID, *, endpoint: Optional[Endpoint] = None
    ) -> bool:
        self.logger.debug2(
            "Bonding with %s", node_id.hex(),
        )

        try:
            pong = await self.ping(node_id, endpoint=endpoint)
        except trio.TooSlowError:
            self.logger.debug("Bonding with %s timed out during ping", node_id.hex())
            return False
        except MissingEndpointFields:
            self.logger.debug(
                "Bonding with %s failed due to missing endpoint information",
                node_id.hex(),
            )
            return False

        try:
            enr = await self.lookup_enr(
                node_id, enr_seq=pong.enr_seq, endpoint=endpoint
            )
        except trio.TooSlowError:
            self.logger.debug(
                "Bonding with %s timed out during ENR retrieval", node_id.hex(),
            )
            return False
        except EmptyFindNodesResponse:
            self.logger.debug(
                "Bonding with %s failed due to them not returing their ENR record",
                node_id.hex(),
            )
            return False

        self.routing_table.update(enr.node_id)

        self.logger.debug(
            "Bonded with %s successfully", node_id.hex(),
        )

        self._routing_table_ready.set()
        return True

    async def _bond(self, node_id: NodeID, endpoint: Optional[Endpoint] = None) -> None:
        await self.bond(node_id, endpoint=endpoint)

    async def ping(
        self,
        node_id: NodeID,
        *,
        endpoint: Optional[Endpoint] = None,
        request_id: Optional[bytes] = None,
    ) -> PongMessage:
        if endpoint is None:
            endpoint = await self.endpoint_for_node_id(node_id)
        response = await self.client.ping(node_id, endpoint, request_id=request_id)
        return response.message

    async def find_nodes(
        self,
        node_id: NodeID,
        *distances: int,
        endpoint: Optional[Endpoint] = None,
        request_id: Optional[bytes] = None,
    ) -> Tuple[ENRAPI, ...]:
        if not distances:
            raise TypeError("Must provide at least one distance")

        if endpoint is None:
            endpoint = await self.endpoint_for_node_id(node_id)
        responses = await self.client.find_nodes(
            node_id, endpoint, distances=distances, request_id=request_id
        )

        # Validate that all responses are indeed at one of the
        # specified distances.
        for response in responses:
            validate_found_nodes_distances(response.message.enrs, node_id, distances)

        return tuple(enr for response in responses for enr in response.message.enrs)

    def stream_find_nodes(
        self,
        node_id: NodeID,
        endpoint: Endpoint,
        distances: Collection[int],
        *,
        request_id: Optional[bytes] = None,
    ) -> AsyncContextManager[trio.abc.ReceiveChannel[ENRAPI]]:
        return common_network_stream_find_nodes(
            self, node_id, endpoint, distances, request_id=request_id
        )

    async def talk(
        self,
        node_id: NodeID,
        *,
        protocol: bytes,
        payload: bytes,
        endpoint: Optional[Endpoint] = None,
        request_id: Optional[bytes] = None,
    ) -> bytes:
        if endpoint is None:
            endpoint = await self.endpoint_for_node_id(node_id)
        response = await self.client.talk(
            node_id, endpoint, protocol, payload, request_id=request_id
        )
        payload = response.message.payload
        if not payload:
            raise ProtocolNotSupported(protocol)
        return response.message.payload

    async def lookup_enr(
        self, node_id: NodeID, *, enr_seq: int = 0, endpoint: Optional[Endpoint] = None
    ) -> ENRAPI:
        if node_id == self.local_node_id:
            raise Exception(f"Cannot lookup local ENR: node_id={node_id.hex()}")

        try:
            enr = self.enr_db.get_enr(node_id)
        except KeyError:
            if endpoint is None:
                # Try to use a recursive network lookup to find the desired
                # node.
                async with self.recursive_find_nodes(node_id) as enr_aiter:
                    async for found_enr in enr_aiter:
                        if found_enr.node_id == node_id:
                            endpoint = Endpoint.from_enr(found_enr)
                            break
                    else:
                        # we weren't given an endpoint and we don't have an enr which would give
                        # us an endpoint, there's no way to reach this node.
                        raise KeyError(f"Could not find ENR: node_id={node_id.hex()}")
        else:
            if enr.sequence_number >= enr_seq:
                return enr

        enr = await self._fetch_enr(node_id, endpoint=endpoint)
        try:
            self.enr_db.set_enr(enr)
        except OldSequenceNumber:
            pass

        return enr

    async def _fetch_enr(
        self, node_id: NodeID, *, endpoint: Optional[Endpoint]
    ) -> ENRAPI:
        enrs = await self.find_nodes(node_id, 0, endpoint=endpoint)
        if not enrs:
            raise EmptyFindNodesResponse(f"{node_id.hex()} did not return its ENR")

        # Assuming we're given enrs for a single node, this reduce returns the enr for
        # that node with the highest sequence number
        return reduce_enrs(enrs)[0]

    def recursive_find_nodes(
        self, target: NodeID
    ) -> AsyncContextManager[trio.abc.ReceiveChannel[ENRAPI]]:
        return common_recursive_find_nodes(self, target)

    @asynccontextmanager
    async def explore(
        self, target: NodeID, concurrency: int = 3,
    ) -> AsyncIterator[trio.abc.ReceiveChannel[ENRAPI]]:
        explorer = Explorer(self, target, concurrency)
        with trio.move_on_after(300) as scope:
            async with background_trio_service(explorer):
                await explorer.ready()

                async with explorer.stream() as receive_channel:
                    yield receive_channel

        if scope.cancelled_caught:
            self.logger.error("Timeout from `stream_locate`")

    #
    # Long Running Processes
    #
    async def run(self) -> None:
        self.manager.run_daemon_child_service(self.client)
        await self.client.wait_listening()

        self.manager.run_daemon_task(self._periodically_report_routing_table)
        self.manager.run_daemon_task(self._ping_oldest_routing_table_entry)
        self.manager.run_daemon_task(self._track_last_pong)
        self.manager.run_daemon_task(self._manage_routing_table)
        self.manager.run_daemon_task(self._pong_when_pinged)
        self.manager.run_daemon_task(self._serve_find_nodes)
        self.manager.run_daemon_task(self._handle_unhandled_talk_requests)

        await self.manager.wait_finished()

    async def _periodically_report_routing_table(self) -> None:
        async for _ in every(30, initial_delay=10):
            non_empty_buckets = tuple(
                reversed(
                    tuple(
                        (idx, bucket)
                        for idx, bucket in enumerate(self.routing_table.buckets, 1)
                        if bucket
                    )
                )
            )
            total_size = sum(len(bucket) for idx, bucket in non_empty_buckets)
            bucket_info = "|".join(
                tuple(
                    f"{idx}:{'F' if len(bucket) == self.routing_table.bucket_size else len(bucket)}"
                    for idx, bucket in non_empty_buckets
                )
            )
            self.logger.debug(
                "routing-table-info: size=%d  buckets=%s", total_size, bucket_info,
            )

    async def _ping_oldest_routing_table_entry(self) -> None:
        await self._routing_table_ready.wait()

        while self.manager.is_running:
            # Here we preserve the lazy iteration while still checking that the
            # iterable is not empty before passing it into `min` below which
            # throws an ambiguous `ValueError` otherwise if the iterable is
            # empty.
            nodes_iter = self.routing_table.iter_all_random()
            try:
                first_node_id = first(nodes_iter)
            except StopIteration:
                await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)
                continue
            else:
                least_recently_ponged_node_id = min(
                    cons(first_node_id, nodes_iter),
                    key=lambda node_id: self._last_pong_at.get(node_id, 0),
                )

            too_old_at = trio.current_time() - ROUTING_TABLE_KEEP_ALIVE
            try:
                last_pong_at = self._last_pong_at[least_recently_ponged_node_id]
            except KeyError:
                pass
            else:
                if last_pong_at > too_old_at:
                    await trio.sleep(last_pong_at - too_old_at)
                    continue

            did_bond = await self.bond(least_recently_ponged_node_id)
            if not did_bond:
                self.routing_table.remove(least_recently_ponged_node_id)

    async def _track_last_pong(self) -> None:
        async with self.dispatcher.subscribe(PongMessage) as subscription:
            async for message in subscription:
                self._last_pong_at[message.sender_node_id] = trio.current_time()

    async def _manage_routing_table(self) -> None:
        # First load all the bootnode ENRs into our database
        for enr in self._bootnodes:
            try:
                self.enr_db.set_enr(enr)
            except OldSequenceNumber:
                pass

        # Now repeatedly try to bond with each bootnode until one succeeds.
        while self.manager.is_running:
            with trio.move_on_after(20):
                async with trio.open_nursery() as nursery:
                    for enr in self._bootnodes:
                        if enr.node_id == self.local_node_id:
                            continue
                        endpoint = Endpoint.from_enr(enr)
                        nursery.start_soon(self._bond, enr.node_id, endpoint)

                    await self._routing_table_ready.wait()
                    break

        # Now we enter into an infinite loop that continually probes the
        # network to keep the routing table fresh.  We both perform completely
        # random lookups, as well as targeted lookups on the outermost routing
        # table buckets which are not full.
        #
        # The `TokenBucket` allows us to burst at the beginning, making quick
        # successive probes, then slowing down once the initial burst is exhausted.
        #
        # TokenBucket starts with 10 tokens, refilling at 1 token every 30
        # seconds.
        token_bucket = TokenBucket(1 / 30, 10)

        async with trio.open_nursery() as nursery:
            while self.manager.is_running:
                await token_bucket.take()

                # Get the logarithmic distance to the "largest" buckets
                # that are not full.
                non_full_bucket_distances = tuple(
                    idx + 1
                    for idx, bucket in enumerate(self.routing_table.buckets)
                    if len(bucket) < self.routing_table.bucket_size  # noqa: E501
                )[-16:]

                # Probe one of the not-full-buckets with a weighted preference
                # towards the largest buckets.
                distance_to_probe = weighted_choice(non_full_bucket_distances)
                target_node_id = at_log_distance(self.local_node_id, distance_to_probe)

                async with self.recursive_find_nodes(target_node_id) as enr_aiter:
                    async for enr in enr_aiter:
                        if enr.node_id == self.local_node_id:
                            continue

                        try:
                            self.enr_db.set_enr(enr)
                        except OldSequenceNumber:
                            pass

                        nursery.start_soon(self._bond, enr.node_id)

    async def _pong_when_pinged(self) -> None:
        async def _maybe_add_to_routing_table(
            request: InboundMessage[PingMessage],
        ) -> None:
            try:
                enr = await self.lookup_enr(
                    request.sender_node_id,
                    enr_seq=request.message.enr_seq,
                    endpoint=request.sender_endpoint,
                )
            except (trio.TooSlowError, EmptyFindNodesResponse):
                return

            self.routing_table.update(enr.node_id)
            self._routing_table_ready.set()

        async with trio.open_nursery() as nursery:
            async with self.dispatcher.subscribe(PingMessage) as subscription:
                self._ping_handler_ready.set()

                async for request in subscription:
                    await self.dispatcher.send_message(
                        request.to_response(
                            PongMessage(
                                request.request_id,
                                self.enr_manager.enr.sequence_number,
                                request.sender_endpoint.ip_address,
                                request.sender_endpoint.port,
                            )
                        )
                    )
                    nursery.start_soon(_maybe_add_to_routing_table, request)

    async def _serve_find_nodes(self) -> None:
        async with self.dispatcher.subscribe(FindNodeMessage) as subscription:
            self._find_nodes_handler_ready.set()

            async for request in subscription:
                response_enrs: List[ENRAPI] = []
                distances = set(request.message.distances)
                if len(distances) != len(request.message.distances):
                    self.logger.debug(
                        "Ignoring invalid FindNodeMessage from %s@%s: duplicate distances",
                        request.sender_node_id.hex(),
                        request.sender_endpoint,
                    )
                    continue
                elif not distances:
                    self.logger.debug(
                        "Ignoring invalid FindNodeMessage from %s@%s: empty distances",
                        request.sender_node_id.hex(),
                        request.sender_endpoint,
                    )
                    continue
                elif any(
                    distance > self.routing_table.num_buckets for distance in distances
                ):
                    self.logger.debug(
                        "Ignoring invalid FindNodeMessage from %s@%s: distances: %s",
                        request.sender_node_id.hex(),
                        request.sender_endpoint,
                        distances,
                    )
                    continue

                for distance in distances:
                    if distance == 0:
                        response_enrs.append(self.enr_manager.enr)
                    elif distance <= self.routing_table.num_buckets:
                        node_ids_at_distance = self.routing_table.get_nodes_at_log_distance(
                            distance,
                        )
                        for node_id in node_ids_at_distance:
                            response_enrs.append(self.enr_db.get_enr(node_id))
                    else:
                        raise Exception("Should be unreachable")

                await self.client.send_found_nodes(
                    request.sender_node_id,
                    request.sender_endpoint,
                    enrs=response_enrs,
                    request_id=request.request_id,
                )

    async def _handle_unhandled_talk_requests(self) -> None:
        async with self.dispatcher.subscribe(TalkRequestMessage) as subscription:
            async for request in subscription:
                if request.message.protocol not in self._talk_protocols:
                    await self.client.send_talk_response(
                        request.sender_node_id,
                        request.sender_endpoint,
                        payload=b"",
                        request_id=request.message.request_id,
                    )

    #
    # Utility
    #
    async def endpoint_for_node_id(self, node_id: NodeID) -> Endpoint:
        try:
            enr = self.enr_db.get_enr(node_id)
        except KeyError:
            enr = await self.lookup_enr(node_id)

        return Endpoint.from_enr(enr)
Example #26
class RewardNet():
    """Interacts with and learns from the environment."""
    def __init__(self, state_action_size, reward_size):
        """Initialize an RewardNet object.
        
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
        """
        self.state_action_size = state_action_size
        self.reward_size = reward_size
        set_seed()

        # Reward-Network
        self.reward_net = Network(state_action_size, reward_size).to(device)
        self.optimizer = optim.Adam(self.reward_net.parameters(), lr=LR)
        self.criterion = nn.MSELoss()

        # Replay memory
        self.memory = ReplayBuffer(BUFFER_SIZE, BATCH_SIZE, 0)
        # Reward dict - LRFU implementation not found, therefore just LRU
        self.M = LRU(BUFFER_SIZE)
        self.S = []
        self.V = 0
        # Initialize loss for tracking the progress
        self.loss = 0

    def add(self, state_action, reward):
        # Save experience in replay memory
        self.memory.add(state_action, reward)

    def add_to_M(self, sa, reward):
        # Add records to the reward dict
        self.M[sa] = reward
        if len(self.M) >= BUFFER_SIZE:
            del self.M[self.M.peek_last_item()[0]]  # discard LRU key

    def get_from_M(self, sa):
        # Retrieve items from M
        return (self.M.get(sa, 0))

    def step(self):
        # If enough samples are available in memory, get random subset and learn
        if len(self.memory) > BATCH_SIZE:
            experiences = self.memory.sample()
            self.learn(experiences)

    def act(self, state_action):
        """Returns actions for given state as per current policy.

            state (array_like): current state
        """
        sa = torch.from_numpy(state_action).float().unsqueeze(0).to(device)

        return (self.reward_net(sa))

    def learn(self, experiences):
        """Update value parameters using given batch of experience tuples.

            experiences (Tuple[torch.Tensor]): tuple of (sa, r) tuples 
        """
        state_actions, rewards = experiences

        # Get expected Reward values
        R_pred = self.reward_net(state_actions)

        # Compute loss
        loss = self.criterion(R_pred, rewards)
        print("RewardNet loss = {}".format(loss))
        # Grad descent
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Keep track of the loss for the history
        self.loss = loss.item()
Example #27
class Py2NeoHandler(object):
    def __init__(self, host, user, pwd):
        self.graph = Graph(host=host, user=user, password=pwd)
        self.lru_cache = LRU(100)


        # MATCH (a:Node)-[r]->(b:Node) where a.relevant_terms CONTAINS 'vino' or b.relevant_terms CONTAINS 'vino' RETURN a,r,b LIMIT 10
        # MATCH (a:Node)-[r]->(b) where a.id_s > 0 and a.id_s < 100 and b.id_s > 0 and b.id_s < 100 return a,r,b LIMIT 1000

    def _create_query_relevant_terms(self, query_terms, limit=10000):
        base_query = "MATCH  (a:Node)-[r]->(b:Node) "
        base_where = " (a.relevant_terms contains '{}' and b.relevant_terms contains '{}') and "
        base_return = " return a,r,b limit {};"

        composed_query = base_query
        term_split = query_terms.split()
        print(term_split)

        if len(term_split) > 0:
            composed_query += " WHERE "

        for term in query_terms.split():
            mod_term = ' {}:'.format(term)
            composed_query += base_where.format(mod_term, mod_term)

        if len(term_split) > 0:
            composed_query = composed_query[:-4]

        composed_query += base_return.format(limit)
        print('query ' + composed_query)
        return composed_query

    def _get_or_else(self, value, default):
        if value:
            return value
        else:
            return default

    def _create_node(self, n):
        props = dict(n)

        node = Node(
            type_id =  self._get_or_else(props['type_id'], ''),
            node_type = self._get_or_else(props['node_type'], ''),
            id_s = self._get_or_else(props['id_s'], ''),
            fiscal_code = self._get_or_else(props['fiscal_code'], ''),
            relevant_terms = self._get_or_else(props['relevant_terms'], ''),
            region = self._get_or_else(props['region'], ''),
            province = self._get_or_else(props['province'], ''),
            city = self._get_or_else(props['city'], ''),
            address = self._get_or_else(props['address'], ''),
            istat_code = self._get_or_else(props['istat_code'], ''),
            adm_code = self._get_or_else(props['administrative_code'], ''),
            name = self._get_or_else(props['name'], ''),
            company_type = self._get_or_else(props['company_type'], ''),
            nation = self._get_or_else(props['nation'], ''),
        )
        return node

    def _create_link(self, src_id, rel, dst_id):
        props = dict(rel)
        return Link(src_id, dst_id, props['score'])

    def query_by_relevant_terms(self, query_terms, limit=1000):
        start = time.time()
        if query_terms in self.lru_cache:
            end = time.time()
            print('query in time {}'.format(end - start))
            return self.lru_cache.get(query_terms)
        else:
            querystring = self._create_query_relevant_terms(query_terms, limit)

            nodes = set()
            links = set()

            for src, rel, dst in self.graph.run(querystring):
                    src_node = self._create_node(src)
                    dst_node = self._create_node(dst)
                    if len(str(src_node.id)) > 0 and len(str(dst_node.id)) > 0:
                        link = self._create_link(src_node.id, rel, dst_node.id)
                        nodes.add(src_node)
                        nodes.add(dst_node)
                        links.add(link)

            end = time.time()
            print('query in time {}'.format(end - start))
            result =  Result(list(nodes), list(links))
            self.lru_cache[query_terms] = result
            return result
Example #28
        if not i:
            members = await app.rest.search_members(guild_id, name=name)
            _update_cache(members)

    return None


async def member_converter(arg: WrappedArg) -> hikari.Member:
    if (msg := await caret_converter(arg)) is not None:
        if msg.member is not None:
            return msg.member

        if (member := arg.context.guild.get_member(msg.author.id)) is not None:
            return member

    if member := _member_cache.get(
            f'{arg.context.guild_id}:{arg.data.strip("<@!>")}'):
        return member

    try:
        member = await member_converter_(arg)
        _member_cache[f"{member.guild_id}:{member.id}"] = member
        return member
    except ConverterFailure:
        member = await search_member(arg.context.bot, arg.context.guild_id,
                                     arg.data)
        if not member:
            raise

        return member

Example #29
class topic4:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count =1
        # self.time = (self.first,self.last)
        self.l1 = LRU(c_hash)
        self.first =""
        self.last=""
        self.lats=[]
        self.longs=[]
        self.l2 = LRU(c_user)
        self.l3 = LRU(c_words)
        self.l4 = LRU(400)
    def set_hashLRU(self,l):
        self.set(self.l1, l)

    def set_userLRU(self,l):
        self.set(self.l2, l)

    def set_wordLRU(self,l):
        self.set(self.l3, l)

    def set(self, lru, l):
        for k in l:
            v = lru.get(k,0)
            lru[k]=v+1

    def set_cluster(self, hashtags, users, words,links, cords):
        for k in hashtags:
            self.l1[k]=self.l1.get(k,0)+1
        for k in users:
            self.l2[k]=self.l2.get(k,0)+1
        for k in words:
            self.l3[k]=self.l3.get(k,0)+1
        for k in links:
            self.l4[k]=self.l4.get(k,0)+1
        if(cords is not None):
            self.lats.append(cords["coordinates"][1])
            self.longs.append(cords["coordinates"][0])
        self.topic_count+=1

    def get_similarity(self,hashtags,users,words):
        h_sum = 1
        u_sum = 1
        w_sum = 1
        h_match =0
        h_ind =0
        u_ind =0
        w_ind =0
        c=0
        h1 = self.l1.get_size()
        u1 = self.l2.get_size()
        w1 = self.l3.get_size()
        for h in hashtags:
            # l1_items=zip(*self.l1.items())
            h_sum+= self.l1.get(h,0)
            if(self.l1.has_key(h)):
                ind = self.l1.keys().index(h)
                h_ind+= h1 - ind
                h_match+= 1 if ind<250 else 0
        for u in users:
            u_sum+= self.l2.get(u,0)
            if(self.l2.has_key(u)):
                u_ind+= u1 - self.l2.keys().index(u)
        for w in words:
            w_sum+= self.l3.get(w,0)
            if(self.l3.has_key(w)):
                w_ind+= w1 - self.l3.keys().index(w)
        if(h_match !=0):
            c = h_match -1
        # print(h_ind,h1,u_ind,u1,w_ind,w1, h_sum,w_sum,)
        similarity = (h_ind/(h1+1))*(h_sum/sum(self.l1.values() +[1])) + (u_ind/(u1+1))*(u_sum/sum(self.l2.values()+[1])) + (w_ind/(w1+1))*(w_sum/sum(self.l3.values()+[1])) +c
        return similarity
    def flush1(self, cache, size):
        if(len(cache.keys())>5):
            tokens = reversed(cache.keys()[:5])  # keep the five most recently used keys
            cache.clear()
            for i in tokens:
                cache[i]=1


    def flush(self):
        self.flush1(self.l1,500)
        self.flush1(self.l2, 500)
        self.flush1(self.l3,3500)
        self.topic_count=1
Example #30
class MemoryStateManager:
    '''
    Meaningless for anything other than tests
    '''
    def __init__(self, size=10):
        self.size = size
        self._data = LRU(self.size)
        self._locks = {}
        self._canceled = set()
        self.worker_id = uuid.uuid4().hex

    def set_loop(self, loop=None):
        pass

    async def update(self, task_id, data, ttl=None):
        # Updates existing data with new data
        existing = await self.get(task_id)
        existing.update(data)
        self._data[task_id] = existing

    async def get(self, task_id):
        return self._data.get(task_id, {})

    async def exists(self, task_id):
        return task_id in self._data

    async def list(self):
        for task_id in self._data.keys():
            yield task_id

    async def acquire(self, task_id, ttl):
        already_locked = await self.is_locked(task_id)
        if already_locked:
            raise TaskAlreadyAcquired(task_id)

        # Set new lock
        from guillotina_amqp.utils import TimeoutLock
        lock = TimeoutLock(self.worker_id)
        await lock.acquire(ttl=ttl)
        self._locks[task_id] = lock

    async def is_mine(self, task_id):
        if task_id not in self._locks:
            raise TaskNotFoundException(task_id)
        lock = self._locks[task_id]
        return lock.locked() and lock.worker_id == self.worker_id

    async def is_locked(self, task_id):
        if task_id not in self._locks:
            return False
        return self._locks[task_id].locked()

    async def release(self, task_id):
        if not await self.is_mine(task_id):
            # You can't refresh a lock that's not yours
            raise TaskAccessUnauthorized(task_id)
        # Release lock and pop it from data structure
        self._locks[task_id].release()
        self._locks.pop(task_id, None)

    async def refresh_lock(self, task_id, ttl):
        if task_id not in self._locks:
            raise TaskNotFoundException(task_id)

        if not await self.is_locked(task_id):
            raise Exception(f'Task {task_id} is not locked')

        if not await self.is_mine(task_id):
            # You can't refresh a lock that's not yours
            raise TaskAccessUnauthorized(task_id)

        # Refresh
        return await self._locks[task_id].refresh_lock(ttl)

    async def cancel(self, task_id):
        self._canceled.update({task_id})
        return True

    async def cancelation_list(self):
        canceled = copy.deepcopy(self._canceled)
        for task_id in canceled:
            yield task_id

    async def clean_canceled(self, task_id):
        try:
            self._canceled.remove(task_id)
            return True
        except KeyError:
            # Task id wasn't canceled
            return False

    async def is_canceled(self, task_id):
        return task_id in self._canceled

    async def _clean(self):
        self._data = LRU(self.size)
        self._locks = {}
        self._canceled = set()
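Note: MemoryStateManager.update is a read-merge-write over an LRU-backed dict of dicts. The stripped-down sketch below shows just that update/get shape; TinyTaskStore and its task data are illustrative, not part of the original code, and assume only asyncio and lru-dict:

import asyncio
from lru import LRU


class TinyTaskStore:
    """Stripped-down version of the update/get pattern above (illustrative only)."""

    def __init__(self, size=10):
        self._data = LRU(size)

    async def get(self, task_id):
        # Missing tasks come back as an empty dict, as in MemoryStateManager.get
        return self._data.get(task_id, {})

    async def update(self, task_id, data):
        existing = await self.get(task_id)
        existing.update(data)        # merge new fields into whatever was stored
        self._data[task_id] = existing


async def main():
    store = TinyTaskStore()
    await store.update("task-1", {"status": "running"})
    await store.update("task-1", {"progress": 0.5})
    print(await store.get("task-1"))   # {'status': 'running', 'progress': 0.5}

asyncio.run(main())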
예제 #31
0
class AlchemyModel(t.Generic[T],
                   QtCore.QAbstractTableModel,
                   metaclass=_SqlAlchemyTableModelMeta):
    _columns: t.Sequence[ConvertibleColumn]

    def __init__(
        self,
        model_type: t.Type[T],
        order_by: QueryableAttribute,
        *,
        columns: t.Optional[t.Sequence[ConvertibleColumn]] = None,
        page_size: int = 64,
        auto_commit: bool = True,
    ):
        super().__init__()
        self._model_type = model_type
        self._order_by_column = order_by
        self._page_size = page_size
        self._auto_commit = auto_commit

        if columns is not None:
            self._columns = columns

        if not self._columns:
            raise ValueError('Specify at least one column')

        self._header_names = tuple(' '.join(v.capitalize()
                                            for v in c.column.name.split('_'))
                                   for c in self._columns)

        self._cache = LRU(int(page_size * 2))
        self._cached_size = None

    def filter_query(self, query: Query) -> Query:
        return query

    def get_query(self) -> Query:
        return self.filter_query(EDB.Session.query(self._model_type))

    def clear_cache(self) -> None:
        self._cache.clear()
        self._cached_size = None

    def _load_page(self, offset: int, limit: int) -> None:
        for idx, model in enumerate(self.get_query().order_by(
                self._order_by_column).limit(limit).offset(offset)):
            self._cache[idx + offset] = model

    def get_item_at_index(self, index: int) -> t.Optional[T]:
        if index < 0:
            return None
        try:
            return self._cache[index]
        except KeyError:
            self._load_page(index, self._page_size)
            return self._cache.get(index, None)

    def rowCount(self, parent: QModelIndex = ...) -> int:
        if self._cached_size is None:
            self._cached_size = self.get_query().count()
        return self._cached_size

    def columnCount(self, parent: QModelIndex = ...) -> int:
        return len(self._columns)

    def data(self, index: QModelIndex, role: int = ...) -> t.Any:
        if role not in (Qt.DisplayRole, Qt.EditRole):
            return None

        row = self.get_item_at_index(index.row())
        if not row:
            return None

        column = self._columns[index.column()]

        return column.to_primitive(getattr(row, column.column.name))

    def setData(self,
                index: QModelIndex,
                value: t.Any,
                role: int = ...) -> bool:
        if role != Qt.EditRole:
            return False

        row = self.get_item_at_index(index.row())
        if not row:
            return False

        column = self._columns[index.column()]

        setattr(row, column.column.name, column.from_primitive(value))

        if self._auto_commit:
            EDB.Session.commit()

        if self._columns[index.column()] == self._order_by_column:
            self.clear_cache()

        return True

    def flags(self, index: QModelIndex) -> Qt.ItemFlags:
        return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled

    def headerData(self,
                   section: int,
                   orientation: Qt.Orientation,
                   role: int = ...) -> t.Any:
        if role != Qt.DisplayRole:
            return None

        if orientation == Qt.Vertical:
            return str(section + 1)

        return self._header_names[section]

    def removeRows(self,
                   row: int,
                   count: int,
                   parent: QModelIndex = ...) -> bool:
        self.beginRemoveRows(parent, row, row - 1 + count)
        pk_column = self._model_type.__mapper__.primary_key[0]
        items = list(
            filter(lambda i: i is not None,
                   (getattr(self.get_item_at_index(idx), pk_column.name)
                    for idx in range(row, row + count))))

        if not items:
            return False

        EDB.Session.query(self._model_type).filter(
            pk_column.in_(items)).delete(synchronize_session='fetch')

        if self._auto_commit:
            EDB.Session.commit()

        self.clear_cache()
        self.endRemoveRows()
        return True

    def pop(self, row: int) -> t.Optional[T]:
        model = self.get_item_at_index(row)
        if not model:
            return
        self.removeRows(row, 1)
        return model

    def moveRows(
        self,
        sourceParent: QModelIndex,
        sourceRow: int,
        count: int,
        destinationParent: QModelIndex,
        destinationChild: int,
    ) -> bool:
        self.beginMoveRows(QModelIndex(), sourceRow, sourceRow + count - 1,
                           QModelIndex(), destinationChild)

        floor = min(sourceRow, destinationChild)

        items = [
            _item for _item in (
                self.get_item_at_index(idx)
                for idx in range(floor,
                                 max(sourceRow, destinationChild) + count))
            if _item is not None
        ]
        old_values = [
            getattr(_item, self._order_by_column.name) for _item in items
        ]

        for _ in range(count):
            items.insert(destinationChild - floor,
                         items.pop(sourceRow - floor))

        for item, new_value in zip(items, old_values):
            setattr(item, self._order_by_column.name, new_value)

        if self._auto_commit:
            EDB.Session.commit()

        self.clear_cache()

        self.endMoveRows()
        return True

    def reset(self) -> None:
        self.beginResetModel()
        self.clear_cache()
        self.endResetModel()
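Note: get_item_at_index and _load_page above form a page-at-a-time read-through cache: a miss loads page_size rows starting at the requested offset into the LRU. A minimal sketch of the same pattern, with a plain list standing in for the ordered SQLAlchemy query (all names below are illustrative):

from lru import LRU

ROWS = [f"row-{i}" for i in range(1000)]   # stand-in for an ordered database query
PAGE_SIZE = 64
cache = LRU(PAGE_SIZE * 2)                 # hold roughly two pages, as above


def load_page(offset, limit):
    # In the model this is get_query().order_by(...).limit(limit).offset(offset)
    for idx, row in enumerate(ROWS[offset:offset + limit]):
        cache[offset + idx] = row


def get_item_at_index(index):
    if index < 0:
        return None
    try:
        return cache[index]
    except KeyError:
        load_page(index, PAGE_SIZE)
        return cache.get(index, None)


print(get_item_at_index(130))   # miss: loads rows 130..193, returns "row-130"
print(get_item_at_index(131))   # hit: served straight from the LRU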
예제 #32
0
class SimplePluginBase(PluginBase):
    iterator = None
    
    @classmethod
    def parse_url(cls, url):
        """
        Checks if an url can be downloaded by this plugin.
        
        Returns the remote id if the url corresponds to a single post,
        a Dynamic object that can be passed to search if the url
        corresponds to multiple posts, or None if this plugin can't
        download or create a search using this url.
        """
        
        return None
    
    def __init__(self, session):
        super().__init__(session)
        
        # (category, tag) -> RemoteTag
        self._tag_cache = LRU(100)
    
    def _get_tag(self, category, tagstr):
        tag = self._tag_cache.get((category, tagstr))
        
        if tag is None:
            tag = self.session.query(RemoteTag) \
                    .filter(
                        RemoteTag.source==self.source,
                        RemoteTag.category==category,
                        RemoteTag.tag==tagstr
                    ).one_or_none()
            
            if tag is None:
                tag = RemoteTag(source=self.source, category=category, tag=tagstr)
                self.session.add(tag)
            
            self._tag_cache[category, tagstr] = tag
        
        return tag
    
    def _get_post(self, original_id):
        return self.session.query(RemotePost) \
                .filter(
                    RemotePost.source == self.source,
                    RemotePost.original_id == original_id
                ).one_or_none()
    
    def download(self, id=None, remote_post=None, preview=False):
        """
        Creates or updates a RemotePost entry along with all the associated Files,
        and downloads all files and thumbnails that aren't present yet.
        
        If remote_post is passed, its original_id will be used and it will be
        updated in place.
        
        If preview is set to True, then only the thumbnails are downloaded.
        
        Returns the downloaded RemotePost object.
        """
        
        raise NotImplementedError
    
    def search_form(self):
        """
        Returns the form or a list of forms used for searches and subscriptions.
        (e.g.: user id, a search string, or advanced options available on the website)
        """
        
        return None
    
    def get_search_details(self, options):
        """
        Returns a SearchDetails object with extra details about the search that would
        be performed by this set of options (e.g.: user timeline).
        May return None if no specific information is found (e.g.: global searches).
        """
        
        return None
    
    def search(self, options):
        """
        Creates a temporary search for a given set of search options.
        
        Returns a post iterator object.
        """
        
        if self.iterator is None:
            raise NotImplementedError
        
        iterator = self.iterator(self, options=options)
        iterator.init()
        iterator.reconfigure(direction=FetchDirection.older, num_posts=None)
        return iterator
    
    def subscription_repr(self, options):
        """
        Returns a simple representation of the subscription, used to find duplicate
        subscriptions.
        """
        
        raise NotImplementedError
    
    def subscribe(self, name, options=None, iterator=None):
        """
        Creates a Subscription entry for the given search options identified by the given name,
        should not get any posts from the post source.
        """
        
        if iterator is None:
            iterator = self.iterator(self, options=options)
        
        iterator.init()
        
        sub = Subscription(
            source=self.source,
            name=name,
            repr=self.subscription_repr(iterator.options),
            options=iterator.options.to_json(),
            state=iterator.state.to_json()
        )
        
        self.session.add(sub)
        self.session.flush()
        
        iterator.subscription = sub
        
        return iterator
    
    def create_iterator(self, subscription, direction=FetchDirection.newer, num_posts=None):
        """
        Gets the post iterator for a specific subscription.
        
        Returns a post iterator object.
        """
        
        if self.iterator is None:
            raise NotImplementedError
        
        iterator = self.iterator(self, subscription=subscription)
        iterator.init()
        iterator.reconfigure(direction=direction, num_posts=num_posts)
        return iterator
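Note: _get_tag above is a read-through cache keyed by a (category, tag) tuple: check the LRU, fall back to the database, create the row on a true miss, then remember the result. A minimal sketch of that lookup-or-create shape, with a dict standing in for the SQLAlchemy session (all names are illustrative):

from lru import LRU

_db = {}                 # stand-in for the RemoteTag table
_tag_cache = LRU(100)


def get_tag(category, tagstr):
    tag = _tag_cache.get((category, tagstr))
    if tag is None:
        # "query" the backing store, creating the record on a true miss
        tag = _db.get((category, tagstr))
        if tag is None:
            tag = {"category": category, "tag": tagstr}
            _db[(category, tagstr)] = tag
        _tag_cache[category, tagstr] = tag
    return tag


print(get_tag("artist", "alice"))                                # created and cached
print(get_tag("artist", "alice") is get_tag("artist", "alice"))  # True: same object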
예제 #33
0
class MeituClipBatchIter(mx.io.DataIter):
    def __init__(self,
                 datadir,
                 label_file,
                 batch_size=4,
                 n_frame=32,
                 crop_size=112,
                 scale_size=128,
                 train=True,
                 lru_buffer=120,
                 gpu_id=0):
        super(MeituClipBatchIter, self).__init__(batch_size)
        self.datadir = datadir
        self.label_file = label_file
        self.batch_size = batch_size
        self.n_frame = n_frame
        self.crop_size = crop_size
        self.scale_size = scale_size
        self.train = train
        self.max_label = 0
        self.clip_lst = []
        self.load_data()

        def evicted(k, v):
            print('pop shape', k)
            del v

        self.nvvl_loader_dict = LRU(lru_buffer, evicted)
        self.gpu_id = gpu_id
        self.data_size = len(self.clip_lst)
        self.process_pool = multiprocessing.Pool(processes=10)

    def load_data(self):
        if self.train:
            video_dir = os.path.join(self.datadir, 'train_collection')
        else:
            video_dir = os.path.join(self.datadir, 'val_collection')
        with open(self.label_file, 'r') as fin:
            for line in fin.readlines():
                vid_info = line.split(',')
                file_name = os.path.join(video_dir, vid_info[0])
                labels = [int(id) for id in vid_info[1:]]
                self.max_label = max(self.max_label, max(labels))
                self.clip_lst.append((file_name, labels))
            self.max_label = self.max_label + 1
        logger.info("load data from %s, num_clip_lst %d" %
                    (video_dir, len(self.clip_lst)))

    @property
    def provide_data(self):
        return [
            mx.io.DataDesc(name='data',
                           shape=(self.batch_size, 3, self.n_frame,
                                  self.crop_size, self.crop_size),
                           dtype=np.float32,
                           layout='NCTHW')
        ]

    @property
    def provide_label(self):
        return [
            mx.io.DataDesc(name='tags',
                           shape=(self.batch_size, self.max_label),
                           dtype=np.float32,
                           layout='NL')
        ]

    def reset(self):
        self.clip_p = 0  # self.clip_p is the index to read batch data
        if self.train:
            random.shuffle(self.clip_lst)

    def next(self):
        """Get next data batch from iterator
        :return: DataBatch raise StopIteration if the end of the data is reached
        """
        # if self.clip_p<len(self.clip_lst):
        #     batch_clips = self.clip_lst[self.clip_p:min(self.data_size,self.clip_p+self.batch_size)]
        #     if len(batch_clips)<self.batch_size:
        #         batch_clips += random.sample(self.clip_lst,self.batch_size-len(batch_clips))
        #     #padding to batch_size
        #     file_names,labels = zip(*batch_clips)
        #     data = self.sample_clips(file_names)
        #     #data type is cupy,
        #     ret = mx.io.DataBatch([mx.nd.array(cupy.asnumpy(data))],[mx.nd.array(label)])
        #     self.clip_p +=self.batch_size
        #     return ret
        # else:
        #     raise StopIteration
        #Iter single video
        # def sample_clips(self,file_names):
        #     self.process_pool.map(self.decode_func,[(file_names[p], p) for p in range(len(file_names))])
        #
        # def decode_func(self,filename,p):

        if self.clip_p < self.data_size:
            filename, tags = self.clip_lst[self.clip_p]
            if (self.clip_p + 1) % 6 == 0:
                with cupy.cuda.Device(self.gpu_id):
                    cupy.get_default_memory_pool().free_all_blocks()
            video_shape = pynvvl.video_size_from_file(filename)
            loader = self.nvvl_loader_dict.get(video_shape, None)
            if loader is None:
                loader = pynvvl.NVVLVideoLoader(device_id=self.gpu_id,
                                                log_level='error')
                self.nvvl_loader_dict[video_shape] = loader
            count = loader.frame_count(filename)
            #aug for frame start index
            if self.train:
                if count <= self.n_frame:
                    frame_start = 0
                else:
                    frame_start = np.random.randint(0,
                                                    count - self.n_frame,
                                                    dtype=np.int32)
            else:
                frame_start = (count - self.n_frame) // 2

            # rescale argumentation
            width, height = video_shape
            ow, oh = width, height
            if width < height:
                ow = self.scale_size
                oh = int(self.scale_size * height / width)
            else:
                oh = self.scale_size
                ow = int(self.scale_size * width / height)
            #random crop augu
            if self.train:
                crop_x = np.random.randint(0,
                                           ow - self.crop_size,
                                           dtype=np.int32)
                crop_y = np.random.randint(0,
                                           oh - self.crop_size,
                                           dtype=np.int32)
            else:
                crop_x = (ow - self.crop_size) // 2
                crop_y = (oh - self.crop_size) // 2
            video = loader.read_sequence(filename,
                                         frame_start,
                                         count=self.n_frame,
                                         sample_model='dense',
                                         horiz_flip=False,
                                         scale_height=oh,
                                         scale_width=ow,
                                         crop_y=crop_y,
                                         crop_x=crop_x,
                                         crop_height=self.crop_size,
                                         crop_width=self.crop_size,
                                         scale_method='Linear',
                                         normalized=False)
            labels = np.zeros(shape=(self.max_label), dtype=np.float32)
            for tag_index in tags:
                labels[tag_index] = 1
            video = (video.transpose(0, 2, 3, 1) / 255 - cupy.array(
                [0.485, 0.456, 0.406])) / cupy.array([0.229, 0.224, 0.225])
            video = video.transpose(3, 0, 1, 2)
            ret = mx.io.DataBatch([
                mx.nd.array(video.reshape(1, *video.shape)),
            ], [
                mx.nd.array(labels),
            ])
            self.clip_p += 1
            return ret
        else:
            raise StopIteration
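Note: nvvl_loader_dict above uses lru-dict's optional eviction callback (LRU(size, callback)): when a video-shape key is displaced, evicted is called with the key and the dropped loader. A tiny standalone demonstration of that callback behaviour (keys and values are arbitrary):

from lru import LRU


def evicted(key, value):
    # Called by lru-dict whenever an entry is displaced from the cache
    print("evicting", key, "->", value)


cache = LRU(2, callback=evicted)
cache["a"] = 1
cache["b"] = 2
cache["c"] = 3        # prints: evicting a -> 1  (the least recently used entry)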
예제 #34
0
파일: mtp.py 프로젝트: donaldmunro/pymtpfs
class MTPStorage(MTPEntry, MTPRefresh):
   def __init__(self, mtp, pstorage=None):
      global PATH_CACHE_SIZE
      MTPRefresh.__init__(self)
      self.mtp = mtp
      self.libmtp = mtp.libmtp
      self.open_device = mtp.open_device
      self.directories = None
      self.contents = LRU(PATH_CACHE_SIZE)
      if pstorage is None:
         MTPEntry.__init__(self, -3, '/')
         self.storage = None
         self.directories = []
         for dirname in self.mtp.get_storage_descriptions():
            #def __init__(self, path, id=-2, storageid=-2, folderid=-2, mtp=None, timestamp=0, is_refresh=True):
            self.directories.append(MTPFolder(path=dirname, id= -3, storageid= -3, folderid= -2, is_refresh=False)) 
         self.root = None
         self.contents[utf8(os.sep)] = self
      else:         
         self.storage = pstorage
         storage = pstorage.contents
         self.type = storage.StorageType
         self.freespace = storage.FreeSpaceInBytes
         self.capacity = storage.MaxCapacity
         path = os.sep + storage.StorageDescription
         MTPEntry.__init__(self, storage.id, path, storageid=None, folderid=0)
         self.root = MTPFolder(path=path, id=0, storageid=storage.id, folderid=0, mtp=self.mtp)
         self.contents[utf8(path)] = self.root
      
   def is_directory(self):
      return True     
   
   def get_attributes(self):
      return { 'st_atime': self.timestamp, 'st_ctime': self.timestamp, 'st_gid': os.getgid(),
               'st_mode': stat.S_IFDIR | 0755, 'st_mtime': self.timestamp, 'st_nlink': 1,
               'st_size': 0, 'st_uid': os.getuid() }
      
   def get_directories(self):
      if self.directories is None:
         if self.root is None:
            return ()
         else:
            return self.root.get_directories()
      else:
         return self.directories
      
   def get_files(self):
      if self.root is None:
         return ()
      else:
         return self.root.get_files()
      
   def add_file(self, file):
      if not self.root is None:
         self.root.add_file(self, file)
               
   def __str__(self):            
      s = "MTPStorage %s: id=%d, device=%s%s" % (self.name, self.id, self.open_device, os.linesep)      
      return s
         
   def find_entry(self, path):
      path = utf8(path)
      self.log.debug('find_entry(%s)' % (path,))
      try:
         if path.strip() == '':
            path = os.sep + self.name 
         entry = self.contents.get(path)
         if entry is None:         
            components = [comp for comp in path.split(os.sep) if len(comp.strip()) != 0]
            if len(components) == 0:
               return None
            if components[0] != self.name:
               raise LookupError('Invalid storage (expected %s, was %s)' % (self.name, components[0]))
            entry = self.__find_entry(self.root, components[1:])
         else:
            if entry.is_directory() and entry.must_refresh:
               entry.refresh()
         return entry
      except:
         self.log.exception("")
         return None
   
   def __find_entry(self, entry, components):
      self.log.debug("__find_entry(%s, %s)" % (entry, str(components)))
      if len(components) == 0:
         return entry
      name = components[0]
      path = entry.path + os.sep + name 
      en = self.contents.get(utf8(path))
      if not en is None:
         if en.is_directory() and en.must_refresh:
            en.refresh()
         return self.__find_entry(en, components[1:])
      en = entry.find_directory(name)
      if not en is None and en.is_directory():
         self.contents[utf8(path)] = en
         if en.must_refresh:
            en.refresh()
         return self.__find_entry(en, components[1:])
      return entry.find_file(name)
   
   def remove_entry(self, path):
      try:
         return self.contents.pop(utf8(path))
      except:
         #self.log.warn('MTPStorage.remove_entry: %s not found' % (path,))
         return None
      
   def refresh(self):
      if not self.root is None and self.must_refresh:
         self.must_refresh = not self.root.refresh()
   
   def close(self):
      pass
예제 #35
0
class MTPFS(LoggingMixIn, Operations):   
   def __init__(self, mtp, mountpoint, is_debug=False, logger=None):
      global VERBOSE
      self.mtp = mtp
      self.is_debug = is_debug
      self.tempdir = tempfile.mkdtemp(prefix='pymtpfs')
      if not bool(self.tempdir) or not os.path.exists(self.tempdir):
         self.tempdir = tempfile.gettempdir()
      self.read_timeout = 2
      self.write_timeout = 2      
      self.openfile_t = namedtuple('openfile', 'handle, path, mtp_path, readonly')
      self.openfiles = {}
      self.log = logger
      self.created = LRU(1000) 
      if VERBOSE:
         print("Mounted %s on %s" % (self.mtp, mountpoint))
      self.log.info("Mounted %s on %s" % (self.mtp, mountpoint))
   
   def __openfile_by_path(self, path):
      return next((en for en in self.openfiles.values() if en.mtp_path == path), None)

   def destroy(self, path):
      self.mtp.close()
      for openfile in self.openfiles.values():
         try:
            os.close(openfile.handle)
         except:
            self.log.exception("")
      try:
         if self.tempdir != tempfile.gettempdir():
            shutil.rmtree(self.tempdir)
      except:
         self.log.exception("")
      return 0
      
   def chmod(self, path, mode):
      return 0

   def chown(self, path, uid, gid):
      return 0
   
#   @log_calls
   def getattr(self, path, fh=None):
      attrib = {}
      path = fix_path(path, self.log)
      entry = self.mtp.get_path(path)
      if entry is None:
         entry = self.created.get(path)
      if entry is None:         
         raise FuseOSError(errno.ENOENT)
      else:
         try:
            attrib = entry.get_attributes()
         except Exception, e:
            self.log.exception("")
            attrib = {}
            exmess = ""
            try:
               exmess = str(e.message)
            except:
               exmess = "Unknown"
            self.log.error('Error reading MTP attributes for %s (%s)' % (path, exmess))
            raise FuseOSError(errno.ENOENT)            
      return attrib      
예제 #36
0
class Network(Service, NetworkAPI):
    logger = logging.getLogger("ddht.Network")

    _bootnodes: Tuple[ENRAPI, ...]

    def __init__(
        self,
        client: ClientAPI,
        bootnodes: Collection[ENRAPI],
    ) -> None:
        self.client = client

        self._bootnodes = tuple(bootnodes)
        self.routing_table = KademliaRoutingTable(
            self.client.enr_manager.enr.node_id,
            ROUTING_TABLE_BUCKET_SIZE,
        )
        self._routing_table_ready = trio.Event()
        self._last_pong_at = LRU(2048)

    #
    # Proxied ClientAPI properties
    #
    @property
    def local_node_id(self) -> NodeID:
        return self.client.local_node_id

    @property
    def events(self) -> EventsAPI:
        return self.client.events

    @property
    def dispatcher(self) -> DispatcherAPI:
        return self.client.dispatcher

    @property
    def enr_manager(self) -> ENRManagerAPI:
        return self.client.enr_manager

    @property
    def pool(self) -> PoolAPI:
        return self.client.pool

    @property
    def enr_db(self) -> ENRDatabaseAPI:
        return self.client.enr_db

    #
    # High Level API
    #
    async def bond(self,
                   node_id: NodeID,
                   *,
                   endpoint: Optional[Endpoint] = None) -> bool:
        try:
            pong = await self.ping(node_id, endpoint=endpoint)
        except trio.TooSlowError:
            self.logger.debug("Bonding with %s timed out during ping",
                              humanize_node_id(node_id))
            return False

        try:
            enr = self.enr_db.get_enr(node_id)
        except KeyError:
            try:
                enr = await self.get_enr(node_id, endpoint=endpoint)
            except trio.TooSlowError:
                self.logger.debug(
                    "Bonding with %s timed out during ENR retrieval",
                    humanize_node_id(node_id),
                )
                return False
        else:
            if pong.enr_seq > enr.sequence_number:
                try:
                    enr = await self.get_enr(node_id, endpoint=endpoint)
                except trio.TooSlowError:
                    self.logger.debug(
                        "Bonding with %s timed out during ENR retrieval",
                        humanize_node_id(node_id),
                    )
                else:
                    self.enr_db.set_enr(enr)

        self.routing_table.update(enr.node_id)

        self._routing_table_ready.set()
        return True

    async def _bond(self, node_id: NodeID, endpoint: Endpoint) -> None:
        await self.bond(node_id, endpoint=endpoint)

    async def ping(self,
                   node_id: NodeID,
                   *,
                   endpoint: Optional[Endpoint] = None) -> PongMessage:
        if endpoint is None:
            endpoint = self._endpoint_for_node_id(node_id)
        response = await self.client.ping(endpoint, node_id)
        return response.message

    async def find_nodes(
        self,
        node_id: NodeID,
        *distances: int,
        endpoint: Optional[Endpoint] = None,
    ) -> Tuple[ENRAPI, ...]:
        if not distances:
            raise TypeError("Must provide at least one distance")

        if endpoint is None:
            endpoint = self._endpoint_for_node_id(node_id)
        responses = await self.client.find_nodes(endpoint,
                                                 node_id,
                                                 distances=distances)
        return tuple(enr for response in responses
                     for enr in response.message.enrs)

    async def get_enr(self,
                      node_id: NodeID,
                      *,
                      endpoint: Optional[Endpoint] = None) -> ENRAPI:
        enrs = await self.find_nodes(node_id, 0, endpoint=endpoint)
        if not enrs:
            raise Exception("Invalid response")
        # The reduce step accounts for duplicate ENR records in the responses,
        # keeping only one entry per node.
        return _reduce_enrs(enrs)[0]

    async def recursive_find_nodes(self, target: NodeID) -> Tuple[ENRAPI, ...]:
        self.logger.debug("Recursive find nodes: %s", humanize_node_id(target))

        queried_node_ids = set()
        unresponsive_node_ids = set()
        received_enrs: List[ENRAPI] = []
        received_node_ids: Set[NodeID] = set()

        async def do_lookup(node_id: NodeID) -> None:
            queried_node_ids.add(node_id)

            distance = compute_log_distance(node_id, target)
            try:
                enrs = await self.find_nodes(node_id, distance)
            except trio.TooSlowError:
                unresponsive_node_ids.add(node_id)
                return

            for enr in enrs:
                received_node_ids.add(enr.node_id)
                try:
                    self.enr_db.set_enr(enr)
                except OldSequenceNumber:
                    received_enrs.append(self.enr_db.get_enr(enr.node_id))
                else:
                    received_enrs.append(enr)

        for lookup_round_counter in itertools.count():
            candidates = iter_closest_nodes(target, self.routing_table,
                                            received_node_ids)
            responsive_candidates = itertools.dropwhile(
                lambda node: node in unresponsive_node_ids, candidates)
            closest_k_candidates = take(self.routing_table.bucket_size,
                                        responsive_candidates)
            closest_k_unqueried_candidates = (
                candidate for candidate in closest_k_candidates
                if candidate not in queried_node_ids)
            nodes_to_query = tuple(take(3, closest_k_unqueried_candidates))

            if nodes_to_query:
                self.logger.debug(
                    "Starting lookup round %d for %s",
                    lookup_round_counter + 1,
                    humanize_node_id(target),
                )
                async with trio.open_nursery() as nursery:
                    for peer in nodes_to_query:
                        nursery.start_soon(do_lookup, peer)
            else:
                self.logger.debug(
                    "Lookup for %s finished in %d rounds",
                    humanize_node_id(target),
                    lookup_round_counter,
                )
                break

        # Now sort and return the ENR records in order of closeness to the target.
        return tuple(
            sorted(
                _reduce_enrs(received_enrs),
                key=lambda enr: compute_distance(enr.node_id, target),
            ))

    #
    # Long Running Processes
    #
    async def run(self) -> None:
        self.manager.run_daemon_child_service(self.client)
        await self.client.wait_listening()

        self.manager.run_daemon_task(self._ping_oldest_routing_table_entry)
        self.manager.run_daemon_task(self._track_last_pong)
        self.manager.run_daemon_task(self._manage_routing_table)
        self.manager.run_daemon_task(self._pong_when_pinged)
        self.manager.run_daemon_task(self._serve_find_nodes)

        await self.manager.wait_finished()

    async def _periodically_report_routing_table(self) -> None:
        async for _ in every(30, initial_delay=30):
            non_empty_buckets = tuple((idx, bucket)
                                      for idx, bucket in enumerate(
                                          reversed(self.routing_table.buckets))
                                      if bucket)
            total_size = sum(len(bucket) for idx, bucket in non_empty_buckets)
            bucket_info = "|".join(
                tuple(f"{idx}:{len(bucket)}"
                      for idx, bucket in non_empty_buckets))
            self.logger.debug(
                "routing-table-info: size=%d  buckets=%s",
                total_size,
                bucket_info,
            )

    async def _ping_oldest_routing_table_entry(self) -> None:
        await self._routing_table_ready.wait()

        while self.manager.is_running:
            # Here we preserve the lazy iteration while still checking that the
            # iterable is not empty before passing it into `min` below which
            # throws an ambiguous `ValueError` otherwise if the iterable is
            # empty.
            nodes_iter = self.routing_table.iter_all_random()
            try:
                first_node_id = first(nodes_iter)
            except StopIteration:
                await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)
                continue
            else:
                least_recently_ponged_node_id = min(
                    cons(first_node_id, nodes_iter),
                    key=lambda node_id: self._last_pong_at.get(node_id, 0),
                )

            too_old_at = trio.current_time() - ROUTING_TABLE_KEEP_ALIVE
            try:
                last_pong_at = self._last_pong_at[
                    least_recently_ponged_node_id]
            except KeyError:
                pass
            else:
                if last_pong_at > too_old_at:
                    await trio.sleep(last_pong_at - too_old_at)
                    continue

            did_bond = await self.bond(least_recently_ponged_node_id)
            if not did_bond:
                self.routing_table.remove(least_recently_ponged_node_id)

    async def _track_last_pong(self) -> None:
        async with self.dispatcher.subscribe(PongMessage) as subscription:
            async for message in subscription:
                self._last_pong_at[
                    message.sender_node_id] = trio.current_time()

    async def _manage_routing_table(self) -> None:
        # First load all the bootnode ENRs into our database
        for enr in self._bootnodes:
            try:
                self.enr_db.set_enr(enr)
            except OldSequenceNumber:
                pass

        # Now repeatedly try to bond with each bootnode until one succeeds.
        async with trio.open_nursery() as nursery:
            while self.manager.is_running:
                for enr in self._bootnodes:
                    if enr.node_id == self.local_node_id:
                        continue
                    endpoint = self._endpoint_for_enr(enr)
                    nursery.start_soon(self._bond, enr.node_id, endpoint)

                with trio.move_on_after(10):
                    await self._routing_table_ready.wait()
                    break

        # TODO: Need better logic here for more quickly populating the
        # routing table.  Should start off aggressively filling in the
        # table, only backing off once the table contains some minimum
        # number of records **or** searching for new records fails to find
        # new nodes.  Maybe use a TokenBucket
        async for _ in every(30):
            async with trio.open_nursery() as nursery:
                target_node_id = NodeID(secrets.token_bytes(32))
                found_enrs = await self.recursive_find_nodes(target_node_id)
                for enr in found_enrs:
                    endpoint = self._endpoint_for_enr(enr)
                    nursery.start_soon(self._bond, enr.node_id, endpoint)

    async def _pong_when_pinged(self) -> None:
        async with self.dispatcher.subscribe(PingMessage) as subscription:
            async for request in subscription:
                await self.dispatcher.send_message(
                    request.to_response(
                        PongMessage(
                            request.message.request_id,
                            self.enr_manager.enr.sequence_number,
                            request.sender_endpoint.ip_address,
                            request.sender_endpoint.port,
                        )))

    async def _serve_find_nodes(self) -> None:
        async with self.dispatcher.subscribe(FindNodeMessage) as subscription:
            async for request in subscription:
                response_enrs: List[ENRAPI] = []
                distances = set(request.message.distances)
                if len(distances) != len(request.message.distances):
                    self.logger.debug(
                        "Ignoring invalid FindNodeMessage from %s@%s: duplicate distances",
                        humanize_node_id(request.sender_node_id),
                        request.sender_endpoint,
                    )
                    continue
                elif not distances:
                    self.logger.debug(
                        "Ignoring invalid FindNodeMessage from %s@%s: empty distances",
                        humanize_node_id(request.sender_node_id),
                        request.sender_endpoint,
                    )
                    continue
                elif any(distance > self.routing_table.num_buckets
                         for distance in distances):
                    self.logger.debug(
                        "Ignoring invalid FindNodeMessage from %s@%s: distances: %s",
                        humanize_node_id(request.sender_node_id),
                        request.sender_endpoint,
                        distances,
                    )
                    continue

                for distance in distances:
                    if distance == 0:
                        response_enrs.append(self.enr_manager.enr)
                    elif distance <= self.routing_table.num_buckets:
                        node_ids_at_distance = self.routing_table.get_nodes_at_log_distance(
                            distance, )
                        for node_id in node_ids_at_distance:
                            response_enrs.append(self.enr_db.get_enr(node_id))
                    else:
                        raise Exception("Should be unreachable")

                await self.client.send_found_nodes(
                    request.sender_endpoint,
                    request.sender_node_id,
                    enrs=response_enrs,
                    request_id=request.message.request_id,
                )

    #
    # Utility
    #
    def _endpoint_for_enr(self, enr: ENRAPI) -> Endpoint:
        try:
            ip_address = enr[IP_V4_ADDRESS_ENR_KEY]
            port = enr[UDP_PORT_ENR_KEY]
        except KeyError:
            raise Exception("Missing endpoint address information: ")

        return Endpoint(ip_address, port)

    def _endpoint_for_node_id(self, node_id: NodeID) -> Endpoint:
        enr = self.enr_db.get_enr(node_id)
        return self._endpoint_for_enr(enr)
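Note: _last_pong_at above is an LRU of node-id -> last-pong timestamp, and _ping_oldest_routing_table_entry selects the node with the smallest timestamp, treating missing entries as 0. A small sketch of that selection step outside of trio (the node ids and times are made up):

from lru import LRU

last_pong_at = LRU(2048)
last_pong_at[b"node-a"] = 105.0
last_pong_at[b"node-c"] = 250.0     # node-b has never answered, so it is missing

routing_table_nodes = [b"node-a", b"node-b", b"node-c"]

# Nodes we have never heard a pong from default to 0, so they are pinged first
least_recently_ponged = min(
    routing_table_nodes,
    key=lambda node_id: last_pong_at.get(node_id, 0),
)
print(least_recently_ponged)   # b"node-b"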
예제 #37
0
class Osu(Cog):
    """osu! API commands."""
    def __init__(self, config, attr):
        self.attr = attr
        self.api_key = config.register("api_key")
        self.osuapi = osuapi.OsuApi(
            self.api_key(),
            connector=osuapi.AHConnector(
                aiohttp.ClientSession(loop=asyncio.get_event_loop())))
        self._osu_presence_username_cache = LRU(4 << 10)

        self._beatmap_cache = LRU(256)

    def cog_unload(self):
        self.osuapi.close()

    async def _set_osu_username(self, user, username):
        """Set :user's osu account to :username. Returns api result."""
        osu_acct = await self._lookup_acct(username)

        await self.attr.set_attributes(user, osu_id=osu_acct.user_id)

        return osu_acct

    async def _lookup_acct(self, username, mode=OsuMode.osu):
        res = await self.osuapi.get_user(username, mode=mode)

        if len(res) == 0:
            raise errors.BadArgument(
                "There is no osu user by the name {}".format(username))

        return res[0]

    @Cog.listener()
    async def on_member_update(self, before, member):
        if member.activity and member.activity.name == "osu!" and isinstance(
                member.activity, discord.Activity):
            rp = OsuRichPresence(member.activity)
            if rp.username:
                self._osu_presence_username_cache[member.id] = rp.username

    @command()
    async def setosu(self, ctx, *, username: str):
        """Set your osu account to be remembered by the bot."""
        with ctx.typing():
            osu_acct = await self._set_osu_username(ctx.message.author,
                                                    username)

        await ctx.send(
            "OK, set your osu account to {0.username} ({0.user_id})".format(
                osu_acct))

    @command(aliases=['osuwatch'])
    async def watchosu(self, ctx, *, account: StringOrMentionConverter = None):
        """Shows a osu spectate link

        Use + to give a raw account name. e.g.:
        osu +cookiezi
        osu @ppy
        """
        account = account or ctx.message.author

        if isinstance(account, discord.abc.User):
            user_osu_id = await self.attr.get_attribute(account, 'osu_id')

            if user_osu_id is None:
                await ctx.send("I don't know your osu name! "
                               "Use {}setosu <name> to set it!".format(
                                   ctx.prefix))
                return
        else:
            user_osu_id = account

        await ctx.send("<osu://spectate/{}>".format(user_osu_id))

    async def _get_osu_account(self, ctx, user, mode):
        osu_user_id = await self.attr.get_attribute(user, 'osu_id')

        if osu_user_id:
            return await self._lookup_acct(osu_user_id, mode=mode)

        if ctx.author.id != user.id:
            raise errors.BadArgument(
                "I don't know {}'s osu username!".format(user))

        presence_username = self._osu_presence_username_cache.get(user.id)

        clean_prefix = utils.clean_double_backtick(ctx.prefix)

        if presence_username:
            await ctx.send(
                "I don't know your osu username! I'm setting your osu username "
                "to {}, which rich presence showed you recently playing as. "
                "If this is wrong use ``{}setosu <username>``".format(
                    presence_username, clean_prefix))
            return await self._set_osu_username(user, presence_username)

        await ctx.send(
            "I don't know your osu username! I'm setting your osu username "
            "to {}, if this is wrong use ``{}setosu <username>``".format(
                user.name, clean_prefix))
        return await self._set_osu_username(user, user.name)

    async def _get_beatmap(self, beatmap_id):
        if beatmap_id in self._beatmap_cache:
            return self._beatmap_cache[beatmap_id]
        beatmaps = await self.osuapi.get_beatmaps(beatmap_id=beatmap_id)
        if not beatmaps:
            return None
        self._beatmap_cache[beatmap_id] = beatmaps[0]
        return beatmaps[0]

    @command(aliases=['taikorecent', 'ctbrecent', 'maniarecent'])
    async def osurecent(self,
                        ctx,
                        *,
                        account: StringOrMentionConverter = None):
        """Show a user's recent osu plays.

        Use + to give a raw account name. e.g.:
        osu +cookiezi
        osu @ppy
        """
        account = account or ctx.message.author

        mode = {
            'osurecent': OsuMode.osu,
            'taikorecent': OsuMode.taiko,
            'maniarecent': OsuMode.mania,
            'ctbrecent': OsuMode.ctb
        }[ctx.invoked_with]

        with ctx.typing():
            if account is None:
                raise errors.BadArgument("Invalid mention...!")

            if isinstance(account, discord.abc.User):
                osu_acct = await self._get_osu_account(ctx, account, mode)
            else:
                osu_acct = await self._lookup_acct(account, mode=mode)

            recent_scores = await self.osuapi.get_user_recent(osu_acct.user_id,
                                                              mode=mode)

        embed = discord.Embed()
        embed.title = osu_acct.username
        embed.url = "https://osu.ppy.sh/u/%s" % osu_acct.user_id
        embed.color = hash(str(osu_acct.user_id)) % (1 << 24)
        if isinstance(account, discord.abc.User):
            embed.set_author(
                name=str(account),
                icon_url=account.avatar_url_as(static_format="png"))

        if not recent_scores:
            embed.description = "%s hasn't played %s recently" % (
                osu_acct.username, mode.name)
        else:
            map_descriptions = []

            expected_len = 0

            for score in recent_scores:
                beatmap = await self._get_beatmap(score.beatmap_id)
                if not beatmap:
                    continue

                entry = (
                    "**{rank}{mods} - {score.score:,} ({percent:.2f}%) {score.maxcombo}x - {map.difficultyrating:.2f} Stars** - {ago}\n"
                    "[{map.artist} - {map.title}[{map.version}]]({map.url}) by [{map.creator}](https://osu.ppy.sh/u/{map.creator_id})"
                ).format(rank=score.rank.upper(),
                         mods=" +{:s}".format(score.enabled_mods)
                         if score.enabled_mods.value else "",
                         percent=100 * score.accuracy(mode),
                         ago=humanize.naturaltime(score.date + DATE_OFFSET),
                         score=score,
                         map=beatmap)

                if expected_len + len(entry) + 1 <= 2048:
                    map_descriptions.append(entry)
                    expected_len += len(entry) + 1
                else:
                    break

            embed.description = "\n".join(map_descriptions)

        await ctx.send(embed=embed)

    @command(pass_context=True, aliases=['taiko', 'ctb', 'mania'])
    async def osu(self, ctx, *, account: StringOrMentionConverter = None):
        """Show a user's osu profile.

        Use + to give a raw account name. e.g.:
        osu +cookiezi
        osu @ppy
        """
        account = account or ctx.message.author

        mode = {
            'osu': OsuMode.osu,
            'taiko': OsuMode.taiko,
            'mania': OsuMode.mania,
            'ctb': OsuMode.ctb
        }[ctx.invoked_with]

        with ctx.typing():
            if account is None:
                raise errors.BadArgument("Invalid mention...!")

            if isinstance(account, discord.abc.User):
                osu_acct = await self._get_osu_account(ctx, account, mode)
            else:
                osu_acct = await self._lookup_acct(account, mode=mode)

            usrscore = await self.osuapi.get_user_best(osu_acct.user_id,
                                                       limit=100,
                                                       mode=mode)

        embed = discord.Embed()
        embed.title = osu_acct.username
        embed.url = "https://osu.ppy.sh/u/%s" % osu_acct.user_id
        embed.color = hash(str(osu_acct.user_id)) % (1 << 24)
        if isinstance(account, discord.abc.User):
            embed.set_author(
                name=str(account),
                icon_url=account.avatar_url_as(static_format="png"))
        embed.set_thumbnail(url="http://a.ppy.sh/%s?_=%s" %
                            (osu_acct.user_id, time.time()))

        if not usrscore:
            embed.description = "%s has never played %s" % (osu_acct.username,
                                                            ctx.invoked_with)
        else:
            embed.description = "#{0.pp_rank:,} ({0.pp_raw} pp)".format(
                osu_acct)
            fave_mod = collections.Counter(
                play.enabled_mods for play in usrscore).most_common()[0][0]
            bplay = usrscore[0]
            embed.add_field(name="Plays",
                            value="{:,}".format(osu_acct.playcount))
            embed.add_field(name="Hits",
                            value="{:,}".format(osu_acct.total_hits))
            embed.add_field(name="Acc",
                            value="{:.2f}".format(osu_acct.accuracy))
            embed.add_field(name="Best Play",
                            value="{:,}pp {:s}".format(bplay.pp,
                                                       bplay.enabled_mods))
            embed.add_field(name="Favorite Mod", value="{:l}".format(fave_mod))

        await ctx.send(embed=embed)
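Note: _get_beatmap above is a check-then-store async cache: consult the LRU first and only call the remote API on a miss. A minimal sketch of that shape, with a stubbed fetch standing in for osuapi.get_beatmaps (the stub and its data are illustrative):

import asyncio
from lru import LRU

_beatmap_cache = LRU(256)


async def fetch_beatmaps(beatmap_id):
    # Stand-in for the real osu! API call
    return [{"beatmap_id": beatmap_id, "title": "example"}]


async def get_beatmap(beatmap_id):
    if beatmap_id in _beatmap_cache:
        return _beatmap_cache[beatmap_id]
    beatmaps = await fetch_beatmaps(beatmap_id)
    if not beatmaps:
        return None
    _beatmap_cache[beatmap_id] = beatmaps[0]
    return beatmaps[0]


async def main():
    print(await get_beatmap(42))   # miss: fetched, then cached
    print(await get_beatmap(42))   # hit: returned from the LRU

asyncio.run(main())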
예제 #38
0
class PolygonIndex(object):
    include_only_properties = None
    simplify_tolerance = 0.0001
    preserve_topology = True
    persistent_polygons = False
    cache_size = 0
    fix_invalid_polygons = False

    INDEX_FILENAME = None
    POLYGONS_DB_DIR = 'polygons'

    def __init__(self, index=None, polygons=None, polygons_db=None, save_dir=None,
                 index_filename=None,
                 polygons_db_path=None,
                 include_only_properties=None):
        if save_dir:
            self.save_dir = save_dir
        else:
            self.save_dir = None

        if not index_filename:
            index_filename = self.INDEX_FILENAME

        self.index_path = os.path.join(save_dir or '.', index_filename)

        if not index:
            self.create_index(overwrite=True)
        else:
            self.index = index

        if include_only_properties and hasattr(include_only_properties, '__contains__'):
            self.include_only_properties = include_only_properties

        if not polygons and not self.persistent_polygons:
            self.polygons = {}
        elif polygons and not self.persistent_polygons:
            self.polygons = polygons
        elif self.persistent_polygons and self.cache_size > 0:
            self.polygons = LRU(self.cache_size)
            if polygons:
                for key, value in six.iteritems(polygons):
                    self.polygons[key] = value

            self.cache_hits = 0
            self.cache_misses = 0

            self.get_polygon = self.get_polygon_cached

        if not polygons_db_path:
            polygons_db_path = os.path.join(save_dir or '.', self.POLYGONS_DB_DIR)

        if not polygons_db:
            self.polygons_db = LevelDB(polygons_db_path)
        else:
            self.polygons_db = polygons_db

        self.setup()

        self.i = 0

    def create_index(self, overwrite=False):
        raise NotImplementedError('Children must implement')

    def index_polygon(self, polygon):
        raise NotImplementedError('Children must implement')

    def setup(self):
        pass

    def clear_cache(self, garbage_collect=True):
        if self.persistent_polygons and self.cache_size > 0:
            self.polygons.clear()
            if garbage_collect:
                gc.collect()

    def simplify_polygon(self, poly, simplify_tolerance=None, preserve_topology=None):
        if simplify_tolerance is None:
            simplify_tolerance = self.simplify_tolerance
        if preserve_topology is None:
            preserve_topology = self.preserve_topology
        return poly.simplify(simplify_tolerance, preserve_topology=preserve_topology)

    def index_polygon_properties(self, properties):
        pass

    def polygon_geojson(self, poly, properties):
        return {
            'type': 'Feature',
            'geometry': mapping(poly),
        }

    def add_polygon(self, poly, properties, cache=False, include_only_properties=None):
        if include_only_properties is not None:
            properties = {k: v for k, v in properties.iteritems() if k in include_only_properties}

        if not self.persistent_polygons or cache:
            self.polygons[self.i] = prep(poly)

        if self.persistent_polygons:
            self.polygons_db.Put(self.polygon_key(self.i), json.dumps(self.polygon_geojson(poly, properties)))

        self.polygons_db.Put(self.properties_key(self.i), json.dumps(properties))
        self.index_polygon_properties(properties)
        self.i += 1

    @classmethod
    def create_from_shapefiles(cls, inputs, output_dir,
                               index_filename=None,
                               include_only_properties=None):
        index = cls(save_dir=output_dir, index_filename=index_filename or cls.INDEX_FILENAME)
        for input_file in inputs:
            if include_only_properties is not None:
                include_props = include_only_properties.get(input_file, cls.include_only_properties)
            else:
                include_props = cls.include_only_properties

            f = fiona.open(input_file)

            index.add_geojson_like_file(f, include_only_properties=include_props)

        return index

    @classmethod
    def fix_polygon(cls, poly):
        '''
        Coerce to valid polygon
        '''
        if not poly.is_valid:
            poly = poly.buffer(0)
            if not poly.is_valid:
                return None
        return poly

    @classmethod
    def to_polygon(cls, coords, holes=None, test_point=None):
        '''
        Create shapely polygon from list of coordinate tuples if valid
        '''
        if not coords or len(coords) < 3:
            return None

        # Fix for polygons crossing the 180th meridian
        lons = [lon for lon, lat in coords]
        if (max(lons) - min(lons) > 180):
            coords = [(lon + 360.0 if lon < 0 else lon, lat) for lon, lat in coords]
            if holes:
                holes = [(lon + 360.0 if lon < 0 else lon, lat) for lon, lat in holes]

        poly = Polygon(coords, holes)
        try:
            if test_point is None:
                test_point = poly.representative_point()
            invalid = cls.fix_invalid_polygons and not poly.is_valid and not poly.contains(test_point)
        except Exception:
            invalid = True

        if invalid:
            try:
                poly_fix = cls.fix_polygon(poly)

                if poly_fix is not None and poly_fix.bounds and len(poly_fix.bounds) == 4 and poly_fix.is_valid and poly_fix.type == poly.type:
                    if test_point is None:
                        test_point = poly_fix.representative_point()

                    if poly_fix.contains(test_point):
                        poly = poly_fix
            except Exception:
                pass

        return poly

    def add_geojson_like_record(self, rec, include_only_properties=None):
        if not rec or not rec.get('geometry') or 'type' not in rec['geometry']:
            return
        poly_type = rec['geometry']['type']
        if poly_type == 'Polygon':
            coords = rec['geometry']['coordinates'][0]
            poly = self.to_polygon(coords)
            if poly is None or not poly.bounds or len(poly.bounds) != 4:
                return
            self.index_polygon(poly)
            self.add_polygon(poly, rec['properties'], include_only_properties=include_only_properties)
        elif poly_type == 'MultiPolygon':
            polys = []
            poly_coords = rec['geometry']['coordinates']
            for coords in poly_coords:
                poly = self.to_polygon(coords[0])
                if poly is None or not poly.bounds or len(poly.bounds) != 4:
                    continue
                polys.append(poly)
                self.index_polygon(poly)

            self.add_polygon(MultiPolygon(polys), rec['properties'], include_only_properties=include_only_properties)
        else:
            return

    def add_geojson_like_file(self, f, include_only_properties=None):
        '''
        Add GeoJSON features or shapefile (fiona) records from an iterable to the index
        '''

        for rec in f:
            self.add_geojson_like_record(rec, include_only_properties=include_only_properties)

    @classmethod
    def create_from_geojson_files(cls, inputs, output_dir,
                                  index_filename=None,
                                  polys_filename=DEFAULT_POLYS_FILENAME,
                                  include_only_properties=None):
        index = cls(save_dir=output_dir, index_filename=index_filename or cls.INDEX_FILENAME)
        for input_file in inputs:
            if include_only_properties is not None:
                include_props = include_only_properties.get(input_file, cls.include_only_properties)
            else:
                include_props = cls.include_only_properties

            f = json.load(open(input_file))

            index.add_geojson_like_file(f['features'], include_only_properties=include_props)

        return index

    def compact_polygons_db(self):
        self.polygons_db.CompactRange('\x00', '\xff')

    def save(self):
        self.save_index()
        self.save_properties(os.path.join(self.save_dir, DEFAULT_PROPS_FILENAME))
        if not self.persistent_polygons:
            self.save_polygons(os.path.join(self.save_dir, DEFAULT_POLYS_FILENAME))
        self.compact_polygons_db()
        self.save_polygon_properties(self.save_dir)

    def load_properties(self, filename):
        properties = json.load(open(filename))
        self.i = int(properties.get('num_polygons', self.i))

    def save_properties(self, out_filename):
        out = open(out_filename, 'w')
        json.dump({'num_polygons': str(self.i)}, out)

    def save_polygons(self, out_filename):
        out = open(out_filename, 'w')
        for i in xrange(self.i):
            poly = self.polygons[i]
            feature = {
                'type': 'Feature',
                'geometry': mapping(poly.context),
            }
            out.write(json.dumps(feature) + u'\n')

    def save_index(self):
        raise NotImplementedError('Children must implement')

    def load_polygon_properties(self, d):
        pass

    def save_polygon_properties(self, d):
        pass

    @classmethod
    def polygon_from_geojson(cls, feature):
        poly_type = feature['geometry']['type']
        if poly_type == 'Polygon':
            coords = feature['geometry']['coordinates']
            poly = cls.to_polygon(coords[0], holes=coords[1:] or None)
            return poly
        elif poly_type == 'MultiPolygon':
            polys = []
            for coords in feature['geometry']['coordinates']:
                poly = cls.to_polygon(coords[0], holes=coords[1:] or None)
                polys.append(poly)

            return MultiPolygon(polys)

    @classmethod
    def load_polygons(cls, filename):
        f = open(filename)
        polygons = {}
        cls.i = 0
        for line in f:
            feature = json.loads(line.rstrip())
            polygons[cls.i] = prep(cls.polygon_from_geojson(feature))
            cls.i += 1
        return polygons

    @classmethod
    def load_index(cls, d, index_name=None):
        raise NotImplementedError('Children must implement')

    @classmethod
    def load(cls, d, index_name=None, polys_filename=DEFAULT_POLYS_FILENAME,
             properties_filename=DEFAULT_PROPS_FILENAME,
             polys_db_dir=POLYGONS_DB_DIR):
        index = cls.load_index(d, index_name=index_name or cls.INDEX_FILENAME)
        if not cls.persistent_polygons:
            polys = cls.load_polygons(os.path.join(d, polys_filename))
        else:
            polys = None
        polygons_db = LevelDB(os.path.join(d, polys_db_dir))
        polygon_index = cls(index=index, polygons=polys, polygons_db=polygons_db, save_dir=d)
        polygon_index.load_properties(os.path.join(d, properties_filename))
        polygon_index.load_polygon_properties(d)
        return polygon_index

    def get_candidate_polygons(self, lat, lon):
        raise NotImplementedError('Children must implement')

    def get_properties(self, i):
        return json.loads(self.polygons_db.Get(self.properties_key(i)))

    def get_polygon(self, i):
        return self.polygons[i]

    def get_polygon_cached(self, i):
        poly = self.polygons.get(i, None)
        if poly is None:
            data = json.loads(self.polygons_db.Get(self.polygon_key(i)))
            poly = prep(self.polygon_from_geojson(data))
            self.polygons[i] = poly
            self.cache_misses += 1
        else:
            self.cache_hits += 1
        return poly

    def __iter__(self):
        for i in xrange(self.i):
            yield self.get_properties(i), self.get_polygon(i)

    def __len__(self):
        return self.i

    def polygons_contain(self, candidates, point, return_all=False):
        containing = None
        if return_all:
            containing = []
        for i in candidates:
            poly = self.get_polygon(i)
            contains = poly.contains(point)
            if contains:
                properties = self.get_properties(i)
                if not return_all:
                    return properties
                else:
                    containing.append(properties)
        return containing

    def polygon_key(self, i):
        return 'poly:{}'.format(i)

    def properties_key(self, i):
        return 'props:{}'.format(i)

    def point_in_poly(self, lat, lon, return_all=False):
        candidates = self.get_candidate_polygons(lat, lon)
        point = Point(lon, lat)
        return self.polygons_contain(candidates, point, return_all=return_all)
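A minimal, standalone sketch (not part of the index class above) of the prepared-geometry pattern that point_in_poly relies on: shapely's prep() wraps a polygon so repeated contains() checks against candidate points stay cheap. The coordinates below are made up for illustration.

from shapely.geometry import Point, Polygon
from shapely.prepared import prep

# a simple square and a prepared copy for fast repeated containment tests
poly = Polygon([(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)])
prepared = prep(poly)

candidates = [Point(5.0, 5.0), Point(20.0, 20.0)]
contained = [p for p in candidates if prepared.contains(p)]
print(len(contained))  # 1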
예제 #39
0
class MTPFS(LoggingMixIn, Operations):
    def __init__(self, mtp, mountpoint, is_debug=False, logger=None):
        global VERBOSE
        self.mtp = mtp
        self.is_debug = is_debug
        self.tempdir = tempfile.mkdtemp(prefix='pymtpfs')
        if not bool(self.tempdir) or not os.path.exists(self.tempdir):
            self.tempdir = tempfile.gettempdir()
        self.read_timeout = 2
        self.write_timeout = 2
        self.openfile_t = namedtuple('openfile',
                                     'handle, path, mtp_path, readonly')
        self.openfiles = {}
        self.log = logger
        self.created = LRU(1000)
        if VERBOSE:
            print("Mounted %s on %s" % (self.mtp, ))
        self.log.info("Mounted %s on %s" % (self.mtp, mountpoint))

    def __openfile_by_path(self, path):
        return next(
            (en for en in self.openfiles.values() if en.mtp_path == path),
            None)

    def destroy(self, path):
        self.mtp.close()
        for openfile in self.openfiles.values():
            try:
                os.close(openfile.handle)
            except:
                self.log.exception("")
        try:
            if self.tempdir != tempfile.gettempdir():
                shutil.rmtree(self.tempdir)
        except:
            self.log.exception("")
        return 0

    def chmod(self, path, mode):
        return 0

    def chown(self, path, uid, gid):
        return 0

#   @log_calls

    def getattr(self, path, fh=None):
        attrib = {}
        path = fix_path(path, self.log)
        entry = self.mtp.get_path(path)
        if entry is None:
            entry = self.created.get(path)
        if entry is None:
            raise FuseOSError(errno.ENOENT)
        else:
            try:
                attrib = entry.get_attributes()
            except Exception as e:
                self.log.exception("")
                attrib = {}
                exmess = ""
                try:
                    exmess = str(e.message)
                except:
                    exmess = "Unknown"
                self.log.error('Error reading MTP attributes for %s (%s)' %
                               (path, exmess))
                raise FuseOSError(errno.ENOENT)
        return attrib
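A hedged sketch of the created-entries idea used in the constructor above: lru-dict's LRU acts as a bounded mapping, so paths created through the filesystem can be answered from the cache until the MTP device reports them. The path, the attribute dict, and the helper name lookup_created are illustrative, not part of pymtpfs.

from lru import LRU

created = LRU(1000)  # bounded cache of recently created paths
created['/DCIM/new.jpg'] = {'st_mode': 0o100644, 'st_size': 0}

def lookup_created(path):
    # fall back to the bounded cache when the device has no entry for the path yet
    return created.get(path, None)

print(lookup_created('/DCIM/new.jpg'))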
예제 #40
0
class Cache:

    # Replacement policies
    LRU = "LRU"
    FIFO = 'FIFO'

    def __init__(self, name, size, policy):
        self.name = name
        self.size = size
        self.free_space = size
        self.policy = policy  # Eviction policy
        self.hashmap = {}  # Mapping <objname,objsize>

        if (self.policy == Cache.LRU):
            self.cache = LRU(self.size)
        elif (self.policy == Cache.FIFO):
            self.cache = queue.Queue(maxsize=self.size)

        # Statistics
        self.hit_count = 0
        self.miss_count = 0

    def has_key(self, key):
        return key in self.hashmap

    def update(self, key, size):
        self.hashmap[key] = size
        self.hit_count += 1
        if (self.policy == Cache.LRU):
            self.cache[key] = size  # assigning through the LRU dict marks the key as most recently used
        elif (self.policy == Cache.FIFO):
            pass  # FIFO order is unchanged on a hit; re-queueing would duplicate the id

    def insert(self, key, size, directory):
        if (self.policy == Cache.LRU):
            self.insertLRU(key, size, directory)
        elif (self.policy == Cache.FIFO):
            self.insertFIFO(key, size, directory)

    def evictLRU(self, directory):
        # peek_last_item() returns the least recently used (key, value) pair
        oid = self.cache.peek_last_item()[0]
        directory.removeBlock(oid, self.name)
        self.free_space += int(self.hashmap[oid])
        del self.hashmap[oid]
        del self.cache[oid]

    def evictFIFO(self, directory):
        oid = self.cache.get()
        directory.removeBlock(oid, self.name)
        self.free_space += int(self.hashmap[oid])
        del self.hashmap[oid]

    def insertLRU(self, key, size, directory):
        while (int(size) >= self.free_space):
            self.evictLRU(directory)
        self.cache[key] = size
        self.hashmap[key] = size
        self.free_space -= int(size)  # inserting consumes free space
        self.miss_count += 1

    def insertFIFO(self, key, size, directory):
        while (int(size) >= self.free_space):
            self.evictFIFO(directory)
        self.cache.put(key)
        self.hashmap[key] = size
        self.free_space -= int(size)  # inserting consumes free space
        self.miss_count += 1

    def put(self, key, size, directory):
        if self.has_key(key):
            self.update(key, size)
        else:
            self.insert(key, size, directory)

    def print(self):
        if (self.policy == Cache.LRU):
            print(self.name, "LRU", self.hashmap, self.cache.items())
        elif (self.policy == Cache.FIFO):
            print(self.name, "LRU", self.hashmap, list(self.cache.queue))

    def remove(self, key):
        del self.hashmap[key]
        if (self.policy == Cache.LRU):
            del self.cache[key]
        elif (self.policy == Cache.FIFO):
            pass  # queue.Queue has no random removal; the id simply stays in the FIFO queue
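A usage sketch for the Cache above, assuming the lru-dict and queue imports the class relies on are already in scope. FakeDirectory stands in for the directory service (removeBlock is the only call the cache makes on it), and the object names and sizes are made up for illustration.

class FakeDirectory:
    def removeBlock(self, oid, cache_name):
        print('evicted', oid, 'from', cache_name)

cache = Cache('edge-1', size=100, policy=Cache.LRU)
directory = FakeDirectory()

cache.put('obj-a', 40, directory)  # miss: inserted
cache.put('obj-b', 40, directory)  # miss: inserted
cache.put('obj-a', 40, directory)  # hit: existing entry refreshed

print(cache.hit_count, cache.miss_count)  # 1 2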
예제 #41
0
            session.close()

            df_relevant_posts = get_relevant_posts(ssh, db_host, db_user,
                                                   db_password, db_port,
                                                   database_name, ssh_username,
                                                   ssh_password)
            # print(df_relevant_posts)
            relevant_posts = df_relevant_posts.to_dict('records')
            logger.info(f'Relevant posts: {len(relevant_posts)}')
            posts_to_update = []

            for relevant_post in relevant_posts:
                # message_id, conversation_replies, likes_total
                current_id = relevant_post['message_id']
                values = {}
                current_post_prev_version = processed_posts.get(
                    current_id, None)

                if relevant_post != current_post_prev_version:
                    processed_posts[current_id] = relevant_post
                    posts_to_update.append(
                        (current_id, compute_impact(relevant_post),
                         relevant_post['post_type']))

            # Even if the post doesn't change, we may want to recalculate the impact every several seconds
            delta_since_last_recalculation_ms = get_utc_now() - last_impact_recalculation_epoch_ms
            logger.info(
                f'Posts to update: {len(posts_to_update)}. Milliseconds since last recalculation: {delta_since_last_recalculation_ms}'
            )

            if len(