Example #1
    def test_lru(self):
        l = LRU(1)
        l['a'] = 1
        l['a']
        self.assertEqual(l.keys(), ['a'])
        l['b'] = 2
        self.assertEqual(l.keys(), ['b'])

        l = LRU(2)
        l['a'] = 1
        l['b'] = 2
        self.assertEqual(len(l), 2)
        l['a']                  # Testing the first one
        l['c'] = 3
        self.assertEqual(sorted(l.keys()), ['a', 'c'])
        l['c']
        self.assertEqual(sorted(l.keys()), ['a', 'c'])

        l = LRU(3)
        l['a'] = 1
        l['b'] = 2
        l['c'] = 3
        self.assertEqual(len(l), 3)
        l['b']                  # Testing the middle one
        l['d'] = 4
        self.assertEqual(sorted(l.keys()), ['b', 'c', 'd'])
        l['d']                  # Testing the last one
        self.assertEqual(sorted(l.keys()), ['b', 'c', 'd'])
        l['e'] = 5
        self.assertEqual(sorted(l.keys()), ['b', 'd', 'e'])
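The assertions above hinge on the fact that a plain read refreshes recency: touching l['a'] right before inserting 'c' makes 'b' the eviction victim, and keys() lists entries from most recently to least recently used. A minimal interactive sketch of the same behavior (output values assume the lru-dict package):

from lru import LRU

l = LRU(2)                     # capacity of two entries
l['a'] = 1
l['b'] = 2
_ = l['a']                     # touch 'a' so 'b' becomes the least recently used
l['c'] = 3                     # inserting 'c' evicts 'b', not 'a'
print(l.keys())                # ['c', 'a']: most recently used first
print(l.peek_last_item())      # ('a', 1): the entry that would be evicted next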
Example #2
    def test_lru(self):
        l = LRU(1)
        l["a"] = 1
        l["a"]
        self.assertEqual(l.keys(), ["a"])
        l["b"] = 2
        self.assertEqual(l.keys(), ["b"])

        l = LRU(2)
        l["a"] = 1
        l["b"] = 2
        self.assertEqual(len(l), 2)
        l["a"]  # Testing the first one
        l["c"] = 3
        self.assertEqual(sorted(l.keys()), ["a", "c"])
        l["c"]
        self.assertEqual(sorted(l.keys()), ["a", "c"])

        l = LRU(3)
        l["a"] = 1
        l["b"] = 2
        l["c"] = 3
        self.assertEqual(len(l), 3)
        l["b"]  # Testing the middle one
        l["d"] = 4
        self.assertEqual(sorted(l.keys()), ["b", "c", "d"])
        l["d"]  # Testing the last one
        self.assertEqual(sorted(l.keys()), ["b", "c", "d"])
        l["e"] = 5
        self.assertEqual(sorted(l.keys()), ["b", "d", "e"])
Example #3
File: manager.py Project: Rhoana/mb
    def __init__(self):
        '''
        '''
        self._views = LRU(50)
        # tile cache - enough for 1 MFOV for 10 parallel users
        self._tiles = LRU(61 * 10)

        self._client_tiles = {}
Example #4
 def test_access_within_size(self):
     for size in SIZES:
         l = LRU(size)
         for i in xrange(size):
             l[i] = str(i)
         for i in xrange(size):
             self.assertEquals(l[i], str(i))
             self.assertEquals(l.get(i,None), str(i))
Example #5
 def test_has_key(self):
     for size in SIZES:
         l = LRU(size)
         for i in xrange(2*size):
             l[i] = str(i)
             self.assertTrue(l.has_key(i))
         for i in xrange(size, 2*size):
             self.assertTrue(l.has_key(i))
         for i in xrange(size):
             self.assertFalse(l.has_key(i))
Example #6
 def test_get_and_del(self):
     l = LRU(2)
     l[1] = '1'
     self.assertEqual('1', l.get(1))
     self.assertEqual('1', l.get(2, '1'))
     self.assertIsNone(l.get(2))
     self.assertEqual('1', l[1])
     self.assertRaises(KeyError, lambda: l['2'])
     with self.assertRaises(KeyError):
         del l['2']
Example #7
 def test_get_and_del(self):
     l = LRU(2)
     l[1] = "1"
     self.assertEqual("1", l.get(1))
     self.assertEqual("1", l.get(2, "1"))
     self.assertIsNone(l.get(2))
     self.assertEqual("1", l[1])
     self.assertRaises(KeyError, lambda: l["2"])
     with self.assertRaises(KeyError):
         del l["2"]
Example #8
 def test_access(self):
     for size in SIZES:
         l = LRU(size)
         n = size * 2
         for i in xrange(n):
             l[i] = str(i)
         self._check_kvi(range(n-1,size-1,-1), l)
         for i in xrange(size, n):
             self.assertEquals(l[i], str(i))
             self.assertEquals(l.get(i,None), str(i))
Example #9
 def __init__(self, c_hash, c_user, c_words):
     self.topic_count =1
     # self.time = (self.first,self.last)
     self.l1 = LRU(c_hash)
     self.first =""
     self.last=""
     self.lats=[]
     self.longs=[]
     self.l2 = LRU(c_user)
     self.l3 = LRU(c_words)
     self.l4 = LRU(400)
Example #10
 def test_clear(self):
     for size in SIZES:
         l = LRU(size)
         for i in range(size+5):
             l[i] = str(i)
         l.clear()
         for i in range(size):
             l[i] = str(i)
         for i in xrange(size):
             _ = l[random.randint(0, size-1)]
         l.clear()
         self.assertTrue(len(l) == 0)
Example #11
File: mtp.py Project: donaldmunro/pymtpfs
 def __init__(self, mtp, pstorage=None):
    global PATH_CACHE_SIZE
    MTPRefresh.__init__(self)
    self.mtp = mtp
    self.libmtp = mtp.libmtp
    self.open_device = mtp.open_device
    self.directories = None
    self.contents = LRU(PATH_CACHE_SIZE)
    if pstorage is None:
       MTPEntry.__init__(self, -3, '/')
       self.storage = None
       self.directories = []
       for dirname in self.mtp.get_storage_descriptions():
          #def __init__(self, path, id=-2, storageid=-2, folderid=-2, mtp=None, timestamp=0, is_refresh=True):
          self.directories.append(MTPFolder(path=dirname, id= -3, storageid= -3, folderid= -2, is_refresh=False)) 
       self.root = None
       self.contents[utf8(os.sep)] = self
    else:         
       self.storage = pstorage
       storage = pstorage.contents
       self.type = storage.StorageType
       self.freespace = storage.FreeSpaceInBytes
       self.capacity = storage.MaxCapacity
       path = os.sep + storage.StorageDescription
       MTPEntry.__init__(self, storage.id, path, storageid=None, folderid=0)
       self.root = MTPFolder(path=path, id=0, storageid=storage.id, folderid=0, mtp=self.mtp)
       self.contents[utf8(path)] = self.root
Example #12
File: drawings.py Project: recunius/iodraw
class Drawings:

    def __init__(self, max_active):
        self.drawings = LRU(max_active)
    
    def get_drawing(self, room):
        for (k, v) in self.drawings.items():
            logger.debug('{}: {} (len {})'.format(id(self), k , len(v)))
        try:
            d = self.drawings[room]
        except KeyError:
            d = self.drawings[room] = [ ]
        return d
Example #13
File: index.py Project: BERENZ/libpostal
    def __init__(self, index=None, polygons=None, polygons_db=None, save_dir=None,
                 index_filename=None,
                 polygons_db_path=None,
                 include_only_properties=None):
        if save_dir:
            self.save_dir = save_dir
        else:
            self.save_dir = None

        if not index_filename:
            index_filename = self.INDEX_FILENAME

        self.index_path = os.path.join(save_dir or '.', index_filename)

        if not index:
            self.create_index(overwrite=True)
        else:
            self.index = index

        if include_only_properties and hasattr(include_only_properties, '__contains__'):
            self.include_only_properties = include_only_properties

        if not polygons and not self.persistent_polygons:
            self.polygons = {}
        elif polygons and not self.persistent_polygons:
            self.polygons = polygons
        elif self.persistent_polygons and self.cache_size > 0:
            self.polygons = LRU(self.cache_size)
            if polygons:
                for key, value in six.iteritems(polygons):
                    self.polygons[key] = value

            self.cache_hits = 0
            self.cache_misses = 0

            self.get_polygon = self.get_polygon_cached

        if not polygons_db_path:
            polygons_db_path = os.path.join(save_dir or '.', self.POLYGONS_DB_DIR)

        if not polygons_db:
            self.polygons_db = LevelDB(polygons_db_path)
        else:
            self.polygons_db = polygons_db

        self.setup()

        self.i = 0
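Example #13 switches the accessor to a cached one (self.get_polygon = self.get_polygon_cached) when polygons live in persistent storage, but the cached accessor itself is not shown in the snippet. A minimal sketch of that get-or-load pattern around an LRU, with a hypothetical load_polygon callable standing in for the real database read:

from lru import LRU

class PolygonCache:
    # Sketch only: counts hits and misses the way the snippet's fields suggest.
    def __init__(self, cache_size, load_polygon):
        self.polygons = LRU(cache_size)    # bounded cache of decoded polygons
        self.load_polygon = load_polygon   # hypothetical loader (e.g. a LevelDB read)
        self.cache_hits = 0
        self.cache_misses = 0

    def get_polygon_cached(self, key):
        polygon = self.polygons.get(key)
        if polygon is not None:
            self.cache_hits += 1
            return polygon
        self.cache_misses += 1
        polygon = self.load_polygon(key)   # fall back to persistent storage on a miss
        self.polygons[key] = polygon       # may evict the least recently used polygon
        return polygon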
Example #14
 def __init__(self, mtp, mountpoint, is_debug=False, logger=None):
    global VERBOSE
    self.mtp = mtp
    self.is_debug = is_debug
    self.tempdir = tempfile.mkdtemp(prefix='pymtpfs')
    if not bool(self.tempdir) or not os.path.exists(self.tempdir):
       self.tempdir = tempfile.gettempdir()
    self.read_timeout = 2
    self.write_timeout = 2      
    self.openfile_t = namedtuple('openfile', 'handle, path, mtp_path, readonly')
    self.openfiles = {}
    self.log = logger
    self.created = LRU(1000) 
    if VERBOSE:         
       print("Mounted %s on %s" % (self.mtp, ))
    self.log.info("Mounted %s on %s" % (self.mtp, mountpoint))
Example #15
 def test_capacity_set(self):
     for size in SIZES:
         l = LRU(size)
         for i in range(size+5):
             l[i] = str(i)
         l.set_size(size+10)
         self.assertTrue(size+10 == l.get_size())
         self.assertTrue(len(l) == size)
         for i in range(size+20):
             l[i] = str(i)
         self.assertTrue(len(l) == size+10)
         l.set_size(size+10-1)
         self.assertTrue(len(l) == size+10-1)
Example #16
File: clusters.py Project: nishucsd/thesis
class topic:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count =1
        self.l1 = LRU(c_hash)
        self.l2 = LRU(c_user)
        self.l3 = LRU(c_words)

    def set_hashLRU(self,l):
        self.set(self.l1, l)

    def set_userLRU(self,l):
        self.set(self.l2, l)

    def set_wordLRU(self,l):
        self.set(self.l3, l)

    def set(self, lru, l):
        for k in l:
            v = lru.get(k,0)
            lru[k]=v+1

    def set_cluster(self, hashtags, users, words):
        for k in hashtags:
            v = self.l1.get(k,0)
            self.l1[k]=v+1
        for k in users:
            v = self.l2.get(k,0)
            self.l2[k]=v+1
        for k in words:
            v = self.l3.get(k,0)
            self.l3[k]=v+1
        self.topic_count+=1

    def get_similarity(self,hashtags,users,words):
        h_sum = 0
        u_sum = 0
        w_sum = 0

        for h in hashtags:
            h_sum+= self.l1.get(h,0)
        for u in users:
            u_sum+= self.l2.get(u,0)
        for w in words:
            w_sum+= self.l3.get(w,0)

        similarity = 0.5*h_sum + 0.2*u_sum + 0.3*w_sum
        return similarity
Example #17
 def __init__(self, c_hash, c_user, c_words):
     self.topic_count =1
     self.l1 = LRU(c_hash)
     self.l2 = LRU(c_user)
     self.l3 = LRU(c_words)
Example #18
    _cache_projects = max(_cache_projects, 1)

# here we need to know all the items
_scenes_list: Optional[Dict[str, IdDesc]] = None
_projects_list: Optional[Dict[str, IdDesc]] = None

# here we can forget least used items
if TYPE_CHECKING:
    _scenes: Optional[Dict[str, Scene]] = None
    _projects: Optional[Dict[str, Project]] = None

    if _cache_enabled:
        _scenes = {}
        _projects = {}
else:
    _scenes = LRU(_cache_scenes) if _cache_enabled and _cache_scenes else None
    _projects = LRU(_cache_projects) if _cache_enabled and _cache_projects else None


async def initialize_module() -> None:

    if _cache_enabled:

        global _scenes_list
        global _projects_list

        _scenes_list = {}
        _projects_list = {}

        for it in (await ps.get_projects()).items:
            _projects_list[it.id] = it
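The comments in this example describe the split: the id/description listings (_scenes_list, _projects_list) must stay complete, while full Scene and Project objects may be forgotten by the LRU caches. A sketch of the read path that split implies, with a hypothetical async fetch_scene standing in for the real service call and an arbitrary cache size:

from lru import LRU

_scenes = LRU(32)   # full Scene objects; least recently used ones may be dropped

async def get_scene(scene_id, fetch_scene):
    # Mirrors the snippet's optional cache: _scenes may be None when caching is disabled.
    scene = _scenes.get(scene_id) if _scenes is not None else None
    if scene is None:
        scene = await fetch_scene(scene_id)   # reload the full object on a miss
        if _scenes is not None:
            _scenes[scene_id] = scene
    return scene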
Example #19
File: test_lru.py Project: fabioz/lru-dict
 def test_overwrite(self):
     l = LRU(1)
     l[1] = '2'
     l[1] = '1'
     self.assertEquals('1', l[1])
     self._check_kvi([1], l)
Example #20
class MTPFS(LoggingMixIn, Operations):   
   def __init__(self, mtp, mountpoint, is_debug=False, logger=None):
      global VERBOSE
      self.mtp = mtp
      self.is_debug = is_debug
      self.tempdir = tempfile.mkdtemp(prefix='pymtpfs')
      if not bool(self.tempdir) or not os.path.exists(self.tempdir):
         self.tempdir = tempfile.gettempdir()
      self.read_timeout = 2
      self.write_timeout = 2      
      self.openfile_t = namedtuple('openfile', 'handle, path, mtp_path, readonly')
      self.openfiles = {}
      self.log = logger
      self.created = LRU(1000) 
      if VERBOSE:         
         print("Mounted %s on %s" % (self.mtp, ))
      self.log.info("Mounted %s on %s" % (self.mtp, mountpoint))
   
   def __openfile_by_path(self, path):
      return next((en for en in self.openfiles.values() if en.mtp_path == path), None)

   def destroy(self, path):
      self.mtp.close()
      for openfile in self.openfiles.values():
         try:
            os.close(openfile.handle)
         except:
            self.log.exception("")
      try:
         if self.tempdir != tempfile.gettempdir():
            shutil.rmtree(self.tempdir)
      except:
         self.log.exception("")
      return 0
      
   def chmod(self, path, mode):
      return 0

   def chown(self, path, uid, gid):
      return 0
   
#   @log_calls
   def getattr(self, path, fh=None):
      attrib = {}
      path = fix_path(path, self.log)
      entry = self.mtp.get_path(path)
      if entry is None:
         entry = self.created.get(path)
      if entry is None:         
         raise FuseOSError(errno.ENOENT)
      else:
         try:
            attrib = entry.get_attributes()
         except Exception, e:
            self.log.exception("")
            attrib = {}
            exmess = ""
            try:
               exmess = str(e.message)
            except:
               exmess = "Unknown"
            self.log.error('Error reading MTP attributes for %s (%s)' % (path, exmess))
            raise FuseOSError(errno.ENOENT)            
      return attrib      
Example #21
    def test_hits(self):
        for size in SIZES:
            l = LRU(size)
            for i in range(size):
                l[i] = str(i)

            val = l[0]
            self.assertTrue(l.get_hits() == 1)
            self.assertTrue(l.get_misses() == 0)

            val = l.get(0, None)
            self.assertTrue(l.get_hits() == 2)
            self.assertTrue(l.get_misses() == 0)

            val = l.get(-1, None)
            self.assertTrue(l.get_hits() == 2)
            self.assertTrue(l.get_misses() == 1)

            try:
                val = l[-1]
            except:
                pass

            self.assertTrue(l.get_hits() == 2)
            self.assertTrue(l.get_misses() == 2)

            l.clear()
            self.assertTrue(len(l) == 0)
            self.assertTrue(l.get_hits() == 0)
            self.assertTrue(l.get_misses() == 0)
Example #22
File: test_lru.py Project: fabioz/lru-dict
    def test_stats(self):
        for size in SIZES:
            l = LRU(size)
            for i in range(size):
                l[i] = str(i)

            self.assertTrue(l.get_stats() == (0, 0))

            val = l[0]
            self.assertTrue(l.get_stats() == (1, 0))

            val = l.get(0, None)
            self.assertTrue(l.get_stats() == (2, 0))

            val = l.get(-1, None)
            self.assertTrue(l.get_stats() == (2, 1))

            try:
                val = l[-1]
            except:
                pass

            self.assertTrue(l.get_stats() == (2, 2))

            l.clear()
            self.assertTrue(len(l) == 0)
            self.assertTrue(l.get_stats() == (0, 0))
Example #23
                    assert penalties[
                        index] == penalty_numerator // penalty_denominator


def transition_state_to_leak(spec, state, epochs=None):
    if epochs is None:
        # +2 because finality delay is based on previous_epoch and must be more than `MIN_EPOCHS_TO_INACTIVITY_PENALTY`
        epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2
    assert epochs > spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY

    for _ in range(epochs):
        next_epoch(spec, state)
    assert spec.is_in_inactivity_leak(state)


_cache_dict = LRU(size=10)


def leaking(epochs=None):
    def deco(fn):
        def entry(*args, spec, state, **kw):
            # If the pre-state is not already known in the LRU, then take it,
            # transition it to leak, and put it in the LRU.
            # The input state is likely already cached, so the hash-tree-root does not affect speed.
            key = (state.hash_tree_root(),
                   spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY, spec.SLOTS_PER_EPOCH,
                   epochs)
            global _cache_dict
            if key not in _cache_dict:
                transition_state_to_leak(spec, state, epochs=epochs)
                _cache_dict[key] = state.get_backing(
Example #24
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        #logging.basicConfig(level=logging.DEBUG,
        format='%(asctime)s %(name)-11s %(levelname)-5s %(message)s',
        datefmt='%Y-%m-%d %H:%M')

    if len(argv) < 5:
        print "usage: average.py N M K TRIES"
        exit(1)
    try:
        n = int(argv[1])
        m = int(argv[2])
        k = int(argv[3])
        tries = int(argv[4])
    except:
        logging.critical("Invalid argument.")
        exit(2)

    assert n > 0 and m > 0 and k > 0 and tries > 0

    fifo_alg = FIFO()
    lru_alg = LRU()
    algs = [fifo_alg, lru_alg]

    x = run_iteration2(tries, algs, n, m, k)
    for i in x:
        logging.info("FIFO: %d (C=%f), LRU: %d (C=%f), OPT: %d" %
                     (i[0], 1.0 * i[0] / i[2], i[1], 1.0 * i[1] / i[2], i[2]))
Example #25
 def test_decrypt_token(self, kms_mock):
     time_format = "%Y%m%dT%H%M%SZ"
     now = datetime.datetime.utcnow()
     not_before = now.strftime(time_format)
     _not_after = now + datetime.timedelta(minutes=60)
     not_after = _not_after.strftime(time_format)
     payload = json.dumps({
         'not_before': not_before,
         'not_after': not_after
     })
     kms_mock.return_value = {'Plaintext': payload, 'KeyId': 'mocked'}
     self.assertEqual(
         keymanager.decrypt_token(1, 'service', 'confidant-unittest',
                                  'ZW5jcnlwdGVk'), {
                                      'payload': json.loads(payload),
                                      'key_alias': 'authnz-testing'
                                  })
     keymanager.TOKENS = LRU(4096)
     self.assertEqual(
         keymanager.decrypt_token(2, 'user', 'testuser', 'ZW5jcnlwdGVk'), {
             'payload': json.loads(payload),
             'key_alias': 'authnz-testing'
         })
     keymanager.TOKENS = LRU(4096)
     with self.assertRaisesRegexp(keymanager.TokenDecryptionError,
                                  'Unacceptable token version.'):
         keymanager.decrypt_token(3, 'user', 'testuser', 'ZW5jcnlwdGVk')
     with self.assertRaisesRegexp(
             keymanager.TokenDecryptionError,
             'Authentication error. Unsupported user_type.'):
         keymanager.decrypt_token(2, 'unsupported', 'testuser',
                                  'ZW5jcnlwdGVk')
     # Missing KeyId, will cause an exception to be thrown
     kms_mock.return_value = {'Plaintext': payload}
     with self.assertRaisesRegexp(keymanager.TokenDecryptionError,
                                  'Authentication error. General error.'):
         keymanager.decrypt_token(2, 'service', 'confidant-unittest',
                                  'ZW5jcnlwdGVk')
     # Payload missing not_before/not_after
     empty_payload = json.dumps({})
     kms_mock.return_value = {'Plaintext': empty_payload, 'KeyId': 'mocked'}
     with self.assertRaisesRegexp(
             keymanager.TokenDecryptionError,
             'Authentication error. Missing validity.'):
         keymanager.decrypt_token(2, 'service', 'confidant-unittest',
                                  'ZW5jcnlwdGVk')
     # lifetime of 0 will make every token invalid. testing for proper delta
     # checking.
     lifetime = app.config['AUTH_TOKEN_MAX_LIFETIME']
     app.config['AUTH_TOKEN_MAX_LIFETIME'] = 0
     kms_mock.return_value = {'Plaintext': payload, 'KeyId': 'mocked'}
     with self.assertRaisesRegexp(
             keymanager.TokenDecryptionError,
             'Authentication error. Token lifetime exceeded.'):
         keymanager.decrypt_token(2, 'service', 'confidant-unittest',
                                  'ZW5jcnlwdGVk')
     app.config['AUTH_TOKEN_MAX_LIFETIME'] = lifetime
     # Token too old
     now = datetime.datetime.utcnow()
     _not_before = now - datetime.timedelta(minutes=60)
     not_before = _not_before.strftime(time_format)
     _not_after = now - datetime.timedelta(minutes=1)
     not_after = _not_after.strftime(time_format)
     payload = json.dumps({
         'not_before': not_before,
         'not_after': not_after
     })
     kms_mock.return_value = {'Plaintext': payload, 'KeyId': 'mocked'}
     with self.assertRaisesRegexp(
             keymanager.TokenDecryptionError,
             'Authentication error. Invalid time validity for token'):
         keymanager.decrypt_token(2, 'service', 'confidant-unittest',
                                  'ZW5jcnlwdGVk')
     # Token too young
     now = datetime.datetime.utcnow()
     _not_before = now + datetime.timedelta(minutes=60)
     not_before = _not_before.strftime(time_format)
     _not_after = now + datetime.timedelta(minutes=120)
     not_after = _not_after.strftime(time_format)
     payload = json.dumps({
         'not_before': not_before,
         'not_after': not_after
     })
     kms_mock.return_value = {'Plaintext': payload, 'KeyId': 'mocked'}
     with self.assertRaisesRegexp(
             keymanager.TokenDecryptionError,
             'Authentication error. Invalid time validity for token'):
         keymanager.decrypt_token(2, 'service', 'confidant-unittest',
                                  'ZW5jcnlwdGVk')
Example #26
 def __init__(self):
     self.cache = LRU(400)
     self.input_shape = None
     self.nb_classes = None
     self.model = None
     self.config = None
Example #27
 def __init__(self):
     self._gribs = LRU(10)
Example #28
)
from eth2.beacon.db.schema import SchemaV1
from eth2.beacon.fork_choice.scoring import BaseForkChoiceScoring, BaseScore
from eth2.beacon.helpers import compute_epoch_at_slot
from eth2.beacon.types.blocks import BaseBeaconBlock, BaseSignedBeaconBlock
from eth2.beacon.types.nonspec.epoch_info import EpochInfo
from eth2.beacon.types.states import BeaconState  # noqa: F401
from eth2.beacon.typing import Epoch, Root, Slot
from eth2.configs import Eth2Config

# When performing a chain sync (either fast or regular modes), we'll very often need to look
# up recent blocks to validate the chain, and decoding their SSZ representation is
# relatively expensive so we cache that here, but use a small cache because we *should* only
# be looking up recent blocks. We cache by root instead of ssz representation as ssz
# representation is not unique if different length configs are considered
state_cache = LRU(128)
block_cache = LRU(128)


class AttestationKey(ssz.Serializable):
    fields = [("block_root", ssz.sedes.bytes32), ("index", ssz.sedes.uint8)]


class BaseBeaconChainDB(ABC):
    db: AtomicDatabaseAPI = None

    @abstractmethod
    def __init__(self, db: AtomicDatabaseAPI,
                 genesis_config: Eth2Config) -> None:
        ...
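The comment in this example spells out the intent: recent blocks are looked up repeatedly during sync, SSZ decoding is expensive, and the root is a stable cache key. A generic sketch of that get-or-decode pattern (decode_block and the db mapping here are illustrative assumptions, not the project's actual API):

from lru import LRU

block_cache = LRU(128)  # deliberately small: only recent blocks are expected to recur

def get_block(root, db, decode_block):
    # db maps root -> raw SSZ bytes; decode_block turns those bytes into a block object.
    block = block_cache.get(root)
    if block is None:
        block = decode_block(db[root])  # expensive decode happens only on a miss
        block_cache[root] = block       # keyed by root; evicts the oldest entry when full
    return block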
Example #29
File: account.py Project: sjyi/py-evm
    Account, )
from eth.validation import (
    validate_is_bytes,
    validate_uint256,
    validate_canonical_address,
)
from eth.tools.logging import (TraceLogger)
from eth.utils.padding import (
    pad32, )

from .hash_trie import HashTrie

# Use lru-dict instead of functools.lru_cache because the latter doesn't let us invalidate a single
# entry, so we'd have to invalidate the whole cache in _set_account() and that turns out to be too
# expensive.
account_cache = LRU(2048)


class BaseAccountDB(ABC):
    @abstractmethod
    def __init__(self) -> None:
        raise NotImplementedError("Must be implemented by subclasses")

    @property
    @abstractmethod
    def state_root(self):
        raise NotImplementedError("Must be implemented by subclasses")

    @abstractmethod
    def has_root(self, state_root: bytes) -> bool:
        raise NotImplementedError("Must be implemented by subclasses")
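The comment above motivates lru-dict over functools.lru_cache: one stale entry can be dropped without throwing away the whole cache. A small sketch of that per-entry invalidation (the helper names are illustrative, not py-evm's actual methods):

from lru import LRU

account_cache = LRU(2048)

def cache_account(address, encoded_account):
    account_cache[address] = encoded_account   # write-through on _set_account-style updates

def invalidate_account(address):
    # lru-dict supports deleting a single key; functools.lru_cache would only offer
    # cache_clear(), which is exactly what the comment calls too expensive.
    if address in account_cache:
        del account_cache[address]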
Example #30
class DatabaseEnv(gym.Env):
    metadata = {'render.modes': ['human']}

    def __init__(self, args={}):
        super(DatabaseEnv, self).__init__()

        # Number of actions that the database can take
        # { Create View, Do nothing }
        N_DISCRETE_ACTIONS = 2

        # Number of tables in the database being considered
        N_TABLES = 21
        N_JOIN_COMBINATIONS = int((N_TABLES * (N_TABLES - 1)) / 2)

        self.database = Database()
        self.table_names = self.database.get_table_names_from_hive()
        self.join_name_mappings = self.get_mapping_for_tables(self.table_names)

        # Maximum number of steps in an episode
        N_MAX_STEPS = 5
        N_MAX_JOINS = 2

        # Define action and observation space
        # They must be gym.spaces objects
        self.action_space = spaces.Discrete(N_DISCRETE_ACTIONS)
        self.observation_space = spaces.Box(low=0,
                                            high=1,
                                            shape=(N_JOIN_COMBINATIONS, ),
                                            dtype=np.uint8)

        # Capture information about episode to replay the same
        # on the real database
        self.max_steps = N_MAX_STEPS
        self.history = self.reset_env_history()
        self.current_step = 0
        self.current_views = []
        self.candidate_cost = 100
        exclusion_list = ['schema.sql', 'fkindexes.sql']
        self.queries = self.get_queries_from_dataset(
            '/home/richhiey/Desktop/workspace/dbse_project/Self-Driving-Materialized-Views/project/data/JOB',
            exclusion_list)
        pickle_file_path = '/home/richhiey/Desktop/workspace/dbse_project/Self-Driving-Materialized-Views/project/data/JOB/processed/job_processed.pickle'
        self.candidates = self.get_candidates_for_dataset(pickle_file_path)
        self.workload_distribution = self.get_workload_distribution(
            self.queries)
        self.current_candidate_queue = deque()
        self._obs_space = np.zeros(N_JOIN_COMBINATIONS)
        self._current_action = np.zeros(N_JOIN_COMBINATIONS)
        self.lru_cache_size = 20
        self.lru_cache = LRU(self.lru_cache_size)

    def get_mapping_for_tables(self, table_names):
        mapping = {}
        names = []
        for name in table_names:
            name = name[0]
            print(name)
            names.append(name)
        self.table_names = names
        num = 0
        for i in range(len(names)):
            for j in range(i + 1, len(names)):
                join_name = names[i] + '-' + names[j]
                num = num + 1
                mapping[num] = join_name
        print(mapping)
        return mapping

    def reset_env_history(self):
        history = {}
        for i in range(1, self.max_steps):
            history[i] = {'actions': [], 'query': ''}
        return history

    def get_workload_distribution(self, queries):
        # An array of the index value for weighting
        i = np.arange(len(queries))
        # Higher weights for larger index values
        w = np.exp(i / 10.)
        # Weight must be normalized
        w /= w.sum()
        return w

    def get_candidates_for_query(self, query):
        return self.candidates['data/JOB/' + query]

    def get_candidates_for_dataset(self, pickle_file_path):
        with open(pickle_file_path, 'rb') as pickle_file:
            candidates = pickle.load(pickle_file)
        new_candidates = {}
        for candidate in candidates:
            for key, value in candidate.items():
                new_candidates[key] = value
        return new_candidates

    def get_queries_from_dataset(self, dataset_path, exclusion_list):
        queries = []
        for root, dirs, files in os.walk(dataset_path):
            for file in files:
                if file in exclusion_list:
                    continue
                if '.sql' in file:
                    queries.append(file)
        return queries

    def step(self, action):
        # Use the action predicted by agent to modify the
        # database environment and calculate reward of the action
        delay_modifier = (self.current_step / self.max_steps)
        # print(self._obs_space)
        print(self.current_step)
        if not self.current_candidate_queue:
            self.current_step = self.current_step + 1
            self.selected_query = np.random.choice(
                self.queries, size=1, p=self.workload_distribution)[0]
            self.history[self.current_step]['query'] = self.selected_query
            candidates = self.get_candidates_for_query(self.selected_query)
            print(self.selected_query)
            for candidate in candidates:
                candidate = candidate.flatten()
                self.current_candidate_queue.append(candidate)

        current_candidate = self.current_candidate_queue.popleft()
        print('Action - ' + str(action))
        cand_idx = np.where(current_candidate == 1)[0]
        print('Candidate - ' + self.join_name_mappings[int(cand_idx)])
        self.lru_cache[self.selected_query] = current_candidate

        # Log some info about this training step
        self.history[self.current_step]['actions'].append({
            'action':
            action,
            'candidate':
            current_candidate,
            'obs_space':
            self._obs_space,
            'eviction':
            self.lru_cache.peek_last_item(),
        })

        reward, eviction = self._take_action(action, current_candidate,
                                             delay_modifier)
        print('Reward - ' + str(reward))
        done = self.current_step >= self.max_steps

        if done and len(self.current_candidate_queue):
            reward = get_final_reward_for_episode()
            info = {}
            done = True
        else:
            done = False

        obs = self._next_observation()

        return obs, reward, done, self.history

    # Reset the state of the environment to an initial state
    def reset(self):
        self.history = self.reset_env_history()
        self.current_step = 0
        self.current_views = []
        self.candidate_cost = 100
        self._obs_space = np.zeros(N_JOIN_COMBINATIONS)
        self._current_action = np.zeros(N_JOIN_COMBINATIONS)
        self.lru_cache = LRU(self.lru_cache_size)
        return self._next_observation()

    def render(self, mode='human', close=False):
        pass

    def _next_observation(self):
        return self._obs_space

    def env_cost_of_episode(self):
        run_time = 0
        for step, step_history in self.history.items():
            print('------------ Step - ' + str(step) + ' -------------')
            # First run the query and check the base cost
            query = step_history['query']
            print(query)
            with open(
                    os.path.join(
                        '/home/richhiey/Desktop/workspace/dbse_project/Self-Driving-Materialized-Views/project/data/JOB/',
                        query), 'r') as f:
                query_str = f.read()
                start_time = time.time()
                print('Actually executing on database now ..')
                query_output = self.database.execute_query(query_str)
                total_time = time.time() - start_time
                print('Time taken - ' + str(total_time))
                run_time = run_time + total_time
                print('Execution done!')

            def get_view_creation_query(tbl_1, tbl_2):
                view_name = str(tbl_1) + '_' + str(tbl_2)
                query_str = str(tbl_1) + ' JOIN ' + str(tbl_2) + ';'
                query_str = query_str + "CREATE VIEW IF NOT EXISTS " + view_name + " AS " + query_str
                return query_str

            # Then run through the history and get costs for the actions
            # taken by the agent
            if len(step_history['actions']) > 0:
                for step in step_history['actions']:
                    if step['action']:
                        idx = np.where(step['candidate'] == 1)
                        print(idx)
                        temp = self.join_table_mapping[int(idx)].split('-')
                        table_1 = temp[0]
                        table_2 = temp[1]
                        query_str = get_view_creation_query(table_1, table_2)
                        start_time = time.time()
                        query_output = self.database.execute_query(query_str)
                        total_time = time.time() - start_time
                        print('View Creation Time taken - ' + str(total_time))
                        run_time = run_time + total_time
            print('Total runtime - ' + str(run_time))
            print('---------------------------------------------------')
        return run_time

    def hawc_cost_for_episode(self):
        return np.random.randint(0, 100)

    def calculate_reward_for_episode(self):
        initial_reward = 20
        env_reward = self.env_cost_of_episode()
        print(env_reward)
        hawc_reward = self.hawc_cost_for_episode()
        return ((env_reward - initial_reward) /
                (hawc_reward - initial_reward)) * 1000

    def _take_action(self, action, candidate, delay_modifier):
        if action:
            # Add the created view to the obs space
            # self._obs_space = np.add(self._obs_space, candidate)
            # Calculate reward
            if self.current_step < self.max_steps - 1:
                reward = 1
            else:
                # - Do some magic to get cost of the queries
                # to calculate a useful cost for episode
                # - Calculate reward using that
                reward = self.calculate_reward_for_episode()
        else:
            # Add the created view to the obs space
            # Calculate reward
            if self.current_step < self.max_steps - 1:
                reward = 0
            else:
                # - Do some magic to get cost of the queries
                # to calculate a useful cost for episode
                # - Calculate reward using that
                reward = self.calculate_reward_for_episode()
        return reward, False
Example #31
 def test_peek_last_item(self):
     l = LRU(2)
     self.assertEqual(None, l.peek_last_item())
     l[1] = '1'
     l[2] = '2'
     self.assertEqual((1, '1'), l.peek_last_item())
Example #32
def get_memory_cache():
    global _lru
    if _lru is None:
        settings = app_settings['redis']
        _lru = LRU(settings['memory_cache_size'])
    return _lru
Example #33
import concurrent
import json
import os

from aiohttp.web import HTTPNotFound, Response, StreamResponse

from guillotina import app_settings, configure
from guillotina.api.service import DownloadService
from guillotina.component import getUtility
from guillotina.utils import get_dotted_name
from lru import LRU

from . import auth, downloader
from .utility import IGlexUtility

_cache = LRU(100)
CHUNK_SIZE = 1024 * 1024 * 1


def invalidate(key):
    if key in _cache:
        del _cache[key]


def cache(duration=10 * 60):
    def decorator(original_func):
        key = get_dotted_name(original_func)

        async def func(*args):
            if key in _cache:
                return _cache[key]
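Example #33 is cut off inside the decorator, so the following is a self-contained sketch of the same get-or-compute idea over a module-level LRU. How the project actually applies duration is not visible in the snippet; the expiry handling below is an assumption added for illustration:

import time

from lru import LRU

_cache = LRU(100)

def cache(duration=10 * 60):
    def decorator(original_func):
        # Hypothetical key derivation; the snippet uses guillotina's get_dotted_name.
        key = original_func.__module__ + '.' + original_func.__qualname__

        async def func(*args):
            entry = _cache.get(key)
            if entry is not None:
                value, expires_at = entry
                if time.monotonic() < expires_at:   # cached and still fresh
                    return value
            value = await original_func(*args)      # recompute on miss or expiry
            _cache[key] = (value, time.monotonic() + duration)
            return value

        return func
    return decorator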
Example #34
from flask_cors import CORS
import os
from uuid import UUID, uuid4
from lru import LRU
import json

from decisiontree import questions_tree, Node
from naics import company_infos, names
from converter import passNAICS

app = Flask(__name__)
CORS(app, supports_credentials=True)
app.secret_key = os.urandom(24)
app.config.update(SESSION_COOKIE_HTTPONLY=False, SESSION_COOKIE_SECURE=True)

states: Dict[UUID, Union[Node, int]] = LRU(10000)


@app.route("/")
def index():
    return "Hello, World!"


@app.route("/naics/<int:number>", methods=['GET'])
def naics(number: int):
    companies = []
    for c in company_infos:
        if c.NAICS1 == number or c.NAICS2 == number:
            companies.append(
                {'CorporateName': c.CorporateName, 'SalesVolume': c.SalesVolume})
    return jsonify({'companies': companies, 'name': names[number]})
Example #35
File: test_lru.py Project: fabioz/lru-dict
 def test_capacity_get(self):
     for size in SIZES:
         l = LRU(size)
         self.assertTrue(size == l.get_size())
Example #36
            inclusion_slot = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY
            include_attestations = [
                att for att in attestations if att.data.slot == inclusion_slot
            ]
            add_attestations_to_state(spec, state, include_attestations,
                                      state.slot)
        next_slot(spec, state)

    assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
    if not is_post_altair(spec):
        assert len(state.previous_epoch_attestations) == len(attestations)

    return attestations


_prep_state_cache_dict = LRU(size=10)


def cached_prepare_state_with_attestations(spec, state):
    """
    Cached version of prepare_state_with_attestations,
    but does not return anything, and does not support a participation fn argument
    """
    # If the pre-state is not already known in the LRU, then take it,
    # prepare it with attestations, and put it in the LRU.
    # The input state is likely already cached, so the hash-tree-root does not affect speed.
    key = (spec.fork, state.hash_tree_root())
    global _prep_state_cache_dict
    if key not in _prep_state_cache_dict:
        prepare_state_with_attestations(spec, state)
        _prep_state_cache_dict[key] = state.get_backing(
Example #37
File: fcp.py Project: verolero86/pcircle
    def __init__(self, circle, src, dest,
                 treewalk=None,
                 totalsize=0,
                 hostcnt=0,
                 prune=False,
                 verify=False,
                 resume=False,
                 workq=None):
        BaseTask.__init__(self, circle)
        self.circle = circle
        self.treewalk = treewalk
        self.totalsize = totalsize
        self.prune = prune
        self.workq = workq
        self.resume = resume
        self.checkpoint_file = None
        self.src = src
        self.dest = os.path.abspath(dest)

        # cache, keep the size conservative
        # TODO: we need a more portable LRU size

        if hostcnt != 0:
            max_ofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
            procs_per_host = self.circle.size / hostcnt
            self._read_cache_limit = ((max_ofile - 64) / procs_per_host) / 3
            self._write_cache_limit = ((max_ofile - 64) / procs_per_host) * 2 / 3

        if self._read_cache_limit <= 0 or self._write_cache_limit <= 0:
            self._read_cache_limit = 1
            self._write_cache_limit = 8

        self.rfd_cache = LRU(self._read_cache_limit)
        self.wfd_cache = LRU(self._write_cache_limit)

        self.cnt_filesize_prior = 0
        self.cnt_filesize = 0

        self.blocksize = 1024 * 1024
        self.chunksize = 1024 * 1024

        # debug
        self.d = {"rank": "rank %s" % circle.rank}
        self.wtime_started = MPI.Wtime()
        self.wtime_ended = None
        self.workcnt = 0  # this is the cnt for the enqued items
        self.reduce_items = 0  # this is the cnt for processed items
        if self.treewalk:
            log.debug("treewalk files = %s" % treewalk.flist, extra=self.d)

        # fini_check
        self.fini_cnt = Counter()

        # verify
        self.verify = verify
        self.chunksums = []

        # checkpointing
        self.checkpoint_interval = sys.maxsize
        self.checkpoint_last = MPI.Wtime()

        if self.circle.rank == 0:
            print("Start copying process ...")
Example #38
    result[:shape_of_arr[0], :shape_of_arr[1]] = arr
    return result


# Get element from dictionary using key `item`, if nothing found, return None.
def getOrNone(dict, item):
    if item in dict:
        return dict[item]
    return None


# Database instance.
dbi = None

# Sample cache to handle same samples being used over and over in the same Chart.
sample_cache = LRU(5000)


# Get audio class of a sample by its hash.
def getSampleClass(hash):
    global dbi
    if dbi is None:
        dbi = MongoDBInterface()
        dbi.open()
    if hash not in sample_cache:
        try:
            object_class = dbi.getLabelForSample(hash)
            class_number = possible_audio_classes[object_class]
        except:
            class_number = default_class
        sample_cache[hash] = class_number
Example #39
 def __init__(self, name, params):
     BaseCache.__init__(self, params)
     self._cache = _caches.setdefault(name, LRU(self._max_entries))
     self._lock = _locks.setdefault(name, RWLock())
Example #40
class Synonyms(object):
    def __init__(self, jieba_path, lru_cap):
        self.psg = Jieba(jieba_path)
        self.cache = LRU(lru_cap)

    def load_vecs(self, load_fn, w2vFile):
        """
        params:
            load_fn | function - must return dict && 2 dims numpy.array
        """
        self.word2idx, self.weights = load_fn(w2vFile)
        self.idx2word = {v: k for k, v in self.word2idx.items()}
        print(f"load vector completed, vec size - {self.weights.shape}")

        print("build kd tree")
        self.kdtree = KDTree(self.weights, leaf_size=10, metric="euclidean")

        self.vec_len = self.weights.shape[1]

    def _word2vec(self, word, ignore_type="random"):
        """
        ignore_type | string  - "zeros" or "random"
        """
        if word in self.word2idx:
            return self.weights[self.word2idx[word]]
        else:
            return np.random.uniform(low=-2, high=2, size=self.vec_len)

    def _text2vec(self, text, ret_num=5):
        vectors = []
        for w in text:
            c = []
            if w in self.word2idx:
                c.append(self._word2vec(w))
                for nw, _ in self.nearby(w, ret_num=ret_num):
                    c.append(self._word2vec(nw))

                r = np.average(c, axis=0)
                vectors.append(r)
            else:
                vectors.append(
                    np.random.uniform(low=-2, high=2, size=self.vec_len))
        return np.sum(vectors, axis=0)

    def nearby(self, word, ret_num=10, ignore_type="random"):
        word = word.strip()

        # cache
        rets = self.cache.get(word)
        if rets is not None:
            for ret in rets:
                yield ret
            return

        vec = self._word2vec(word, ignore_type)
        [distances], [points] = self.kdtree.query(np.array([vec]),
                                                  k=ret_num,
                                                  return_distance=True)
        rets = []
        for (x, y) in zip(points, distances):
            w = self.idx2word[x]
            if w == word:
                s = 1.0
            else:
                s = cosine(vec, self.weights[x])
            rets.append((w, min(s, 1.0)))

        rets = list(sorted(rets, key=lambda x: x[1], reverse=True))
        self.cache[word] = rets
        for ret in rets:
            yield ret

    def similarity(self, s1, s2, top=5):
        if s1 == s2:
            return 1.

        s1 = self.psg.segment(s1)
        s2 = self.psg.segment(s2)

        assert len(s1) > 0 and len(
            s2) > 0, "The length of s1 and s2 should > 0."

        return cosine(self._text2vec(s1, top), self._text2vec(s2, top))
Example #41
class topic4:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count =1
        # self.time = (self.first,self.last)
        self.l1 = LRU(c_hash)
        self.first =""
        self.last=""
        self.lats=[]
        self.longs=[]
        self.l2 = LRU(c_user)
        self.l3 = LRU(c_words)
        self.l4 = LRU(400)
    def set_hashLRU(self,l):
        self.set(self.l1, l)

    def set_userLRU(self,l):
        self.set(self.l2, l)

    def set_wordLRU(self,l):
        self.set(self.l3, l)

    def set(self, lru, l):
        for k in l:
            v = lru.get(k,0)
            lru[k]=v+1

    def set_cluster(self, hashtags, users, words,links, cords):
        for k in hashtags:
            self.l1[k]=self.l1.get(k,0)+1
        for k in users:
            self.l2[k]=self.l2.get(k,0)+1
        for k in words:
            self.l3[k]=self.l3.get(k,0)+1
        for k in links:
            self.l4[k]=self.l4.get(k,0)+1
        if(cords is not None):
            self.lats.append(cords["coordinates"][1])
            self.longs.append(cords["coordinates"][0])
        self.topic_count+=1

    def get_similarity(self,hashtags,users,words):
        h_sum = 1
        u_sum = 1
        w_sum = 1
        h_match =0
        h_ind =0
        u_ind =0
        w_ind =0
        c=0
        h1 = self.l1.get_size()
        u1 = self.l2.get_size()
        w1 = self.l3.get_size()
        for h in hashtags:
            # l1_items=zip(*self.l1.items())
            h_sum+= self.l1.get(h,0)
            if(self.l1.has_key(h)):
                ind = self.l1.keys().index(h)
                h_ind+= h1 - ind
                h_match+= 1 if ind<250 else 0
        for u in users:
            u_sum+= self.l2.get(u,0)
            if(self.l2.has_key(u)):
                u_ind+= u1 - self.l2.keys().index(u)
        for w in words:
            w_sum+= self.l3.get(w,0)
            if(self.l3.has_key(w)):
                w_ind+= w1 - self.l3.keys().index(w)
        if(h_match !=0):
            c = h_match -1
        # print(h_ind,h1,u_ind,u1,w_ind,w1, h_sum,w_sum,)
        similarity = (h_ind/(h1+1))*(h_sum/sum(self.l1.values() +[1])) + (u_ind/(u1+1))*(u_sum/sum(self.l2.values()+[1])) + (w_ind/(w1+1))*(w_sum/sum(self.l3.values()+[1])) +c
        return similarity
    def flush1(self, cache, size):
        if(len(cache.keys())>5):
            tokens = reversed(cache.keys()[:5])
            cache.clear()
            for i in tokens:
                cache[i]=1


    def flush(self):
        self.flush1(self.l1,500)
        self.flush1(self.l2, 500)
        self.flush1(self.l3,3500)
        self.topic_count=1
Example #42
class MeituDataset(Dataset):
    def __init__(self,
                 data_dir,
                 label_file,
                 n_frame=32,
                 crop_size=112,
                 scale_w=136,
                 scale_h=136,
                 train=True,
                 device_id=0,
                 cache_size=20,
                 mode='dense',
                 transform=None):
        super(MeituDataset, self).__init__()

        def evicted(shape, loader):
            del loader
            #print("loader del shape:",shape)

        self.datadir = data_dir
        self.label_file = label_file
        self.n_frame = n_frame
        self.crop_size = crop_size
        self.scale_w = scale_w
        self.scale_h = scale_h
        self.is_train = train
        self.max_label = 0
        self.clip_list = []
        self.gpu_id = device_id  # type is list
        self.load_list()
        self.nvvl_loader_dict = LRU(cache_size, callback=evicted)
        self.scene_label = list(range(0, 11)) + list(range(
            34, 42))  # multi label classfication
        self.action_label = list(range(11, 34)) + list(
            range(42, 63)
        )  # single label classification # every one sample has a action label
        self.scene_length = len(self.scene_label)
        self.action_length = len(self.action_label)
        print("scene_length is ", self.scene_length)
        print("action length is ", self.action_length)
        self.scene_dict = dict([
            (value, index) for index, value in enumerate(self.scene_label)
        ])  # store  (real_label:train_label)
        self.action_dict = dict([
            (value, index) for index, value in enumerate(self.action_label)
        ])  # store (real_label:train_label)

        self.transform = transform
        self.loader_crt_lock = multiprocessing.Lock()
        #self.loader_list =[pynvvl.NVVLVideoLoader(device_id=temp_id, log_level='error') for temp_id in self.gpu_ids]

    def load_list(self):
        """
        load the train list to construct a file label list
        every item in self.clip_list is (file_dir,label_list)
        :return:
        """

        with open(self.label_file, 'r') as fin:
            for line in fin.readlines():
                vid_info = line.split(',')
                file_name = os.path.join(self.datadir, vid_info[0])
                labels = [int(id) for id in vid_info[1:]]
                self.max_label = max(self.max_label, max(labels))
                self.clip_list.append((file_name, labels))
            self.max_label = self.max_label + 1
        logger.info("load data from %s,num_clip_List %d,max_label %d" %
                    (self.datadir, len(self.clip_list), self.max_label))

    def __len__(self):
        return len(self.clip_list)

    def __getitem__(self, index):
        """
        clip a short video from video and coresponding label set
        :param index:
        :return:
        """
        #temp_id = np.random.choice(self.gpu_ids, 1)[0]
        if (index % 2) == 0:
            self.loader_crt_lock.acquire()
            try:
                with cupy.cuda.Device(self.gpu_id):
                    cupy.get_default_memory_pool().free_all_blocks()
            except Exception as e:
                print(e)
                print('index is ', index)
            self.loader_crt_lock.release()

        video_file, tags = self.clip_list[index]
        video_shape = pynvvl.video_size_from_file(video_file)  #width,height

        self.loader_crt_lock.acquire()
        loader = self.nvvl_loader_dict.get(video_shape, None)
        if loader is None:
            loader = pynvvl.NVVLVideoLoader(device_id=self.gpu_id,
                                            log_level='error')
            #print("create decoder id is ",self.gpu_id)
            self.nvvl_loader_dict[video_shape] = loader
        self.loader_crt_lock.release()

        count = loader.frame_count(video_file)
        while count < self.n_frame:
            index += 1
            video_file, tags = self.clip_list[index]
            video_shape = pynvvl.video_size_from_file(
                video_file)  # width,height

            self.loader_crt_lock.acquire()
            loader = self.nvvl_loader_dict.get(video_shape, None)
            if loader is None:
                loader = pynvvl.NVVLVideoLoader(device_id=self.gpu_id,
                                                log_level='error')
                #print("create decoder id is ", self.gpu_id)
                self.nvvl_loader_dict[video_shape] = loader
            self.loader_crt_lock.release()
            count = loader.frame_count(video_file)

        # start frame index
        if self.is_train:
            if count <= self.n_frame:
                frame_start = 0
            else:
                frame_start = np.random.randint(0,
                                                count - self.n_frame,
                                                dtype=np.int32)
        else:
            frame_start = (count - self.n_frame) // 2

        # rescale shape
        #if self.is_train:

        if self.is_train:
            crop_x = np.random.randint(0,
                                       self.scale_w - self.crop_size,
                                       dtype=np.int32)
            crop_y = np.random.randint(0,
                                       self.scale_h - self.crop_size,
                                       dtype=np.int32)
        else:
            crop_x = (self.scale_w - self.crop_size) // 2
            crop_y = (self.scale_h - self.crop_size) // 2  # center crop

        video = loader.read_sequence(
            video_file,
            0,
            count=self.n_frame,
            sample_mode='key_frame',
            horiz_flip=False,
            scale_height=self.scale_h,
            scale_width=self.scale_w,
            crop_y=crop_y,  #along with vertical direction
            crop_x=crop_x,  #along with horizontal direction
            crop_height=self.crop_size,
            crop_width=self.crop_size,
            scale_method='Linear',
            normalized=False)
        scene_label = np.zeros(shape=(self.scene_length),
                               dtype=np.float32)  # multi_label scene
        action_label = self.action_length - 1  # single label action classification
        for tag_index in tags:
            if tag_index in self.scene_label:
                scene_label[self.scene_dict[tag_index]] = 1
            else:
                action_label = self.action_dict[tag_index]

        #transpose from NCHW to NHWC then to Tensor and normalized
        video = (video.transpose(0, 2, 3, 1) / 255 - cupy.array(
            [0.485, 0.456, 0.406])) / cupy.array([0.229, 0.224, 0.225])
        video = video.transpose(
            3, 0, 1, 2)  # from THWC to CTHW then stack to NCTHW for 3D conv.
        np_video = cupy.asnumpy(video)
        del video
        del loader
        return nd.array(np_video), nd.array(
            scene_label
        ), action_label  # video,multi_label and single action_label
Example #43
File: manager.py Project: Rhoana/mb
class Manager(object):

    def __init__(self):
        '''
        '''
        self._views = LRU(50)
        # tile cache - enough for 1 MFOV for 10 parallel users
        self._tiles = LRU(61 * 10)

        self._client_tiles = {}

    def start(self):
        '''
        '''
        pass

    def check_path_type(self, data_path):
        '''
        Check whether the data_path is a scan, section or fov.
        '''

        # we should check how many levels deep is the IMAGE_COORDINATES_FILE
        # level 0: this is a FOV
        # level 1: this is a section
        # level 2: this is a scan

        if os.path.exists(
            os.path.join(
                data_path,
                settings.IMAGE_COORDINATES_FILE)):
            return 'FOV'

        if os.path.exists(
            os.path.join(
                data_path,
                Util.get_first_level_subdir(data_path),
                settings.IMAGE_COORDINATES_FILE)):
            return 'SECTION'

        if os.path.exists(
            os.path.join(
                data_path,
                Util.get_second_level_subdir(data_path),
                settings.IMAGE_COORDINATES_FILE)):
            return 'SCAN'

        return None

    def get_tree(self, data_path):
        '''
        '''

        if not data_path:
            data_path = settings.DEFAULT_DATA_FOLDER

        dir_content = sorted(Util.listdir(data_path))

        dir_listing = []

        for c in dir_content:

            full_url = os.path.join(data_path, c)

            # if not os.path.isdir(full_url):
            #   continue

            entry = {}
            entry['label'] = c
            entry['full_url'] = full_url
            entry['id'] = os.path.join(data_path, c)
            entry['load_on_demand'] = True

            dir_listing.append(entry)

        return dir_listing

    def get_content(self, data_path):
        '''
        Sends the content listing for a given path. This detects if the path is
        scan, section or fov.
        '''

        views = []

        path_type = self.check_path_type(data_path)

        # detect if this is a scan, section or fov
        if path_type == 'FOV':

            views.append({'data_path': data_path})

        elif path_type == 'SECTION':

            views.append({'data_path': data_path})

        elif path_type == 'SCAN':

            scan = Scan.from_directory(data_path, False)  # lazy indexing

            for i, section in enumerate(scan._sections):

                views.append(
                    {'data_path': os.path.join(data_path, section.id)})

        return views

    def get_meta_info(self, data_path):
        '''
        Get meta information for a requested data path.
        '''

        if data_path not in self._views.keys():

            path_type = self.check_path_type(data_path)

            # detect if this is a section or fov
            if path_type == 'FOV':
                # this is a FoV
                fov = FoV.from_directory(data_path, True)

                view = View.create(
                    data_path,
                    [fov],
                    fov._width,
                    fov._height,
                    fov._tx,
                    fov._ty,
                    self)

            elif path_type == 'SECTION':

                section = Section.from_directory(data_path, True, True)

                view = View.create(
                    data_path,
                    section._fovs,
                    section._width,
                    section._height,
                    section._tx,
                    section._ty,
                    self,
                    section._luts64_map)

            #
            # and add to our views dictionary
            #
            self._views[data_path] = view

        else:

            view = self._views[data_path]

        meta_info = {}
        meta_info['width'] = view._width
        meta_info['height'] = view._height
        meta_info['layer'] = 0
        meta_info['minLevel'] = 0
        meta_info['maxLevel'] = 1
        meta_info['tileSize'] = settings.CLIENT_TILE_SIZE
        meta_info['centers'] = view._centers

        return meta_info

    def get_image(self, data_path, x, y, z, w):
        '''
        Calculate which file(s) we need for the current OpenSeadragon tile,
        then load and downsample them on the fly.
        '''

        # print '-'*80
        # print 'SD', data_path, x, y, z, w

        if settings.CACHE_CLIENT_TILES:

            osd_file_url = (data_path.replace('/', '_') + '_' + str(x) + '_' +
                            str(y) + '_' + str(z) + '_' + str(w) + '.jpg')
            osd_file_url_full = os.path.join(
                settings.CLIENT_TILE_CACHE_FOLDER, osd_file_url)

            if os.path.exists(osd_file_url_full):

                # we have this OSD tile cached on disk
                # print 'OSD CACHE HIT'
                osd_tile = cv2.imread(osd_file_url_full, 0)
                return cv2.imencode('.jpg', osd_tile)[1].tostring()

        view = self._views[data_path]

        # Fall back to an empty dictionary if the View has no luts64_map
        luts64_map = dict()
        if view._luts64_map is not None:
            luts64_map = view._luts64_map

        # calculate canvas coordinates
        x_c = x * settings.CLIENT_TILE_SIZE
        y_c = y * settings.CLIENT_TILE_SIZE
        w_c = settings.CLIENT_TILE_SIZE
        h_c = settings.CLIENT_TILE_SIZE

        top_left = [x_c, y_c]
        bottom_right = [x_c + w_c, y_c + h_c]

        # loop through all tiles and find ones which match the x_c, y_c, w_c,
        # h_c bounding box
        required_tiles = {}
        for t in view._tiles:
            tile_dict = view._tiles[t]

            tile = tile_dict['tile']
            # now the normalized coordinates which should match the coordinate
            # system
            tx = tile_dict['tx'] / 2**w
            ty = tile_dict['ty'] / 2**w
            width = tile_dict['width'] / 2**w
            height = tile_dict['height'] / 2**w
            t_top_left = [tx, ty]
            t_bottom_right = [tx + width, ty + height]

            comp0 = top_left[0] < t_bottom_right[0]
            comp1 = bottom_right[0] > t_top_left[0]
            comp2 = top_left[1] < t_bottom_right[1]
            comp3 = bottom_right[1] > t_top_left[1]

            overlapping = comp0 and comp1 and comp2 and comp3

            if overlapping:
                required_tiles[t] = tile_dict

        stitched_w = min(view._width / 2**w - x_c, settings.CLIENT_TILE_SIZE)
        stitched_h = min(view._height / 2**w - y_c, settings.CLIENT_TILE_SIZE)

        stitched = np.zeros((stitched_h, stitched_w), dtype=np.uint8)

        if settings.INVERT:
            stitched[:] = 255

        # sort the required tiles to always give priority in the same order
        required_tiles_keys = sorted(
            required_tiles, key=lambda key: required_tiles[key])

        for t in required_tiles_keys:

            tile_dict = required_tiles[t]
            tile = tile_dict['tile']

            # fov paths need to be treated differently
            if self.check_path_type(data_path) != 'FOV':
                t_abs_data_path = os.path.join(data_path, tile_dict['fov'])
            else:
                t_abs_data_path = data_path

            # print 'LOADING', os.path.join(t_abs_data_path, tile._filename)
            if t in self._tiles.keys() and w in self._tiles[t]:
                current_tile = self._tiles[t][w]
                # print 'CACHE HIT'
            else:
                #
                # we add to cache
                #
                # print "Loading lut64_map of: {} --> {}".format(tile.id, luts64_map.get(os.path.split(tile.id)[-1].lower(), None))
                tile_img = tile.load(t_abs_data_path, settings.IMAGE_PREFIX, lut_base64=luts64_map.get(os.path.split(tile.id)[-1].lower(), None))

                current_tile = Manager.downsample_image(tile_img, 2**w)
                self._tiles[t] = {w: current_tile}

            # stitch it in our little openseadragon tile
            tx = tile_dict['tx'] / 2**w
            ty = tile_dict['ty'] / 2**w
            t_width = tile_dict['width'] / 2**w
            t_height = tile_dict['height'] / 2**w

            stitched_x = int(max(tx, top_left[0]) - top_left[0])
            stitched_y = int(max(ty, top_left[1]) - top_left[1])

            stitched_w = int(
                min(t_width - max(top_left[0] - tx, 0),
                    settings.CLIENT_TILE_SIZE - stitched_x))
            stitched_h = int(
                min(t_height - max(top_left[1] - ty, 0),
                    settings.CLIENT_TILE_SIZE - stitched_y))

            t_sub_x = int(max(tx, top_left[0]) - tx)
            t_sub_y = int(max(ty, top_left[1]) - ty)

            stitched[
                stitched_y:stitched_y +
                stitched_h,
                stitched_x:stitched_x +
                stitched_w] = current_tile[
                    t_sub_y:t_sub_y +
                    stitched_h,
                    t_sub_x:t_sub_x +
                    stitched_w]

        if settings.INVERT:
            stitched = 255 - stitched

        if settings.CACHE_CLIENT_TILES:
            # print 'Writing OSD tile', osd_file_url_full
            cv2.imwrite(osd_file_url_full, stitched)

        return cv2.imencode('.jpg', stitched)[1].tostring()

    # Helper function
    @staticmethod
    def downsample_image(imagedata, factor):
        '''
        Downsample imagedata by the given factor using linear interpolation.
        '''
        if factor == 1.:
            return imagedata

        factor = 1. / factor
        return cv2.resize(imagedata, (0, 0), fx=factor,
                          fy=factor, interpolation=cv2.INTER_LINEAR)
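The Manager above keeps two LRU caches: _views for assembled View objects and _tiles for downsampled tile images, so repeated OpenSeadragon requests do not reload and resample the same data. Below is a minimal sketch of that tile-caching pattern with lru.LRU; TileCache, load_fn and the capacity are illustrative assumptions, not part of the mb project.

from lru import LRU

import cv2


class TileCache(object):
    '''Keep only the most recently used downsampled tiles in memory.'''

    def __init__(self, capacity=610):
        self._tiles = LRU(capacity)  # (tile_id, level) -> image array

    def get(self, tile_id, level, load_fn):
        key = (tile_id, level)
        if key in self._tiles:                     # a hit also refreshes recency
            return self._tiles[key]
        image = load_fn(tile_id)                   # load the full-resolution tile
        if level > 0:                              # downsample on the fly
            factor = 1.0 / 2 ** level
            image = cv2.resize(image, (0, 0), fx=factor, fy=factor,
                               interpolation=cv2.INTER_LINEAR)
        self._tiles[key] = image                   # LRU evicts the oldest entry if full
        return image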
Example #44
0
File: test_lru.py  Project: fabioz/lru-dict
 def test_add_within_size(self):
     for size in SIZES:
         l = LRU(size)
         for i in xrange(size):
             l[i] = str(i)
         self._check_kvi(range(size - 1, -1, -1), l)
Example #45
0
class topic4:
    def __init__(self, c_hash, c_user, c_words):
        self.topic_count = 1
        self.l1 = LRU(c_hash)
        self.l2 = LRU(c_user)
        self.l3 = LRU(c_words)

    def set_hashLRU(self, l):
        self.set(self.l1, l)

    def set_userLRU(self, l):
        self.set(self.l2, l)

    def set_wordLRU(self, l):
        self.set(self.l3, l)

    def set(self, lru, l):
        for k in l:
            v = lru.get(k, 0)
            lru[k] = v + 1

    def set_cluster(self, hashtags, users, words):
        for k in hashtags:
            self.l1[k] = self.l1.get(k, 0) + 1
        for k in users:
            self.l2[k] = self.l2.get(k, 0) + 1
        for k in words:
            self.l3[k] = self.l3.get(k, 0) + 1
        self.topic_count += 1

    def get_similarity(self, hashtags, users, words):
        h_sum = 1
        u_sum = 1
        w_sum = 1
        h_match = 0
        h_ind = 0
        u_ind = 0
        w_ind = 0
        c = 0
        h1 = self.l1.get_size()
        u1 = self.l2.get_size()
        w1 = self.l3.get_size()
        for h in hashtags:
            # l1_items = zip(*self.l1.items())
            h_sum += self.l1.get(h, 0)
            if self.l1.has_key(h):
                ind = self.l1.keys().index(h)
                h_ind += h1 - ind
                h_match += 1 if ind < 250 else 0
        for u in users:
            u_sum += self.l2.get(u, 0)
            if self.l2.has_key(u):
                u_ind += u1 - self.l2.keys().index(u)
        for w in words:
            w_sum += self.l3.get(w, 0)
            if self.l3.has_key(w):
                w_ind += w1 - self.l3.keys().index(w)
        if h_match != 0:
            c = h_match - 1
        # print(h_ind, h1, u_ind, u1, w_ind, w1, h_sum, w_sum)
        similarity = ((h_ind / (h1 + 1)) * (h_sum / sum(self.l1.values() + [1]))
                      + (u_ind / (u1 + 1)) * (u_sum / sum(self.l2.values() + [1]))
                      + (w_ind / (w1 + 1)) * (w_sum / sum(self.l3.values() + [1]))
                      + c)
        return similarity
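The topic4 class above uses each LRU purely as a bounded frequency counter: lru[k] = lru.get(k, 0) + 1 keeps counts for the keys seen most recently while the oldest keys are silently evicted. A tiny self-contained illustration of that counting idiom (capacity and data are made up):

from lru import LRU

counts = LRU(3)                      # keep counts only for the 3 most recent keys
for word in ["a", "b", "a", "c", "d", "a"]:
    counts[word] = counts.get(word, 0) + 1

# "b" was evicted when "d" arrived; recently touched keys keep their counts
print(counts.items())                # [('a', 3), ('d', 1), ('c', 1)]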
Example #46
0
class ColorsPlan(Plan):
    """
    Return ``[[hsbk, ...]]`` for all the items in the chain of the device.

    So for a bulb you'll get ``[[<hsbk>]]``.

    For a Strip or candle you'll get ``[[<hsbk>, <hsbk>, ...]]``

    And for a tile you'll get ``[[<hsbk>, <hsbk>, ...], [<hsbk>, <hsbk>, ...]]``

    Where ``<hsbk>`` is a :ref:`photons_messages.fields.color` object.
    """

    HSBKCache = LRU(3000)
    colors_struct = struct.Struct("<" + "H" * 64 * 4)

    default_refresh = 1

    @property
    def dependant_info(kls):
        return {"c": CapabilityPlan(), "chain": ChainPlan(), "zones": ZonesPlan()}

    class Instance(Plan.Instance):
        def setup(self):
            self.result = []

        @property
        def is_light(self):
            return self.deps["c"]["cap"].is_light

        @property
        def zones(self):
            return self.deps["c"]["cap"].zones

        @property
        def messages(self):
            if not self.is_light:
                return Skip

            if self.zones is Zones.SINGLE:
                return [LightMessages.GetColor()]

            elif self.zones is Zones.MATRIX:
                return [
                    TileMessages.Get64(
                        x=0, y=0, tile_index=0, length=255, width=self.deps["chain"]["width"]
                    )
                ]

            else:
                return []

        def process(self, pkt):
            if self.zones is Zones.LINEAR:
                self.result = [(0, [c for _, c in self.deps["zones"]])]
                return True

            if self.zones is Zones.SINGLE and pkt | LightMessages.LightState:
                self.result = [(0, [Color(pkt.hue, pkt.saturation, pkt.brightness, pkt.kelvin)])]
                return True

            if pkt | TileMessages.State64:
                colors = self.deps["chain"]["reverse_orient"](pkt.tile_index, pkt.colors)
                self.result.append((pkt.tile_index, colors))

                if len(self.result) == len(self.deps["chain"]["chain"]):
                    return True

        async def info(self):
            return [colors for _, colors in sorted(self.result)]
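ColorsPlan above declares a class-level HSBKCache = LRU(3000) next to a precompiled colors_struct, presumably so identical color payloads only have to be parsed once. A generic sketch of that kind of memoization with the struct module follows; _unpack_cache, _hsbk_struct and unpack_hsbk are hypothetical names, not photons API.

from lru import LRU
import struct

_unpack_cache = LRU(3000)
_hsbk_struct = struct.Struct("<HHHH")          # hue, saturation, brightness, kelvin

def unpack_hsbk(raw):
    cached = _unpack_cache.get(raw)
    if cached is None:
        cached = _hsbk_struct.unpack(raw)      # pay the unpack cost only on a miss
        _unpack_cache[raw] = cached
    return cached

print(unpack_hsbk(_hsbk_struct.pack(1000, 65535, 32768, 3500)))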
Example #47
0
 def test_update(self):
     l = LRU(2)
     l['a'] = 1
     self.assertEqual(l['a'], 1)
     l.update(a=2)
     self.assertEqual(l['a'], 2)
     l['b'] = 2
     self.assertEqual(l['b'], 2)
     l.update(b=3)
     self.assertEqual(('b', 3), l.peek_first_item())
     self.assertEqual(l['a'], 2)
     self.assertEqual(l['b'], 3)
     l.update({'a':1, 'b':2})
     self.assertEqual(('b', 2), l.peek_first_item())
     self.assertEqual(l['a'], 1)
     self.assertEqual(l['b'], 2)
     l.update()
     self.assertEqual(('b', 2), l.peek_first_item())
     l.update(a=2)
     self.assertEqual(('a', 2), l.peek_first_item())
Example #48
0
File: test_lru.py  Project: fabioz/lru-dict
    def test_lru(self):
        l = LRU(1)
        l['a'] = 1
        l['a']
        self.assertEqual(l.keys(), ['a'])
        l['b'] = 2
        self.assertEqual(l.keys(), ['b'])

        l = LRU(2)
        l['a'] = 1
        l['b'] = 2
        self.assertEqual(len(l), 2)
        l['a']  # Testing the first one
        l['c'] = 3
        self.assertEqual(sorted(l.keys()), ['a', 'c'])
        l['c']
        self.assertEqual(sorted(l.keys()), ['a', 'c'])

        l = LRU(3)
        l['a'] = 1
        l['b'] = 2
        l['c'] = 3
        self.assertEqual(len(l), 3)
        l['b']  # Testing the middle one
        l['d'] = 4
        self.assertEqual(sorted(l.keys()), ['b', 'c', 'd'])
        l['d']  # Testing the last one
        self.assertEqual(sorted(l.keys()), ['b', 'c', 'd'])
        l['e'] = 5
        self.assertEqual(sorted(l.keys()), ['b', 'd', 'e'])
Example #49
0
    def test_callback(self):

        counter = [0]

        first_key = 'a'
        first_value = 1

        def callback(key, value):
            self.assertEqual(key, first_key)
            self.assertEqual(value, first_value)
            counter[0] += 1

        l = LRU(1, callback=callback)
        l[first_key] = first_value
        l['b'] = 1              # test calling the callback

        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), ['b'])

        l['b'] = 2              # doesn't call callback
        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), ['b'])
        self.assertEqual(l.values(), [2])


        l = LRU(1, callback=callback)
        l[first_key] = first_value

        l.set_callback(None)
        l['c'] = 1              # doesn't call callback
        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), ['c'])

        l.set_callback(callback)
        del l['c']              # doesn't call callback
        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), [])

        l = LRU(2, callback=callback)
        l['a'] = 1              # test calling the callback
        l['b'] = 2              # test calling the callback

        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), ['b', 'a'])
        l.set_size(1)
        self.assertEqual(counter[0], 2) # callback invoked
        self.assertEqual(l.keys(), ['b'])
Example #50
0
import logging
import math
import time

from lru import LRU

from guillotina_authentication.user import OAuthUser
from guillotina_authentication import utils
from guillotina.contrib import cache
from guillotina.exceptions import ContainerNotFound
from guillotina.utils import get_current_container

logger = logging.getLogger(__name__)

USER_CACHE_DURATION = 60 * 1
NON_IAT_VERIFY = {
    'verify_iat': False,
}

LOCAL_CACHE = LRU(100)


class OAuthClientIdentifier:
    def get_user_cache_key(self, login):
        return '{}-{}-{}'.format(
            getattr(self.request, '_container_id', 'root'), login,
            math.ceil(math.ceil(time.time()) / USER_CACHE_DURATION))

    async def get_user(self, token):
        if token.get('type') not in ('bearer', 'wstoken', 'cookie'):
            return

        if '.' not in token.get('token', ''):
            # quick way to check if actually might be jwt
            return
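get_user_cache_key above folds a time bucket derived from time.time() and USER_CACHE_DURATION into the cache key, so cached users naturally stop being looked up once the bucket rolls over, even though LRU itself has no TTL. A minimal sketch of that time-bucketed caching idea; cache_key, get_cached_user and fetch are hypothetical helpers, not guillotina API.

from lru import LRU
import math
import time

USER_CACHE_DURATION = 60                  # seconds per cache bucket
LOCAL_CACHE = LRU(100)

def cache_key(login):
    # the bucket index changes every USER_CACHE_DURATION seconds, so stale
    # entries are no longer requested and eventually fall out of the LRU
    bucket = math.ceil(time.time() / USER_CACHE_DURATION)
    return '{}-{}'.format(login, bucket)

def get_cached_user(login, fetch):
    key = cache_key(login)
    if key not in LOCAL_CACHE:
        LOCAL_CACHE[key] = fetch(login)   # fetch is any callable returning user data
    return LOCAL_CACHE[key]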
Example #51
0
 def test_capacity_get(self):
     for size in SIZES:
         l = LRU(size)
         self.assertTrue(size == l.get_size())
Example #52
0
 def __init__(self, size):
     self.mac = LRU(size)
     self.ssid = LRU(size)
     self.vendor = LRU(size)
Example #53
0
File: test_lru.py  Project: fabioz/lru-dict
 def test_empty(self):
     l = LRU(1)
     self.assertEquals([], l.keys())
     self.assertEquals([], l.values())
Example #54
0
    lambda value: float(value) / 32767).allow_float()

hsbk_with_optional = (("hue", scaled_hue.optional()),
                      ("saturation", scaled_to_65535.optional()),
                      ("brightness", scaled_to_65535.optional()),
                      ("kelvin", T.Uint16.optional()))

hsbk = (("hue", scaled_hue), ("saturation", scaled_to_65535),
        ("brightness", scaled_to_65535), ("kelvin", T.Uint16.default(3500)))


class Color(dictobj.PacketSpec):
    fields = hsbk


Color.Meta.cache = LRU(8000)

tile_state_device = (("accel_meas_x", T.Int16), ("accel_meas_y", T.Int16),
                     ("accel_meas_z", T.Int16), ("reserved6", T.Reserved(16)),
                     ("user_x", T.Float), ("user_y", T.Float),
                     ("width", T.Uint8), ("height", T.Uint8), ("reserved7",
                                                               T.Reserved(8)),
                     ("device_version_vendor",
                      T.Uint32), ("device_version_product", T.Uint32),
                     ("device_version_version",
                      T.Uint32), ("firmware_build",
                                  T.Uint64), ("reserved8", T.Reserved(64)),
                     ("firmware_version",
                      T.Uint32.version_number()), ("reserved9",
                                                   T.Reserved(32)))
Example #55
0
    def test_stats(self):
        for size in SIZES:
            l = LRU(size)
            for i in range(size):
                l[i] = str(i)

            self.assertTrue(l.get_stats() == (0, 0))

            val = l[0]
            self.assertTrue(l.get_stats() == (1, 0))

            val = l.get(0, None)
            self.assertTrue(l.get_stats() == (2, 0))

            val = l.get(-1, None)
            self.assertTrue(l.get_stats() == (2, 1))

            try:
                val = l[-1]
            except:
                pass

            self.assertTrue(l.get_stats() == (2, 2))

            l.clear()
            self.assertTrue(len(l) == 0)
            self.assertTrue(l.get_stats() == (0, 0))
Example #56
0
 def reset_cache(self) -> None:
     self._cached_values = LRU(self._cache_size)
Example #57
0
 def test_empty(self):
     l = LRU(1)
     self.assertEquals([], l.keys())
     self.assertEquals([], l.values())
Example #58
0
File: fcp.py  Project: verolero86/pcircle
class FCP(BaseTask):
    def __init__(self, circle, src, dest,
                 treewalk=None,
                 totalsize=0,
                 hostcnt=0,
                 prune=False,
                 verify=False,
                 resume=False,
                 workq=None):
        BaseTask.__init__(self, circle)
        self.circle = circle
        self.treewalk = treewalk
        self.totalsize = totalsize
        self.prune = prune
        self.workq = workq
        self.resume = resume
        self.checkpoint_file = None
        self.src = src
        self.dest = os.path.abspath(dest)

        # cache, keep the size conservative
        # TODO: we need a more portable LRU size
        # start from safe defaults so the limits exist even when hostcnt == 0
        self._read_cache_limit = 1
        self._write_cache_limit = 8

        if hostcnt != 0:
            max_ofile, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
            procs_per_host = self.circle.size / hostcnt
            self._read_cache_limit = ((max_ofile - 64) / procs_per_host) / 3
            self._write_cache_limit = ((max_ofile - 64) / procs_per_host) * 2 / 3

        if self._read_cache_limit <= 0 or self._write_cache_limit <= 0:
            self._read_cache_limit = 1
            self._write_cache_limit = 8

        self.rfd_cache = LRU(self._read_cache_limit)
        self.wfd_cache = LRU(self._write_cache_limit)

        self.cnt_filesize_prior = 0
        self.cnt_filesize = 0

        self.blocksize = 1024 * 1024
        self.chunksize = 1024 * 1024

        # debug
        self.d = {"rank": "rank %s" % circle.rank}
        self.wtime_started = MPI.Wtime()
        self.wtime_ended = None
        self.workcnt = 0  # count of enqueued items
        self.reduce_items = 0  # count of processed items
        if self.treewalk:
            log.debug("treewalk files = %s" % treewalk.flist, extra=self.d)

        # fini_check
        self.fini_cnt = Counter()

        # verify
        self.verify = verify
        self.chunksums = []

        # checkpointing
        self.checkpoint_interval = sys.maxsize
        self.checkpoint_last = MPI.Wtime()

        if self.circle.rank == 0:
            print("Start copying process ...")

    def rw_cache_limit(self):
        return (self._read_cache_limit, self._write_cache_limit)

    def set_fixed_chunksize(self, sz):
        self.chunksize = sz

    def set_adaptive_chunksize(self, totalsz):
        self.chunksize = utils.calc_chunksize(totalsz)
        if self.circle.rank == 0:
            print("Adaptive chunksize: %s" % bytes_fmt(self.chunksize))

    def cleanup(self):
        for f in self.rfd_cache.values():
            try:
                os.close(f)
            except OSError as e:
                pass

        for f in self.wfd_cache.values():
            try:
                os.close(f)
            except OSError as e:
                pass

        # remove checkpoint file
        if self.checkpoint_file and os.path.exists(self.checkpoint_file):
            os.remove(self.checkpoint_file)

        # if the last job didn't finish cleanly, leftover fwalk files may
        # remain; give fcp cleanup the chance to remove them here

        fwalk = "%s/fwalk.%s" % (self.circle.tempdir, self.circle.rank)
        if os.path.exists(fwalk):
            os.remove(fwalk)

    def new_fchunk(self, fitem):
        fchunk = FileChunk()  # default cmd = copy
        fchunk.src = fitem.path
        fchunk.dest = destpath(fitem, self.dest)
        return fchunk

    def enq_file(self, fi):
        """ Process a single file, represented by "fi" - FileItem
        It involves chunking this file and equeue all chunks. """

        chunks = fi.st_size / self.chunksize
        remaining = fi.st_size % self.chunksize

        workcnt = 0

        if fi.st_size == 0:  # empty file
            fchunk = self.new_fchunk(fi)
            fchunk.offset = 0
            fchunk.length = 0
            self.enq(fchunk)
            workcnt += 1
        else:
            for i in range(chunks):
                fchunk = self.new_fchunk(fi)
                fchunk.offset = i * self.chunksize
                fchunk.length = self.chunksize
                self.enq(fchunk)
            workcnt += chunks

        if remaining > 0:
            # send remainder
            fchunk = self.new_fchunk(fi)
            fchunk.offset = chunks * self.chunksize
            fchunk.length = remaining
            self.enq(fchunk)
            workcnt += 1

        # save work cnt
        self.workcnt += workcnt

        log.debug("enq_file(): %s, size = %s, workcnt = %s" % (fi.path, fi.st_size, workcnt),
                     extra=self.d)

    def handle_fitem(self, fi):
        if os.path.islink(fi.path):
            dest = destpath(fi, self.dest)
            linkto = os.readlink(fi.path)
            try:
                os.symlink(linkto, dest)
            except Exception as e:
                log.debug("%s, skipping sym link %s" % (e, fi.path), extra=self.d)
        elif stat.S_ISREG(fi.st_mode):
            self.enq_file(fi)  # where chunking takes place

    def create(self):
        """ Each task has one create(), which is invoked by circle ONCE.
        For FCP, each task will handle_fitem() -> enq_file()
        to process each file gathered during the treewalk stage. """

        if not G.use_store and self.workq:  # restart
            self.setq(self.workq)
            return

        if self.resume:
            return

        # construct and enable all copy operations
        # we batch operation hard-coded
        log.info("create() starts, flist length = %s" % len(self.treewalk.flist),
                    extra=self.d)

        if G.use_store:
            while self.treewalk.flist.qsize > 0:
                fitems, _ = self.treewalk.flist.mget(G.DB_BUFSIZE)
                for fi in fitems:
                    self.handle_fitem(fi)
                self.treewalk.flist.mdel(G.DB_BUFSIZE)

            # store checkpoint
            log.debug("dbname = %s" % self.circle.dbname)
            dirname = os.path.dirname(self.circle.dbname)
            basename = os.path.basename(self.circle.dbname)
            chkpointname = basename + ".CHECK_OK"
            self.checkpoint_file = os.path.join(dirname, chkpointname)
            with open(self.checkpoint_file, "w") as f:
                f.write("%s" % self.totalsize)

        else:  # use memory
            for fi in self.treewalk.flist:
                self.handle_fitem(fi)

            # memory-checkpoint
            if self.checkpoint_file:
                self.do_no_interrupt_checkpoint()
                self.checkpoint_last = MPI.Wtime()

    def do_open(self, k, d, flag, limit):
        """
        @param k: the file path
        @param d: dictionary of <path, file descriptor>
        @return: file descriptor
        """
        if d.has_key(k):
            return d[k]

        if len(d.keys()) >= limit:
            # over the limit
            # clean up the least used
            old_k, old_v = d.items()[-1]
            try:
                os.close(old_v)
            except OSError as e:
                log.warn("FD for %s not valid when closing" % old_k, extra=self.d)

        fd = -1
        try:
            fd = os.open(k, flag)
        except OSError as e:
            if e.errno == 28:  # no space left
                log.error("Critical error: %s, exit!" % e, extra=self.d)
                self.circle.exit(0)  # should abort
            else:
                log.error("OSError({0}):{1}, skipping {2}".format(e.errno, e.strerror, k), extra=self.d)
        else:
            if fd > 0:
                d[k] = fd
        finally:
            return fd

    @staticmethod
    def do_mkdir(work):
        src = work.src
        dest = work.dest
        if not os.path.exists(dest):
            os.makedirs(dest)

    def do_copy(self, work):
        src = work.src
        dest = work.dest

        basedir = os.path.dirname(dest)
        if not os.path.exists(basedir):
            os.makedirs(basedir)

        rfd = self.do_open(src, self.rfd_cache, os.O_RDONLY, self._read_cache_limit)
        if rfd < 0:
            return False
        wfd = self.do_open(dest, self.wfd_cache, os.O_WRONLY | os.O_CREAT, self._write_cache_limit)
        if wfd < 0:
            if args.force:
                try:
                    os.unlink(dest)
                except OSError as e:
                    log.error("Failed to unlink %s, %s " % (dest, e), extra=self.d)
                    return False
                else:
                    wfd = self.do_open(dest, self.wfd_cache, os.O_WRONLY, self._write_cache_limit)
            else:
                log.error("Failed to create output file %s" % dest, extra=self.d)
                return False

        # do the actual copy
        self.write_bytes(rfd, wfd, work)

        # update tally
        self.cnt_filesize += work.length

        if G.verbosity > 2:
            log.debug("Transferred %s bytes from:\n\t [%s] to [%s]" %
                         (self.cnt_filesize, src, dest), extra=self.d)

        return True

    def do_no_interrupt_checkpoint(self):
        a = Thread(target=self.do_checkpoint)
        a.start()
        a.join()
        log.debug("checkpoint: %s" % self.checkpoint_file, extra=self.d)

    def do_checkpoint(self):
        for k in self.wfd_cache.keys():
            os.close(self.wfd_cache[k])

        # clear the cache
        self.wfd_cache.clear()

        tmp_file = self.checkpoint_file + ".part"
        with open(tmp_file, "wb") as f:
            cobj = Checkpoint(self.src, self.dest, self.get_workq(), self.totalsize)
            pickle.dump(cobj, f, pickle.HIGHEST_PROTOCOL)
        # POSIX requires rename to be atomic
        os.rename(tmp_file, self.checkpoint_file)

    def process(self):
        """
        The only work is "copy"
        TODO: clean up other actions such as mkdir/fini_check
        """
        if not G.use_store:
            curtime = MPI.Wtime()
            if curtime - self.checkpoint_last > self.checkpoint_interval:
                self.do_no_interrupt_checkpoint()
                log.info("Checkpointing done ...", extra=self.d)
                self.checkpoint_last = curtime

        work = self.deq()
        self.reduce_items += 1
        if isinstance(work, FileChunk):
            self.do_copy(work)
        else:
            log.warn("Unknown work object: %s" % work, extra=self.d)

    def reduce_init(self, buf):
        buf['cnt_filesize'] = self.cnt_filesize

    def reduce(self, buf1, buf2):
        buf1['cnt_filesize'] += buf2['cnt_filesize']
        return buf1

    def reduce_report(self, buf):
        out = ""
        if self.totalsize != 0:
            out += "%.2f %% finished, " % (100 * float(buf['cnt_filesize']) / self.totalsize)

        out += "%s copied" % bytes_fmt(buf['cnt_filesize'])

        if self.circle.reduce_time_interval != 0:
            rate = float(buf['cnt_filesize'] - self.cnt_filesize_prior) / self.circle.reduce_time_interval
            self.cnt_filesize_prior = buf['cnt_filesize']
            out += ", estimated transfer rate: %s/s" % bytes_fmt(rate)

        print(out)

    def reduce_finish(self, buf):
        # self.reduce_report(buf)
        pass

    def epilogue(self):
        global taskloads
        self.wtime_ended = MPI.Wtime()
        taskloads = self.circle.comm.gather(self.reduce_items)
        if self.circle.rank == 0:
            if self.totalsize == 0:
                print("\nZero filesize detected, done.\n")
                return
            tlapse = self.wtime_ended - self.wtime_started
            rate = float(self.totalsize) / tlapse
            print("\nFCP Epilogue:\n")
            print("\t{:<20}{:<20}".format("Ending at:", utils.current_time()))
            print("\t{:<20}{:<20}".format("Completed in:", utils.conv_time(tlapse)))
            print("\t{:<20}{:<20}".format("Transfer Rate:", "%s/s" % bytes_fmt(rate)))
            print("\t{:<20}{:<20}".format("FCP Loads:", "%s" % taskloads))

    def read_then_write(self, rfd, wfd, work, num_of_bytes, m):
        """ core entry point for copy action: first read then write.

        @param num_of_bytes: the exact amount of bytes we will copy
        @return: False if unsuccessful.

        """
        buf = None
        try:
            buf = readn(rfd, num_of_bytes)
        except IOError:
            self.logger.error("Failed to read %s", work.src, extra=self.d)
            return False

        try:
            writen(wfd, buf)
        except IOError:
            self.logger.error("Failed to write %s", work.dest, extra=self.d)
            return False

        if m:
            m.update(buf)

        return True

    def write_bytes(self, rfd, wfd, work):
        os.lseek(rfd, work.offset, os.SEEK_SET)
        os.lseek(wfd, work.offset, os.SEEK_SET)

        m = None
        if self.verify:
            m = hashlib.sha1()

        remaining = work.length
        while remaining != 0:
            if remaining >= self.blocksize:
                self.read_then_write(rfd, wfd, work, self.blocksize, m)
                remaining -= self.blocksize
            else:
                self.read_then_write(rfd, wfd, work, remaining, m)
                remaining = 0

        if self.verify:
            # use src path here
            ck = ChunkSum(work.src, offset=work.offset, length=work.length,
                          digest=m.hexdigest())
            self.chunksums.append(ck)
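FCP.do_open() above closes the least recently used descriptor by hand before the LRU evicts it. The eviction callback shown in Example #49 (test_callback) can do the same thing automatically; here is a hedged sketch of that variant, with hypothetical names fd_cache and open_cached.

from lru import LRU
import os

def _close_evicted(path, fd):
    # invoked by the LRU whenever an entry is evicted to make room
    try:
        os.close(fd)
    except OSError:
        pass

fd_cache = LRU(8, callback=_close_evicted)

def open_cached(path, flags=os.O_RDONLY):
    if path in fd_cache:
        return fd_cache[path]
    fd = os.open(path, flags)
    fd_cache[path] = fd        # may evict (and close) the least recently used fd
    return fd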
Example #59
0
messages every 0.075 seconds we can't make them fast enough.

This file contains a more manual implementation of the Set64 message that tries
to be as efficient as possible to allow us to keep up with the animation.
"""
from photons_messages import TileMessages, MultiZoneMessages
from photons_protocol.packing import PacketPacking
from photons_messages.fields import Color

from delfick_project.norms import sb
from lru import LRU
import binascii
import bitarray
import struct

ColorCache = LRU(0xFFFF)

TargetCache = LRU(1000)

seed_set64 = TileMessages.Set64.create(
    source=0,
    sequence=0,
    target="d073d5000000",
    res_required=False,
    ack_required=False,
    tile_index=0,
    length=1,
    x=0,
    y=0,
    width=8,
    duration=0,
Example #60
0
class AccountDB(AccountDatabaseAPI):
    logger = get_extended_debug_logger('eth.db.account.AccountDB')

    def __init__(self,
                 db: AtomicDatabaseAPI,
                 state_root: Hash32 = BLANK_ROOT_HASH) -> None:
        r"""
        Internal implementation details (subject to rapid change):
        Database entries go through several pipes, like so...

        .. code::

            db > _batchdb ---------------------------> _journaldb ----------------> code lookups
             \
              -> _batchtrie -> _trie -> _trie_cache -> _journaltrie --------------> account lookups

        Journaling sequesters writes at the _journal* attrs ^, until persist is called.

        _batchtrie enables us to prune all trie changes while building
        state, without deleting old trie roots.

        _batchdb and _batchtrie together enable us to make the state root,
        without saving everything to the database.

        _journaldb is a journaling of the keys and values used to store
        code and account storage.

        _trie is a hash-trie, used to generate the state root

        _trie_cache is a cache tied to the state root of the trie. It
        is important that this cache is checked *after* looking for
        the key in _journaltrie, because the cache is only invalidated
        after a state root change.

        _journaltrie is a journaling of the accounts (an address->rlp mapping,
        rather than the nodes stored by the trie). This enables
        a squashing of all account changes before pushing them into the trie.

        .. NOTE:: StorageDB works similarly

        AccountDB synchronizes the snapshot/revert/persist of both of the
        journals.
        """
        self._raw_store_db = db
        self._batchdb = BatchDB(db)
        self._batchtrie = BatchDB(db, read_through_deletes=True)
        self._journaldb = JournalDB(self._batchdb)
        self._trie = HashTrie(
            HexaryTrie(self._batchtrie, state_root, prune=True))
        self._trie_cache = CacheDB(self._trie)
        self._journaltrie = JournalDB(self._trie_cache)
        self._account_cache = LRU(2048)
        self._account_stores: Dict[Address, AccountStorageDatabaseAPI] = {}
        self._dirty_accounts: Set[Address] = set()
        self._root_hash_at_last_persist = state_root

    @property
    def state_root(self) -> Hash32:
        return self._trie.root_hash

    @state_root.setter
    def state_root(self, value: Hash32) -> None:
        if self._trie.root_hash != value:
            self._trie_cache.reset_cache()
            self._trie.root_hash = value

    def has_root(self, state_root: bytes) -> bool:
        return state_root in self._batchtrie

    #
    # Storage
    #
    def get_storage(self,
                    address: Address,
                    slot: int,
                    from_journal: bool = True) -> int:
        validate_canonical_address(address, title="Storage Address")
        validate_uint256(slot, title="Storage Slot")

        account_store = self._get_address_store(address)
        return account_store.get(slot, from_journal)

    def set_storage(self, address: Address, slot: int, value: int) -> None:
        validate_uint256(value, title="Storage Value")
        validate_uint256(slot, title="Storage Slot")
        validate_canonical_address(address, title="Storage Address")

        account_store = self._get_address_store(address)
        self._dirty_accounts.add(address)
        account_store.set(slot, value)

    def delete_storage(self, address: Address) -> None:
        validate_canonical_address(address, title="Storage Address")

        self._set_storage_root(address, BLANK_ROOT_HASH)
        self._wipe_storage(address)

    def _wipe_storage(self, address: Address) -> None:
        """
        Wipe out the storage, without explicitly handling the storage root update
        """
        account_store = self._get_address_store(address)
        self._dirty_accounts.add(address)
        account_store.delete()

    def _get_address_store(self,
                           address: Address) -> AccountStorageDatabaseAPI:
        if address in self._account_stores:
            store = self._account_stores[address]
        else:
            storage_root = self._get_storage_root(address)
            store = AccountStorageDB(self._raw_store_db, storage_root, address)
            self._account_stores[address] = store
        return store

    def _dirty_account_stores(
            self) -> Iterable[Tuple[Address, AccountStorageDatabaseAPI]]:
        for address in self._dirty_accounts:
            store = self._account_stores[address]
            yield address, store

    @to_tuple
    def _get_changed_roots(self) -> Iterable[Tuple[Address, Hash32]]:
        # list all the accounts that were changed, and their new storage roots
        for address, store in self._dirty_account_stores():
            if store.has_changed_root:
                yield address, store.get_changed_root()

    def _get_storage_root(self, address: Address) -> Hash32:
        account = self._get_account(address)
        return account.storage_root

    def _set_storage_root(self, address: Address,
                          new_storage_root: Hash32) -> None:
        account = self._get_account(address)
        self._set_account(address, account.copy(storage_root=new_storage_root))

    def _validate_flushed_storage(self, address: Address,
                                  store: AccountStorageDatabaseAPI) -> None:
        if store.has_changed_root:
            actual_storage_root = self._get_storage_root(address)
            expected_storage_root = store.get_changed_root()
            if expected_storage_root != actual_storage_root:
                raise ValidationError(
                    "Storage root was not saved to account before trying to persist roots. "
                    f"Account {address!r} had storage {actual_storage_root!r}, "
                    f"but should be {expected_storage_root!r}.")

    #
    # Balance
    #
    def get_balance(self, address: Address) -> int:
        validate_canonical_address(address, title="Storage Address")

        account = self._get_account(address)
        return account.balance

    def set_balance(self, address: Address, balance: int) -> None:
        validate_canonical_address(address, title="Storage Address")
        validate_uint256(balance, title="Account Balance")

        account = self._get_account(address)
        self._set_account(address, account.copy(balance=balance))

    #
    # Nonce
    #
    def get_nonce(self, address: Address) -> int:
        validate_canonical_address(address, title="Storage Address")

        account = self._get_account(address)
        return account.nonce

    def set_nonce(self, address: Address, nonce: int) -> None:
        validate_canonical_address(address, title="Storage Address")
        validate_uint256(nonce, title="Nonce")

        account = self._get_account(address)
        self._set_account(address, account.copy(nonce=nonce))

    def increment_nonce(self, address: Address) -> None:
        current_nonce = self.get_nonce(address)
        self.set_nonce(address, current_nonce + 1)

    #
    # Code
    #
    def get_code(self, address: Address) -> bytes:
        validate_canonical_address(address, title="Storage Address")

        code_hash = self.get_code_hash(address)
        if code_hash == EMPTY_SHA3:
            return b''
        else:
            try:
                return self._journaldb[code_hash]
            except KeyError:
                raise MissingBytecode(code_hash) from KeyError

    def set_code(self, address: Address, code: bytes) -> None:
        validate_canonical_address(address, title="Storage Address")
        validate_is_bytes(code, title="Code")

        account = self._get_account(address)

        code_hash = keccak(code)
        self._journaldb[code_hash] = code
        self._set_account(address, account.copy(code_hash=code_hash))

    def get_code_hash(self, address: Address) -> Hash32:
        validate_canonical_address(address, title="Storage Address")

        account = self._get_account(address)
        return account.code_hash

    def delete_code(self, address: Address) -> None:
        validate_canonical_address(address, title="Storage Address")

        account = self._get_account(address)
        self._set_account(address, account.copy(code_hash=EMPTY_SHA3))

    #
    # Account Methods
    #
    def account_has_code_or_nonce(self, address: Address) -> bool:
        return self.get_nonce(address) != 0 or self.get_code_hash(
            address) != EMPTY_SHA3

    def delete_account(self, address: Address) -> None:
        validate_canonical_address(address, title="Storage Address")

        if address in self._account_cache:
            del self._account_cache[address]
        del self._journaltrie[address]

        self._wipe_storage(address)

    def account_exists(self, address: Address) -> bool:
        validate_canonical_address(address, title="Storage Address")
        account_rlp = self._get_encoded_account(address, from_journal=True)
        return account_rlp != b''

    def touch_account(self, address: Address) -> None:
        validate_canonical_address(address, title="Storage Address")

        account = self._get_account(address)
        self._set_account(address, account)

    def account_is_empty(self, address: Address) -> bool:
        return not self.account_has_code_or_nonce(
            address) and self.get_balance(address) == 0

    #
    # Internal
    #
    def _get_encoded_account(self,
                             address: Address,
                             from_journal: bool = True) -> bytes:
        lookup_trie = self._journaltrie if from_journal else self._trie_cache

        try:
            return lookup_trie[address]
        except trie_exceptions.MissingTrieNode as exc:
            raise MissingAccountTrieNode(*exc.args) from exc
        except KeyError:
            # In case the account is deleted in the JournalDB
            return b''

    def _get_account(self,
                     address: Address,
                     from_journal: bool = True) -> Account:
        if from_journal and address in self._account_cache:
            return self._account_cache[address]

        rlp_account = self._get_encoded_account(address, from_journal)

        if rlp_account:
            account = rlp.decode(rlp_account, sedes=Account)
        else:
            account = Account()
        if from_journal:
            self._account_cache[address] = account
        return account

    def _set_account(self, address: Address, account: Account) -> None:
        self._account_cache[address] = account
        rlp_account = rlp.encode(account, sedes=Account)
        self._journaltrie[address] = rlp_account

    #
    # Record and discard API
    #
    def record(self) -> JournalDBCheckpoint:
        checkpoint = self._journaldb.record()
        self._journaltrie.record(checkpoint)

        for _, store in self._dirty_account_stores():
            store.record(checkpoint)
        return checkpoint

    def discard(self, checkpoint: JournalDBCheckpoint) -> None:
        self._journaldb.discard(checkpoint)
        self._journaltrie.discard(checkpoint)
        self._account_cache.clear()
        for _, store in self._dirty_account_stores():
            store.discard(checkpoint)

    def commit(self, checkpoint: JournalDBCheckpoint) -> None:
        self._journaldb.commit(checkpoint)
        self._journaltrie.commit(checkpoint)
        for _, store in self._dirty_account_stores():
            store.commit(checkpoint)

    def make_state_root(self) -> Hash32:
        for _, store in self._dirty_account_stores():
            store.make_storage_root()

        for address, storage_root in self._get_changed_roots():
            self.logger.debug2(
                "Updating account 0x%s to storage root 0x%s",
                address.hex(),
                storage_root.hex(),
            )
            self._set_storage_root(address, storage_root)

        self._journaldb.persist()

        diff = self._journaltrie.diff()
        # In addition to squashing (which is redundant here), this context manager causes
        # an atomic commit of the changes, so exceptions will revert the trie
        with self._trie.squash_changes() as memory_trie:
            self._apply_account_diff_without_proof(diff, memory_trie)

        self._journaltrie.reset()
        self._trie_cache.reset_cache()

        return self.state_root

    def persist(self) -> None:
        self.make_state_root()

        # persist storage
        with self._raw_store_db.atomic_batch() as write_batch:
            for address, store in self._dirty_account_stores():
                self._validate_flushed_storage(address, store)
                store.persist(write_batch)

        for address, new_root in self._get_changed_roots():
            if new_root not in self._raw_store_db and new_root != BLANK_ROOT_HASH:
                raise ValidationError(
                    "After persisting storage trie, a root node was not found. "
                    f"State root for account 0x{address.hex()} "
                    f"is missing for hash 0x{new_root.hex()}.")

        # reset local storage trackers
        self._account_stores = {}
        self._dirty_accounts = set()

        # persist accounts
        self._validate_generated_root()
        new_root_hash = self.state_root
        self.logger.debug2("Persisting new state root: 0x%s",
                           new_root_hash.hex())
        with self._raw_store_db.atomic_batch() as write_batch:
            self._batchtrie.commit_to(write_batch, apply_deletes=False)
            self._batchdb.commit_to(write_batch, apply_deletes=False)
        self._root_hash_at_last_persist = new_root_hash

    def _validate_generated_root(self) -> None:
        db_diff = self._journaldb.diff()
        if len(db_diff):
            raise ValidationError(
                f"AccountDB had a dirty db when it needed to be clean: {db_diff!r}"
            )
        trie_diff = self._journaltrie.diff()
        if len(trie_diff):
            raise ValidationError(
                f"AccountDB had a dirty trie when it needed to be clean: {trie_diff!r}"
            )

    def _log_pending_accounts(self) -> None:
        diff = self._journaltrie.diff()
        for address in sorted(diff.pending_keys()):
            account = self._get_account(Address(address))
            self.logger.debug2(
                "Pending Account %s: balance %d, nonce %d, storage root %s, code hash %s",
                to_checksum_address(address),
                account.balance,
                account.nonce,
                encode_hex(account.storage_root),
                encode_hex(account.code_hash),
            )
        for deleted_address in sorted(diff.deleted_keys()):
            cast_deleted_address = Address(deleted_address)
            self.logger.debug2(
                "Deleted Account %s, empty? %s, exists? %s",
                to_checksum_address(deleted_address),
                self.account_is_empty(cast_deleted_address),
                self.account_exists(cast_deleted_address),
            )

    def _apply_account_diff_without_proof(self, diff: DBDiff,
                                          trie: DatabaseAPI) -> None:
        """
        Apply diff of trie updates, when original nodes might be missing.
        Note that doing this naively will raise exceptions about missing nodes
        from *intermediate* trie roots. This captures exceptions and uses the previous
        trie root hash that will be recognized by other nodes.
        """
        # It's fairly common that when an account is deleted, we need to retrieve nodes
        # for accounts that were not needed during normal execution. We only need these
        # nodes to refactor the trie.
        for delete_key in diff.deleted_keys():
            try:
                del trie[delete_key]
            except trie_exceptions.MissingTrieNode as exc:
                self.logger.debug(
                    "Missing node while deleting account with key %s: %s",
                    encode_hex(delete_key),
                    exc,
                )
                raise MissingAccountTrieNode(
                    exc.missing_node_hash,
                    self._root_hash_at_last_persist,
                    exc.requested_key,
                ) from exc

        # It's fairly unusual, but possible, that setting an account will need unknown
        # nodes during a trie refactor. Here is an example that seems to cause it:
        #
        # Setup:
        #   - Root node is a branch, with 0 pointing to a leaf
        #   - The complete leaf key is (0, 1, 2), so (1, 2) is in the leaf node
        #   - We know the leaf node hash but not the leaf node body
        # Refactor that triggers missing node:
        #   - Add value with key (0, 3, 4)
        #   - We need to replace the current leaf node with a branch that points leaves at 1 and 3
        #   - The leaf for key (0, 1, 2) now contains only the (2) part, so needs to be rebuilt
        #   - We need the full body of the old (1, 2) leaf node, to rebuild

        for key, val in diff.pending_items():
            try:
                trie[key] = val
            except trie_exceptions.MissingTrieNode as exc:
                self.logger.debug(
                    "Missing node on account update key %s to %s: %s",
                    encode_hex(key),
                    encode_hex(val),
                    exc,
                )
                raise MissingAccountTrieNode(
                    exc.missing_node_hash,
                    self._root_hash_at_last_persist,
                    exc.requested_key,
                ) from exc
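AccountDB caches decoded Account objects in _account_cache = LRU(2048): reads populate the cache, delete_account() drops single entries, and discard() clears it wholesale because the underlying journal may have changed. A generic, hypothetical sketch of that read-through-with-invalidation pattern (ReadThroughCache and backend are illustrative names, not eth code):

from lru import LRU


class ReadThroughCache(object):
    def __init__(self, backend, size=2048):
        self._backend = backend          # any mapping with an expensive decode/lookup
        self._cache = LRU(size)

    def get(self, key):
        if key in self._cache:
            return self._cache[key]
        value = self._backend[key]       # hit the slow path only on a cache miss
        self._cache[key] = value
        return value

    def invalidate(self, key=None):
        if key is None:
            self._cache.clear()          # e.g. after discarding a journal checkpoint
        elif key in self._cache:
            del self._cache[key]         # e.g. after deleting a single account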