Example #1
File: Core.py Project: afcarl/butterfly
    def __init__(self, db):
        # Get DB Terms
        self._db = db
        RUNTIME = db.RUNTIME

        # Make Cache with keywords
        self._cache = Cache(RUNTIME)
Example #2
    def __init__(self, number_of_blocks, words_per_block, memory_address_size, byte_addressed=True):
        Cache.__init__(self, number_of_blocks, words_per_block, memory_address_size, byte_addressed=byte_addressed)

        words_string = "word0"
        for i in range(words_per_block - 1):
            words_string += " word%d" % (i + 1,)
        self._cache_block = namedtuple("cache_block", "lastUsedTime tag %s" % words_string)
Example #3
 def test_020_banks(self):
     cache = Cache()
     cache.put("Bank1", CacheItem("key1", "val1"))
     cache.put("Bank2", CacheItem("key1", "val2"))
     v = cache.getBanks()
     self.assertIn("Bank1", v)
     self.assertIn("Bank2", v)
Example #4
def create_cache(records_file_name):
    cache = Cache()
    records_table = open(records_file_name, "r")
    for line in records_table:
        cache.add_static_record(Record.from_json(line))
    records_table.close()
    return cache
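
Note: create_cache above reads one record per line, so the records file is
presumably newline-delimited JSON. A minimal usage sketch, assuming
Record.from_json accepts a JSON string as in the snippet; the file name and
record fields are made up for illustration:

    # static_records.jsonl -- one JSON object per line, e.g.:
    # {"name": "example.com", "type": "A", "value": "93.184.216.34"}
    cache = create_cache("static_records.jsonl")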
Example #5
class Memory(object):
    def __init__(self):
        self.n_writes = 0
        self.n_reads = 0

        """Get flags from command line"""
        l1i_flags = sys.argv[1:4]
        l1d_flags = sys.argv[4:7]
        l2_flags = sys.argv[7].split(':')

        """Instantiate Caches"""
        self.l1i = Cache(l1i_flags[0], l1i_flags[1], l1i_flags[2])
        self.l1d = Cache(l1d_flags[0], l1d_flags[1], l1d_flags[2])
        self.l2 = Cache(l2_flags[0], l2_flags[1], l2_flags[2])

    def read(self, end):
        self.l1i.findDirect(end)

    def write(self, end):
        pass

    def printB(self):
        self.l1i.printBits()
        
    def getMisses(self):
        misses = np.array(self.l1i.getMisses())
        misses += np.array(self.l1d.getMisses())
        misses += np.array(self.l2.getMisses())
        return misses
        
    def getHits(self):
        return self.l1i.n_hits + self.l1d.n_hits + self.l2.n_hits

    def getWriteMiss(self):
        return (self.l1i.getWriteMiss(), self.l1d.getWriteMiss(), self.l2.getWriteMiss())
Example #6
 def test_030_keys(self):
     cache = Cache()
     cache.put("Bank1", CacheItem("key1", "val1"))
     cache.put("Bank1", CacheItem("key2", "val2"))
     v = cache.getBank("Bank1").keys()
     self.assertIn("key1", v)
     self.assertIn("key2", v)
Example #7
def load_cached(cache_path, in_dir):
    """
    Wrapper-function for creating a DataSet-object, which will be
    loaded from a cache-file if it already exists, otherwise a new
    object will be created and saved to the cache-file.
    This is useful if you need to ensure the ordering of the
    filenames is consistent every time you load the data-set,
    for example if you use the DataSet-object in combination
    with Transfer Values saved to another cache-file, see e.g.
    Tutorial #09 for an example of this.
    :param cache_path:
        File-path for the cache-file.
    :param in_dir:
        Root-dir for the files in the data-set.
        This is an argument for the DataSet-init function.
    :return:
        The DataSet-object.
    """

    print("Creating dataset from the files in: " + in_dir)

    # If the object-instance for DataSet(in_dir=data_dir) already
    # exists in the cache-file then reload it, otherwise create
    # an object instance and save it to the cache-file for next time.

    cache = Cache()
    dataset = cache.cache_data(cache_path=cache_path,
                               fn=DataSet,
                               in_dir=in_dir)

    return dataset
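
Aside: the cache_data call above is project-specific, but the cache-or-create
pattern its docstring describes is easy to reproduce with only the standard
library. A minimal sketch, assuming the cached object is picklable:

    import os
    import pickle

    def cache_data(cache_path, fn, **kwargs):
        # Reload the object if the cache-file already exists ...
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as file:
                return pickle.load(file)
        # ... otherwise create it and save it for next time.
        obj = fn(**kwargs)
        with open(cache_path, "wb") as file:
            pickle.dump(obj, file)
        return obj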
Example #8
    def test_050_deleteNotExists(self):
        cache = Cache()

        with self.assertRaises(Exception) as context:
            cache.delete("Bank1", "key1")

        self.assertTrue("Key key1 doesn't exist" in str(context.exception))
Example #9
	def __init__(self):
		self.isServiceAvailable = True
		signal.signal(signal.SIGINT, self.exit_gracefully)
		signal.signal(signal.SIGTERM, self.exit_gracefully)

		self.logging = error_reporting.Client()
		self.cache = Cache(ttl=5)

		self.coinGecko = CoinGeckoAPI()
		self.lastBitcoinQuote = {
			"quotePrice": [0],
			"quoteVolume": None,
			"ticker": Ticker("BTCUSD", "BTCUSD", "BTC", "USD", "BTC/USD", hasParts=False),
			"exchange": None,
			"timestamp": time.time()
		}

		try:
			rawData = self.coinGecko.get_coin_by_id(id="bitcoin", localization="false", tickers=False, market_data=True, community_data=False, developer_data=False)
			self.lastBitcoinQuote["quotePrice"] = [rawData["market_data"]["current_price"]["usd"]]
			self.lastBitcoinQuote["quoteVolume"] = rawData["market_data"]["total_volume"]["usd"]
		except: pass

		context = zmq.Context.instance()
		self.socket = context.socket(zmq.ROUTER)
		self.socket.bind("tcp://*:6900")

		print("[Startup]: Quote Server is online")
Example #10
	def __init__(self, settings, datadir, log, mempool, netmagic,
		     readonly=False, fast_dbm=False):
		self.settings = settings
		self.log = log
		self.mempool = mempool
		self.readonly = readonly
		self.netmagic = netmagic
		self.fast_dbm = fast_dbm
		self.blk_cache = Cache(750)
		self.orphans = {}
		self.orphan_deps = {}
		if readonly:
			mode_str = 'r'
		else:
			mode_str = 'c'
			if fast_dbm:
				self.log.write("Opening database in fast mode")
				mode_str += 'f'
		self.misc = gdbm.open(datadir + '/misc.dat', mode_str)
		self.blocks = gdbm.open(datadir + '/blocks.dat', mode_str)
		self.height = gdbm.open(datadir + '/height.dat', mode_str)
		self.blkmeta = gdbm.open(datadir + '/blkmeta.dat', mode_str)
		self.tx = gdbm.open(datadir + '/tx.dat', mode_str)

		if 'height' not in self.misc:
			self.log.write("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
			self.misc['height'] = str(-1)
			self.misc['msg_start'] = self.netmagic.msg_start
			self.misc['tophash'] = ser_uint256(0L)
			self.misc['total_work'] = hex(0L)

		if 'msg_start' not in self.misc or (self.misc['msg_start'] != self.netmagic.msg_start):
			self.log.write("Database magic number mismatch. Data corruption or incorrect network?")
			raise RuntimeError
Example #11
def crawlSingleLocation(id):
    url = 'https://www.geocaching.com/geocache/' + id
    opener = getURLOpener()

    with opener.open(url) as response:
        bs = soup(response.read().decode('utf-8'), 'lxml')
        userTokenIndex = str(bs).find("userToken = '") + 13
        userToken = str(bs)[userTokenIndex : userTokenIndex + 167]

        cache = Cache()
        cache.id = id
        try:
            cache.name = bs.find(id="ctl00_ContentBody_CacheName").string
            cache.difficulty = bs.find(id="ctl00_ContentBody_uxLegendScale").img['alt'].split()[0]
            cache.terrain = bs.find(id="ctl00_ContentBody_Localize12").img['alt'].split()[0]
            cache.size = bs.find(id="ctl00_ContentBody_size").img['alt'].split()[1]
            lat, lon = splitLatLon(bs.find(id="uxLatLon").string)
            cache.lat = lat
            cache.lon = lon
            cache.creator = bs.find(id="ctl00_ContentBody_mcd1").a.string.split(',')[0]
        except:
            print("Error parsing fields of cache with ID {}".format(id))

        try:
            getUsers(userToken, cache)
        except:
            print("Error parsing users of cache with ID {}".format(id))
Example #12
 def __init__(self,
              nucleo,
              memoriaPrincipal,
              tcb,
              barrera,
              cacheHermana=None):
     threading.Thread.__init__(self)
     self.nucleo = nucleo
     self.name = nucleo
     self.nucleoHermano = None
     self.reloj = 0
     self.ir = []  #Instruction register
     self.cache = Cache(self, memoriaPrincipal, cacheHermana)
     self.tcb = tcb
     self.barrera = barrera
     self.programCounter = 384
     self.hililloActual = None
     self.registros = [0] * 32
     self.instructionSet = {
         5: "lw",
         19: "addi",
         37: "sw",
         56: "div",
         71: "add",
         72: "mul",
         83: "sub",
         99: "beq",
         100: "bne",
         103: "jalr",
         111: "jal",
         999: "FIN"
     }
Example #13
    def simulate(self):

        #model '0' is unified
        iaddress = []
        daddress = []
        if self.cacheModel == '0':
            address = th.trace_handler(self.cacheModel, self.filename)
            cache = Cache(self.addrSize, self.cacheSize, self.blockSize,
                          self.associativity, address)
            hitCount, totalAccesses = cache.buildCache()
            self.display(totalAccesses, hitCount)

        #Split cache
        elif self.cacheModel == '1':
            access_type, address = th.trace_handler(self.cacheModel,
                                                    self.filename)
            for i in range(len(access_type)):
                if access_type[i] == '2':
                    iaddress.append(address[i])

                elif access_type[i] in ('1', '0'):
                    daddress.append(address[i])

            icache = Cache(self.addrSize, self.icacheSize, self.blockSize,
                           self.associativity, iaddress)
            ihitCount, itotalAccesses = icache.buildCache()

            dcache = Cache(self.addrSize, self.dcacheSize, self.blockSize,
                           self.associativity, daddress)
            dhitCount, dtotalAccesses = dcache.buildCache()

            totalAccesses = itotalAccesses + dtotalAccesses
            hitCount = ihitCount + dhitCount
            self.display(totalAccesses, hitCount)
Example #14
    def __init__(self):
        self.isServiceAvailable = True
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

        self.logging = error_reporting.Client()
        self.cache = Cache()

        context = zmq.Context.instance()
        self.socket = context.socket(zmq.ROUTER)
        self.socket.bind("tcp://*:6900")

        print("[Startup]: Detail Server is online")
Example #15
    def __init__(self,
                 settings,
                 datadir,
                 log,
                 mempool,
                 netmagic,
                 readonly=False,
                 fast_dbm=False,
                 compression=False):
        self.settings = settings
        self.log = log
        self.mempool = mempool
        self.readonly = readonly
        self.netmagic = netmagic
        self.fast_dbm = fast_dbm
        self.blk_cache = Cache(1000)
        self.orphans = {}
        self.orphan_deps = {}
        self.compress_on_write = compression

        # LevelDB to hold:
        #    tx:*      transaction outputs
        #    misc:*    state
        #    height:*  list of blocks at height h
        #    blkmeta:* block metadata
        #    blocks:*  block seek point in stream
        self.blk_write = io.BufferedWriter(
            io.FileIO(datadir + '/blocks.dat', 'ab'))
        self.blk_read = io.BufferedReader(
            io.FileIO(datadir + '/blocks.dat', 'rb'))
        self.db = leveldb.LevelDB(datadir + '/leveldb')

        try:
            self.db.Get('misc:height')
        except KeyError:
            self.log.write("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
            batch = leveldb.WriteBatch()
            batch.Put('misc:height', str(-1))
            batch.Put('misc:msg_start', self.netmagic.msg_start)
            batch.Put('misc:tophash', ser_uint256(0L))
            batch.Put('misc:total_work', hex(0L))
            self.db.Write(batch)

        try:
            start = self.db.Get('misc:msg_start')
            if start != self.netmagic.msg_start: raise KeyError
        except KeyError:
            self.log.write(
                "Database magic number mismatch. Data corruption or incorrect network?"
            )
            raise RuntimeError
Example #16
class Manager(object):
    def __init__(self, basedir=None):
        self.cache = Cache(basedir)
        self.auto_tagger = AutoTagger(self.get('auto-tags'))

    def __backup_thread(self):
        while True:
            self.cache.dump()
            time.sleep(self.get('settings')['backup']['interval'])

    def __sync_thread(self):
        while True:
            try:
                resources_tags = self.auto_tagger.process(
                    Crawler(
                        self.get('black-list'),
                        self.get('white-list'),
                        self.get('crawled-resources'),
                    ).crawl())

                SyncAgent(
                    self.get('settings')['server'],
                    self.get('settings')['user-token'],
                    self.get('settings')['device-token'],
                ).sync(resources_tags)

            except Exception as new_exception:
                print('[ERROR]: When trying to sync: {0}'.format(
                    new_exception.message))

            else:
                self.get('crawled-resources').update(
                    set(resource for resource, _ in resources_tags))

            time.sleep(self.get('settings')['sync']['interval'])

    def add_to_black_list(self, directory):
        return Crawler.add_to_dirlist(self.get('black-list'), directory)

    def add_to_white_list(self, directory):
        return Crawler.add_to_dirlist(self.get('white-list'), directory)

    def get(self, key):
        return self.cache.get(key)

    def start_backup_daemon(self):
        DaemonThread(target=self.__backup_thread).start()

    def start_sync_daemon(self):
        DaemonThread(target=self.__sync_thread).start()
Example #17
    def __init__(self, numberOfSets, number_of_blocks, words_per_block, memory_address_size, byte_addressed=True):
        Cache.__init__(self, number_of_blocks, words_per_block,
                       memory_address_size, byte_addressed=byte_addressed)
        # Delete self._cache for safety to make sure it's not used somewhere hidden
        del self._cache
        # Add a _caches attribute holding one block array per set
        self._caches = [[None] * number_of_blocks for _ in range(numberOfSets)]

        words_string = "word0"
        for i in range(words_per_block - 1):
            words_string += " word%d" % (i + 1,)
        self._cache_block = namedtuple("cache_block", "lastUsedTime tag %s" % words_string)
Example #18
class Manager(object):
    def __init__(self, basedir=None):
        self.cache = Cache(basedir)
        self.auto_tagger = AutoTagger(self.get('auto-tags'))

    def __backup_thread(self):
        while True:
            self.cache.dump()
            time.sleep(self.get('settings')['backup']['interval'])

    def __sync_thread(self):
        while True:
            try:
                resources_tags = self.auto_tagger.process(
                    Crawler(
                        self.get('black-list'),
                        self.get('white-list'),
                        self.get('crawled-resources'),
                    ).crawl()
                )

                SyncAgent(
                    self.get('settings')['server'],
                    self.get('settings')['user-token'],
                    self.get('settings')['device-token'],
                ).sync(resources_tags)

            except Exception as new_exception:
                print('[ERROR]: When trying to sync: {0}'.format(new_exception.message))

            else:
                self.get('crawled-resources').update(set(resource for resource, _ in resources_tags))

            time.sleep(self.get('settings')['sync']['interval'])

    def add_to_black_list(self, directory):
        return Crawler.add_to_dirlist(self.get('black-list'), directory)

    def add_to_white_list(self, directory):
        return Crawler.add_to_dirlist(self.get('white-list'), directory)

    def get(self, key):
        return self.cache.get(key)

    def start_backup_daemon(self):
        DaemonThread(target=self.__backup_thread).start()

    def start_sync_daemon(self):
        DaemonThread(target=self.__sync_thread).start()
Example #19
File: Core.py Project: Rhoana/butterfly
    def __init__(self, db):
        # Get DB Terms
        self._db = db
        RUNTIME = db.RUNTIME

        # Make Cache with keywords
        self._cache = Cache(RUNTIME)
Example #20
def cache_factory(mgr, kind):
    if kind == cache_options[0]:
        return Cache(mgr)
    elif kind == cache_options[1]:
        return FileCache(mgr)
    else:
        raise ValueError("{} is not a valid cache type!".format(kind))
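
A usage sketch for cache_factory above. Here cache_options is assumed to be a
two-element sequence of kind names such as ("memory", "file"), and mgr is
whatever manager object the caches expect; both are assumptions, not part of
the original snippet:

    cache_options = ("memory", "file")
    mem_cache = cache_factory(mgr, "memory")   # -> Cache(mgr)
    file_cache = cache_factory(mgr, "file")    # -> FileCache(mgr)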
Example #21
    def __init__(self,
                 mobile,
                 password=None,
                 status='0',
                 cachefile='Fetion.cache',
                 cookiesfile=''):
        '''Login status:
        Online: 400  Invisible: 0  Busy: 600  Away: 100
        '''
        if cachefile:
            self.cache = Cache(cachefile)

        if not cookiesfile:
            cookiesfile = '%s.cookies' % mobile

        cookiejar = MozillaCookieJar(filename=cookiesfile)
        if not os.path.isfile(cookiesfile):
            open(cookiesfile, 'w').write(MozillaCookieJar.header)

        cookiejar.load(filename=cookiesfile)

        cookie_processor = HTTPCookieProcessor(cookiejar)

        self.opener = build_opener(cookie_processor, HTTPHandler)
        self.mobile, self.password = mobile, password
        if not self.alive():
            self._login()
            cookiejar.save()

        self.changestatus(status)
Example #22
    def test_070_timeout(self):
        cache = Cache()
        cache.put("Bank1", CacheItem("key1", "val1", 2))
        time.sleep(1)
        cache.touch("Bank1", "key1")
        time.sleep(1)
        self.assertIsNotNone(cache.get("Bank1", "key1"))
        time.sleep(1)

        with self.assertRaises(Exception) as context:
            cache.get("Bank1", "key1")

        self.assertTrue("Key key1 doesn't exist" in str(context.exception))
Example #23
def crawlSingleLocation(id):
    cache = Cache()
    cache.id = id
    cache.name = id + "name"
    cache.difficulty = 2
    cache.terrain = 3
    cache.size = 2
    getUsers(cache)
Example #24
 def test_040_delete(self):
     cache = Cache()
     cache.put("Bank1", CacheItem("key1", "val1"))
     cache.put("Bank1", CacheItem("key2", "val2"))
     cache.delete("Bank1", "key1")
     v = cache.getBank("Bank1").keys()
     self.assertNotIn("key1", v)
     self.assertIn("key2", v)
Example #25
 def render_pdf(self, tree, width, height, out_file):
     image = cairo.PDFSurface(out_file, width, height)
     d = cairo.Context(image)
     cache = Cache()
     root = tree.node(tree.root)
     hasBranchLengths = self.total_branchlength(tree)
     self.constructNode(tree, root, 0.0, math.pi * 2, 0.0, 0.0, 0.0,
                        hasBranchLengths, cache)
     self.drawTree(cache, d, width, height)
     d.stroke()
Example #26
	def __init__(self, cacheFileName=defaultCacheFilename, obsolescenseSeconds=defaultObsolescenseSeconds):
		self.cache=Cache(cacheFileName)
		try:
			self.last=datetime.datetime.fromtimestamp(self.cache["last_update"])
		except:
			self.last=datetime.datetime.fromtimestamp(0)
		
		self.obsolescenseSeconds=obsolescenseSeconds
		
		(self.byISO, self.byName)=self.__class__.buildIndexes(self.getExchangeRates())
Example #27
    def setupConfigObjects(self):
        self.messageModifier = MessageModifier(self.config["privacy"]["userAgent"],\
                self.config["privacy"]["enable"])

        self.logger = Logger(self.config["logging"]["logFile"],\
                self.config["logging"]["enable"])

        self.cache = Cache(self.config["caching"]["size"],\
                self.config["caching"]["enable"])

        self.restrictor = Restrictor(self.config["restriction"]["targets"],\
                self.config["restriction"]["enable"])

        self.accountant = Accountant(self.config["accounting"]["users"])

        self.messageInjector = MessageInjector(self.config["HTTPInjection"]["post"]["body"],\
                self.config["HTTPInjection"]["enable"])

        self.logger.log("Proxy launched")
Example #28
    def test_cache_store_elements_hits(self):
        cache = Cache(5, 1)
        cache.put('key1', 'value1')
        cache.put('key2', 'value2')
        cache.put('key3', 'value3')
        cache.put('key4', 'value4')
        cache.put('key5', 'value5')

        hits_plan = {'key1': 5, 'key2': 4, 'key3': 3, 'key4': 2, 'key5': 1}

        for key, value in hits_plan.items():
            for _ in range(value):
                cache.get(key)

        self.assertEqual(5, cache.hits[3])
        self.assertEqual(4, cache.hits[4])
        self.assertEqual(3, cache.hits[0])
        self.assertEqual(2, cache.hits[1])
        self.assertEqual(1, cache.hits[2])
Example #29
 def __init__(self, config):
     if not config:
         self.bail(-1, "no config: Order init")
     self.config = config
     self.debug = config["debug"]
     #super(Order, self).__init__(config)
     self.cache = Cache({
         "debug": False,
         "prefix": "trade_",
         "host": "localhost"
     })
     self.orders = self.cache.jsonGet("order")
     if not self.orders:
         self.orders = {}
     print self.orders
      if 'stop_lose' in config:
         self.stop_lose = config["stop_lose"]
     else:
         self.stop_lose = 0.03
Example #30
 def __init__(self,
              cacheDataFile: typing.Union[Path, str, "Cache.Cache"] = None):
     from Cache import Cache, compressors
     if cacheDataFile is None:
         cacheDataFile = Path("./" + self.__class__.cacheFilesPrefix +
                              "Cache.sqlite")
     if not isinstance(cacheDataFile, Cache):
         cacheDataFile = Cache(cacheDataFile,
                               compressors.none,
                               commitOnNOps=50)
     self.cacheDataFile = cacheDataFile
Example #31
 def __init__(self, usuario, rol, codigo1, codigo2, codigo3):
     self.window = tk.Tk()
     self.window.title("Sistemas Integrados")
     self.datosCache = Cache(usuario, rol)
     self.sistemaAcceso = SistemaAcceso(codigo1)
     self.sistemaAlarma = SistemaAlarma(codigo2)
     self.sistemaAire = SistemaAire(codigo3)
     self.window.geometry("1000x450")
     self.buildApp()
     self.buildLabels()
     self.update_clock()
     self.sensor = DistanceSensor(self.sistemaAlarma.pinEcho,
                                  self.sistemaAlarma.pinTrig)
     self.setup()
     self.threadAlarma = threading.Thread(target=self.lecturaSensor)
     self.threadAlarma.start()
     self.threadTemperatura = threading.Thread(
         target=self.lecturaSensorTemperatura)
     self.threadTemperatura.start()
     self.window.mainloop()
Example #32
    def __init__(self, mobile, password, status='0', cachefile='Fetion.cache'):
        '''Login status:
        Online: 400  Invisible: 0  Busy: 600  Away: 100
        '''
        if cachefile is not None:
            self.cache = Cache(cachefile)

        self.opener = build_opener(HTTPCookieProcessor(CookieJar()),
                                   HTTPHandler)
        self.mobile, self.password = mobile, password
        self._login()
        self.changestatus(status)
Example #33
    def __init__(self, infile, outfile, num_registers=32):
        self.__disassembler = Disassembler()

        self.__input_file = infile
        self.__output_file = outfile
        self.__f = open('team13_out_pipeline.txt', 'w')

        self.__pc = 96
        self.__cycle = 0

        self.__memory = {}
        self.__last_inst = 0

        self.pre_issue_size = 4
        self.pre_alu_size = 2
        self.pre_mem_size = 2
        self.post_alu_size = 1
        self.post_mem_size = 1

        self.__pre_issue_buffer = deque(maxlen=self.pre_issue_size)

        self.__pre_mem_buffer = deque(maxlen=self.pre_mem_size)
        self.__pre_alu_buffer = deque(maxlen=self.pre_alu_size)

        self.__post_mem_buffer = deque(maxlen=self.post_mem_size)
        self.__post_alu_buffer = deque(maxlen=self.post_alu_size)

        self.__cache_to_load = deque(maxlen=2)

        self.__register_file = RegisterFile(num_registers)
        self.__wb = WriteBackUnit(self.__register_file)
        self.__cache = Cache(self.__memory)
        self.__alu = ALU()
        self.__mem = MemoryUnit(self.__cache)
        self.__if = IFUnit(self.__cache, self.__register_file)
        self.__iu = IssueUnit(self.__register_file, self.__pre_issue_buffer,
                              self.__pre_mem_buffer, self.__pre_alu_buffer,
                              self.__post_mem_buffer, self.__post_alu_buffer)

        self.__read_file()
Example #34
 def testCase2(self):
     cache = Cache(3)
     cache.Add(1, 'a')
     cache.Add(2, 'b') # 1->2
     self.assertEqual(cache.Fetch(1), 'a') # 2->1
     self.assertEqual(cache.Fetch(2), 'b') # 1->2
     cache.Add(3, 'c') # 1->2->3
     self.assertEqual(cache.Fetch(2), 'b') # 1->3->2
     cache.Add(4, 'd') # 3->2->4
     self.assertEqual(cache.Fetch(1), None) 
     self.assertEqual(cache.Fetch(3), 'c') # 2->4->3
     self.assertEqual(cache.Fetch(4), 'd') # 2->3->4
     cache.Add(1, 'a') # 3->4->1
     self.assertEqual(cache.Fetch(2), None) 
     self.assertEqual(cache.Fetch(1), 'a') # 3->4->1
     cache.Remove(4) # 3->1
     self.assertEqual(cache.Fetch(4), None) 
     cache.Remove(3) # 1
     self.assertEqual(cache.Fetch(3), None) 
     cache.Remove(1)
     self.assertEqual(cache.Fetch(1), None) 
     self.assertEqual(cache.size(), 0) 
     cache.Add(1, 'a')
     self.assertEqual(cache.Fetch(1), 'a') 
Example #35
 def testCase1(self):
     cache = Cache(1)
     self.assertEqual(cache.size(), 0) 
     self.assertEqual(cache.capacity(), 1) 
     cache.Add(1, 'a')
     self.assertEqual(cache.size(), 1) 
     self.assertEqual(cache.capacity(), 1) 
     self.assertEqual(cache.Fetch(1), 'a') 
     cache.Add(2, 'b')
     self.assertEqual(cache.Fetch(1), None) 
     self.assertEqual(cache.Fetch(2), 'b') 
     cache.Add(1, 'a')
     self.assertEqual(cache.Fetch(1), 'a') 
     self.assertEqual(cache.Fetch(2), None) 
     cache.Remove(1)
     self.assertEqual(cache.Fetch(1), None) 
     self.assertEqual(cache.size(), 0) 
     self.assertEqual(cache.capacity(), 1) 
     cache.Add(3, 'c')
     cache.Clear()
     self.assertEqual(cache.Fetch(3), None) 
     self.assertEqual(cache.size(), 0) 
Example #36
	def __init__(self, settings, datadir, log, mempool, netmagic,
		     readonly=False, fast_dbm=False,compression=False):
		self.settings = settings
		self.log = log
		self.mempool = mempool
		self.readonly = readonly
		self.netmagic = netmagic
		self.fast_dbm = fast_dbm
		self.blk_cache = Cache(1000)
		self.orphans = {}
		self.orphan_deps = {}
		self.compress_on_write = compression

		# LevelDB to hold:
		#    tx:*      transaction outputs
		#    misc:*    state
		#    height:*  list of blocks at height h
		#    blkmeta:* block metadata
		#    blocks:*  block seek point in stream
		self.blk_write = io.BufferedWriter(io.FileIO(datadir + '/blocks.dat','ab'))
		self.blk_read = io.BufferedReader(io.FileIO(datadir + '/blocks.dat','rb'))
		self.db = leveldb.LevelDB(datadir + '/leveldb')

		try:
			self.db.Get('misc:height')
		except KeyError:
			self.log.write("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
			batch = leveldb.WriteBatch()
			batch.Put('misc:height', str(-1))
			batch.Put('misc:msg_start', self.netmagic.msg_start)
			batch.Put('misc:tophash', ser_uint256(0L))
			batch.Put('misc:total_work', hex(0L))
			self.db.Write(batch)

		try:
			start = self.db.Get('misc:msg_start')
			if start != self.netmagic.msg_start: raise KeyError
		except KeyError:
			self.log.write("Database magic number mismatch. Data corruption or incorrect network?")
			raise RuntimeError
Example #37
	def __init__(self, uri, username = None, password = None, lock = None, classes = None, timeout = None, *args, **kw):
		super(TSSGRepository, self).__init__(*args, **kw)
		
		#raise Exception(uri)
		
		if lock is None:
			from ngniutils.threading import RWLock
			lock = RWLock() 
			
		self.timeout = timeout
		self.__lock = lock
		self.__serializer = TSSGSerializer.TSSGSerializer(self)
		self.__executor = RestExecutor(uri = uri, username = username, password = password, content_type = "text/xml")
		self.__cache = Cache(self, classes, timeout = timeout)
		self.__refresh_done = threading.Condition()
		self.__refreshing = False
		
		if classes:
			import teagle.repository.entities
			teagle.repository.entities._classes = classes
		
		self.refresh()
Example #38
File: Core.py Project: Rhoana/butterfly
class Core(object):
    """ Starts the :class:`Cache`

    Arguments
    -----------
    db : :data:`bfly.Butterfly._db_type`
        A fully-loaded database

    Attributes
    ------------
    _db: :data:`bfly.Butterfly._db_type`
        Taken from first argument ``db``
    _cache: :class:`Cache`
        Able to store images and metadata using \
:class:`UtilityLayer.RUNTIME` instance \
from ``db`` argument
    """
    def __init__(self, db):
        # Get DB Terms
        self._db = db
        RUNTIME = db.RUNTIME

        # Make Cache with keywords
        self._cache = Cache(RUNTIME)

    #####
    # All methods to load data
    #     1. get_info answers an InfoQuery i_query.
    #     2. get_data answers a DataQuery d_query.
    #
    # Both get_info or get_data call update_query.
    #
    # To give answers, update_query uses _cache or:
    #     1. make_data_query if only i_query given.
    #     2. make_tile_query with new or given d_query.
    #####

    @staticmethod
    def get_groups(i_query):
        """ dumps group list for ``i_query`` as a string

        Arguments
        ----------
        i_query: :class:`QueryLayer.InfoQuery`
            A request for a list of groups

        Returns
        --------
        str
            A list of all groups for the ``i_query``
        """
        return i_query.dump

    def get_edits(self, i_query, msg={}):
        """ dumps websocket updates to ``i_query`` as a string

        Calls :meth:`update_query` with more information\
from the cache or from the properties of a tile.

        Arguments
        ----------
        i_query: :class:`QueryLayer.InfoQuery`
            A request for information

        Returns
        --------
        str
            Websocket info for :class:`QueryLayer.InfoQuery`
        """
        keywords = self.update_query(i_query)
        # Update current query with preloaded terms
        i_query.update_source(keywords)
        # Execute websocket command if needed
        changed = i_query.websocket_edit(msg)
        # Add to cache and query
        if len(changed):
            keywords.update(changed)
            i_query.update_source(keywords)
            self._cache.set(i_query.key, keywords)

        # Return the i_query info
        return i_query.dump

    def get_info(self, i_query):
        """ dumps answer to ``i_query`` as a string

        Calls :meth:`update_query` with more information\
from the cache or from the properties of a tile.

        Arguments
        ----------
        i_query: :class:`QueryLayer.InfoQuery`
            A request for information

        Returns
        --------
        str
            Channel info for :class:`QueryLayer.InfoQuery`
        """
        keywords = self.update_query(i_query)
        # Update current query with preloaded terms
        i_query.update_source(keywords)
        # Return the i_query info
        return i_query.dump

    def get_dataset(self, i_query):
        """ dumps dataset from ``i_query`` as a string

        Calls :meth:`update_query` with more information\
from the cache or from the properties of a tile.

        Arguments
        ----------
        i_query: :class:`QueryLayer.InfoQuery`
            A request for information

        Returns
        --------
        str
            Dataset info for :class:`QueryLayer.InfoQuery`
        """
        all_channels = []
        # Update all the channels in the dataset
        for channel in i_query.channels:
            # Set the i_query to a given channel
            i_query.set_channel(channel)
            # Update the info for the channel
            keywords = self.update_query(i_query)
            # Add additional keywords if needed
            channel_key = i_query.OUTPUT.INFO.CHANNEL.NAME
            keywords[channel_key] = channel[channel_key]
            # Add to list of channels
            all_channels.append(keywords)
        # Clear the channel data
        i_query.set_channel({})
        # Update current query with preloaded terms
        return i_query.dump_dataset(all_channels)

    def get_data(self, d_query):
        """ dumps answer to ``d_query`` as a string

        Calls :meth:`update_query` with more information\
from the cache or from the properties of a tile. \
Also calls :meth:`find_tiles` to get the complete\
image needed to answer the ``d_query``.

        Arguments
        ----------
        d_query: :class:`QueryLayer.DataQuery`
            A request for information

        Returns
        --------
        str
            Answer for the :class:`QueryLayer.InfoQuery`
        """

        keywords = self.update_query(d_query)
        # Update current query with preloaded terms
        d_query.update_source(keywords)
        # Get the image for the d_query
        image = self.find_tiles(d_query)
        return self.write_image(d_query, image)

    @staticmethod
    def make_data_query(i_query):
        """ Make a data query from an info query

        Arguments
        ----------
        i_query: :class:`InfoQuery`
            only needs ``PATH`` set in :data:`OUTPUT.INFO`

        Returns
        --------
        :class:`DataQuery`
            takes only the `PATH` from ``i_query``

        """
        # Begin building needed keywords
        i_path = i_query.OUTPUT.INFO.PATH

        return DataQuery(**{
            i_query.INPUT.METHODS.NAME: 'data',
            i_path.NAME: i_path.VALUE
        })

    @staticmethod
    def make_tile_query(d_query, t_index=np.uint32([0,0,0])):
        """ Make a :class:`TileQuery` from :class:`DataQuery`

        Arguments
        ----------
        d_query: :class:`DataQuery`
            only needs ``PATH`` set in :data:`OUTPUT.INFO`
        t_index: numpy.ndarray
            The 3x1 count of tiles from the origin

        Returns
        --------
        :class:`TileQuery`
            One tile request in the given data request
        """
        tile_crop = d_query.all_in_some(t_index)
        return TileQuery(d_query, t_index, tile_crop)

    def update_query(self, query):
        """ Finds missing query details from cache or tile

        Makes ``keywords`` from either the :data:`_cache` or \
from a new :class:`TileQuery` to update the given ``query``

        Arguments
        ----------
        query: :class:`Query`
            Either an :class:`InfoQuery` or a :class:`DataQuery`

        Returns
        --------
        keywords: dict
            Can pass to :meth:`Query.update_source` or combine \
to pass to :meth:`Query.update_dataset`.
        """
        keywords = self._cache.get(query.key)
        if not len(keywords):
            d_query = query
            # Create a preparatory data_query
            if not isinstance(query, DataQuery):
                d_query = self.make_data_query(query)
            # Create a preparatory tile_query
            t0_query = self.make_tile_query(d_query)
            # Update keywords and set the cache
            keywords = t0_query.preload_source
            self._cache.set(query.key, keywords)
        # Return the updated keywords
        return keywords

    #####
    # Image Specific Methods
    #####

    def find_tiles(self, d_query):
        """ Load the requested image for a :class:`DataQuery`

        Arguments
        ----------
        d_query: :class:`DataQuery`
            Request for a scaled subvolume of a source image

        Returns
        --------
        numpy.ndarray
            The full image data for the requested region
        """
        first_tile_index = d_query.tile_bounds[0]
        all_tiles = np.argwhere(np.ones(d_query.tile_shape))
        cutout = np.zeros(d_query.target_shape, d_query.dtype)
        tiles_needed = first_tile_index + all_tiles

        for t_index in tiles_needed:
            # Make a query for the given tile
            t_query = self.make_tile_query(d_query, t_index)
            tile = self.load_tile(t_query)
            if not len(tile):
                continue
            # Fill the tile into the full cutout
            to_cut = [t_query.target_origin, tile.shape]
            [Z0,Y0,X0],[Z1,Y1,X1] = d_query.some_in_all(*to_cut)
            cutout[Z0:Z1,Y0:Y1,X0:X1] = tile

        return cutout

    def find_unique(self, d_query):
        """ Get unique values for a :class:`DataQuery`

        Arguments
        ----------
        d_query: :class:`DataQuery`
            Request for a scaled subvolume of a source image

        Returns
        --------
        set
            The set of unique values for the request
        """
        first_tile_index = d_query.tile_bounds[0]
        all_tiles = np.argwhere(np.ones(d_query.tile_shape))
        tiles_needed = first_tile_index + all_tiles
        # Set of all unique values
        unique = set()

        for t_index in tiles_needed:
            # Make a query for the given tile
            t_query = self.make_tile_query(d_query, t_index)
            tile = self.load_tile(t_query)
            # Union of unique values with the full set
            tile_bins = np.bincount(tile.flatten()) > 0
            unique = unique | set(np.where(tile_bins)[0])

        return unique

    def load_tile(self, t_query):
        """ Load a single tile from the cache or from disk

        Arguments
        ----------
        t_query: :class:`TileQuery`
            With tile coordinates and volume within the tile

        Returns
        --------
        numpy.ndarray
            The subregion image data for the requested tile
        """
        # grab request size for query
        t_bounds = t_query.target_bounds
        t_origin = t_query.target_tile_bounds[0]
        (K0,J0,I0),(K1,J1,I1) = t_bounds-t_origin

        # Load from cache or from disk if needed
        cache_tile = self._cache.get(t_query.key)
        if len(cache_tile):
            return cache_tile[K0:K1,J0:J1,I0:I1]
        # Load from disk
        tile = t_query.tile
        if not len(tile):
            return []

        self._cache.set(t_query.key, tile)

        return tile[K0:K1,J0:J1,I0:I1]

    @staticmethod
    def view_volume(view, vol):
        """ Display a volume in color or grayscale

        Arguments
        ----------
        view: str
            The requested color or gray view of the data
        vol: str
            Raw volume from :class:`Cache` / :class:`Datasource`

        Returns
        --------
        numpy.ndarray
            Colorized or original raw volume
        """
        # Set up a colormap
        def id_to_color(vol):
            colors = np.zeros((3,)+ vol.shape).astype(np.uint8)
            colors[0] = np.mod(107 * vol, 700).astype(np.uint8)
            colors[1] = np.mod(509 * vol, 900).astype(np.uint8)
            colors[2] = np.mod(200 * vol, 777).astype(np.uint8)
            return np.moveaxis(colors,0,-1)

        # Colormap if a colormap view
        if view.VALUE == view.COLOR.NAME:
            return id_to_color(vol)
        return vol

    def write_image(self, d_query, volume):
        """ Format a volume for a given :class:`DataQuery`

        Arguments
        ----------
        d_query: :class:`DataQuery`
            With the format and view for the requested volume
        volume: numpy.ndarray
            Raw volume from :class:`Cache` / :class:`Datasource`

        Returns
        --------
        str:
            The image response as a formatted bytestring
        """
        img_format = d_query.INPUT.IMAGE.FORMAT
        img_view = d_query.INPUT.IMAGE.VIEW
        img_type = d_query.OUTPUT.INFO.TYPE

        # Only if grayscale view is set
        if img_view.VALUE == img_view.GRAY.NAME:
            # set the view based on the format
            is_big_int = img_type.VALUE in img_type.ID_LIST
            no_big_int_gray = img_format.VALUE in img_format.COLOR_LIST
            # If big integers must not be grayscale, try colormap
            if is_big_int and no_big_int_gray:
                img_view.VALUE = img_view.COLOR.NAME

        # If the format cannot hold multiple slices
        if img_format.VALUE not in img_format.VOL_LIST:
            shape = volume.shape
            if shape[0] > 1:
                # Flatten the volume to image
                volume = volume.reshape(1, -1, shape[-1])
        # Use colormap / RGB style encoding of ID data
        vol = self.view_volume(img_view, volume)

        if img_format.VALUE in ['raw']:
            output = StringIO.StringIO()
            output.write(vol.tobytes())
            vol_string = output.getvalue()
            return vol_string

        if img_format.VALUE in ['npz']:
            output = StringIO.StringIO()
            np.save(output, vol[np.newaxis])
            vol_string = output.getvalue()
            return zlib.compress(vol_string)

        if img_format.VALUE in img_format.ZIP_LIST:
            output = StringIO.StringIO()
            volstring = vol[0].T.tostring('F')
            output.write(zlib.compress(volstring))
            return output.getvalue()

        if img_format.VALUE in img_format.TIF_LIST:
            output = StringIO.StringIO()
            tiffvol = vol[0]
            tifffile.imsave(output, tiffvol)
            return output.getvalue()

        filetype = "." + img_format.VALUE
        image = cv2.imencode(filetype, vol[0])
        return image[1].tostring()
Example #39
class ChainDb(object):
	def __init__(self, settings, datadir, log, mempool, netmagic,
		     readonly=False, fast_dbm=False):
		self.settings = settings
		self.log = log
		self.mempool = mempool
		self.readonly = readonly
		self.netmagic = netmagic
		self.fast_dbm = fast_dbm
		self.blk_cache = Cache(750)
		self.orphans = {}
		self.orphan_deps = {}

		# LevelDB to hold:
		#    tx:*      transaction outputs
		#    misc:*    state
		#    height:*  list of blocks at height h
		#    blkmeta:* block metadata
		#    blocks:*  block seek point in stream
		self.blk_write = io.BufferedWriter(io.FileIO(datadir + '/blocks.dat','ab'))
		self.blk_read = io.BufferedReader(io.FileIO(datadir + '/blocks.dat','rb'))
		self.db = leveldb.LevelDB(datadir + '/leveldb')

		try:
			self.db.Get('misc:height')
		except KeyError:
			self.log.write("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
			batch = leveldb.WriteBatch()
			batch.Put('misc:height', str(-1))
			batch.Put('misc:msg_start', self.netmagic.msg_start)
			batch.Put('misc:tophash', ser_uint256(0L))
			batch.Put('misc:total_work', hex(0L))
			self.db.Write(batch)

		try:
			start = self.db.Get('misc:msg_start')
			if start != self.netmagic.msg_start: raise KeyError
		except KeyError:
			self.log.write("Database magic number mismatch. Data corruption or incorrect network?")
			raise RuntimeError

	def puttxidx(self, txhash, txidx, batch=None):
		ser_txhash = ser_uint256(txhash)


		try:
			self.db.Get('tx:'+ser_txhash)
			old_txidx = self.gettxidx(txhash)
			self.log.write("WARNING: overwriting duplicate TX %064x, height %d, oldblk %064x, oldspent %x, newblk %064x" % (txhash, self.getheight(), old_txidx.blkhash, old_txidx.spentmask, txidx.blkhash))
		except KeyError:
			pass
		batch = self.db if batch is None else batch
		batch.Put('tx:'+ser_txhash, hex(txidx.blkhash) + ' ' +
					       hex(txidx.spentmask))

		return True

	def gettxidx(self, txhash):
		ser_txhash = ser_uint256(txhash)
		try:
			ser_value = self.db.Get('tx:'+ser_txhash)
		except KeyError:
			return None

		pos = string.find(ser_value, ' ')

		txidx = TxIdx()
		txidx.blkhash = long(ser_value[:pos], 16)
		txidx.spentmask = long(ser_value[pos+1:], 16)

		return txidx

	def gettx(self, txhash):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return None

		block = self.getblock(txidx.blkhash)
		for tx in block.vtx:
			tx.calc_sha256()
			if tx.sha256 == txhash:
				return tx

		self.log.write("ERROR: Missing TX %064x in block %064x" % (txhash, txidx.blkhash))
		return None

	def haveblock(self, blkhash, checkorphans):
		if self.blk_cache.exists(blkhash):
			return True
		if checkorphans and blkhash in self.orphans:
			return True
		ser_hash = ser_uint256(blkhash)
		try: 
			self.db.Get('blocks:'+ser_hash)
			return True
		except KeyError:
			return False

	def have_prevblock(self, block):
		if self.getheight() < 0 and block.sha256 == self.netmagic.block0:
			return True
		if self.haveblock(block.hashPrevBlock, False):
			return True
		return False

	def getblock(self, blkhash):
		block = self.blk_cache.get(blkhash)
		if block is not None:
			return block

		ser_hash = ser_uint256(blkhash)
		try:
			# Lookup the block index, seek in the file
			fpos = long(self.db.Get('blocks:'+ser_hash))
			self.blk_read.seek(fpos)

			# read and decode "block" msg
			msg = message_read(self.netmagic, self.blk_read)
			if msg is None:
				return None
			block = msg.block
		except KeyError:
			return None

		self.blk_cache.put(blkhash, block)

		return block

	def spend_txout(self, txhash, n_idx, batch=None):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return False

		txidx.spentmask |= (1L << n_idx)
		self.puttxidx(txhash, txidx, batch)

		return True

	def clear_txout(self, txhash, n_idx, batch=None):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return False

		txidx.spentmask &= ~(1L << n_idx)
		self.puttxidx(txhash, txidx, batch)

		return True

	def unique_outpts(self, block):
		outpts = {}
		txmap = {}
		for tx in block.vtx:
			if tx.is_coinbase:
				continue
			txmap[tx.sha256] = tx
			for txin in tx.vin:
				v = (txin.prevout.hash, txin.prevout.n)
				if v in outpts:
					return None

				outpts[v] = False

		return (outpts, txmap)

	def spent_outpts(self, block):
		# list of outpoints this block wants to spend
		l = self.unique_outpts(block)
		if l is None:
			return None
		outpts = l[0]
		txmap = l[1]
		spendlist = {}

		# pass 1: if outpoint in db, make sure it is unspent
		for k in outpts.iterkeys():
			txidx = self.gettxidx(k[0])
			if txidx is None:
				continue

			if k[1] > 100000:	# outpoint index sanity check
				return None

			if txidx.spentmask & (1L << k[1]):
				return None

			outpts[k] = True	# skip in pass 2

		# pass 2: remaining outpoints must exist in this block
		for k, v in outpts.iteritems():
			if v:
				continue

			if k[0] not in txmap:	# validate txout hash
				return None

			tx = txmap[k[0]]	# validate txout index (n)
			if k[1] >= len(tx.vout):
				return None

			# outpts[k] = True	# not strictly necessary

		return outpts.keys()
Example #40
 def __init__(self, basedir=None):
     self.cache = Cache(basedir)
     self.auto_tagger = AutoTagger(self.get('auto-tags'))
Example #41
def run(movie_dir, html_output_flag, limit):
    """This is the real entry point for the program"""

    #A class to help lookup movie titles
    movielookup = MovieLookup()

    #Match files in a given directory
    matcher = Matcher(Config.movie_match_regex, Config.allowed_file_types)

    #Used to find an imdb id from movie filename
    id_finder = IdFinder()

    #Used for caching movie data
    movie_cache = Cache(Config.movie_cache_file)

    #First, let's match files which match the regex and have the
    #required file extensions in the given directory
    matcher.find_in_directory(movie_dir)
    movie_matches = matcher.get_matches()
    unmatched = matcher.get_ignored()

    #normalise the matches (the filenames will be used as movie titles)
    normalised_movie_matches = Normaliser\
        .normalise_list_and_remove_trailing_number(movie_matches)

    #Now we lookup successful matches, first in the cache, then online
    movie_data = {}      #successful lookup data will go here
    failed_lookups = []  #we will do something with failed lookups later...

    count = 0   #used to limit the number of lookups we will do
    for title in normalised_movie_matches:
        count += 1
        if count >= limit:  # check that we don't go over the arbitrary limit
            break

        #Check if the movie is in our cache
        cached_movie = movie_cache.get(title)
        if cached_movie:
            movie_data[title] = cached_movie
        #Otherwise, lookup using API
        else:
            #look up each movie in the list
            lookup_data = movielookup.lookup_by_title(title)

            #check if we found a movie
            if MovieDataUtil.is_valid_lookup_result(lookup_data):
                movie_data[title] = lookup_data
                #great, let's also add it to the cache
                movie_cache.add_to_cache(title, lookup_data)
            else:
                failed_lookups.append(title)

    #now we will try to correct the failed lookups
    #by using google to find each imdb id
    id_lookup_dict = id_finder.find_id_by_title_list(failed_lookups)

    #reset the failed lookups
    failed_lookups = []      #there should be a lot less now...
    title_corrections = 0    #count how many corrections we actually found

    #Now lookup using the new ids which we found
    for title, found_id in id_lookup_dict.items():
        if found_id is not None:
            #we found an id, now let's look the movie up by its id
            lookup_data = movielookup.lookup_by_id(found_id)

            #theoretically this should always be true
            #unless we got an invalid id somehow...
            if MovieDataUtil.is_valid_lookup_result(lookup_data):
                movie_data[title] = lookup_data
                title_corrections += 1
                #great, let's also add it to the cache
                movie_cache.add_to_cache(title, lookup_data)
            else:
                failed_lookups.append(title)
        else:
            failed_lookups.append(title)

    #Save the updated cache
    movie_cache.save_cache_to_disk()

    #sort the data by imdb id
    movie_data = MovieDataUtil.sort_movie_data(movie_data)

    #Output the data
    if html_output_flag:
        logging.debug('Loading template from: %s', Config.template_directory)
        template_environment = Environment( \
                        loader=FileSystemLoader( \
                        Config.template_directory), trim_blocks=True)
        print template_environment.get_template('main.html').render(
            movie_lookup_data=movie_data,
            failed_lookups=failed_lookups,
            unmatched=unmatched,
            title_corrections=title_corrections,
            datetime=time.strftime("%c"),
            version=__version__,
            author=__author__,
            cache_stats=movie_cache.cache_stats(),
        )
    else:
        simple_output(movie_data, failed_lookups, unmatched)
Example #42
class ChainDb(object):
	def __init__(self, settings, datadir, log, mempool, netmagic,
		     readonly=False, fast_dbm=False,compression=False):
		self.settings = settings
		self.log = log
		self.mempool = mempool
		self.readonly = readonly
		self.netmagic = netmagic
		self.fast_dbm = fast_dbm
		self.blk_cache = Cache(1000)
		self.orphans = {}
		self.orphan_deps = {}
		self.compress_on_write = compression

		# LevelDB to hold:
		#    tx:*      transaction outputs
		#    misc:*    state
		#    height:*  list of blocks at height h
		#    blkmeta:* block metadata
		#    blocks:*  block seek point in stream
		self.blk_write = io.BufferedWriter(io.FileIO(datadir + '/blocks.dat','ab'))
		self.blk_read = io.BufferedReader(io.FileIO(datadir + '/blocks.dat','rb'))
		self.db = leveldb.LevelDB(datadir + '/leveldb')

		try:
			self.db.Get('misc:height')
		except KeyError:
			self.log.write("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
			batch = leveldb.WriteBatch()
			batch.Put('misc:height', str(-1))
			batch.Put('misc:msg_start', self.netmagic.msg_start)
			batch.Put('misc:tophash', ser_uint256(0L))
			batch.Put('misc:total_work', hex(0L))
			self.db.Write(batch)

		try:
			start = self.db.Get('misc:msg_start')
			if start != self.netmagic.msg_start: raise KeyError
		except KeyError:
			self.log.write("Database magic number mismatch. Data corruption or incorrect network?")
			raise RuntimeError

	def puttxidx(self, txhash, txidx, batch=None):
		ser_txhash = ser_uint256(txhash)


		try:
			self.db.Get('tx:'+ser_txhash)
			old_txidx = self.gettxidx(txhash)
			self.log.write("WARNING: overwriting duplicate TX %064x, height %d, oldblk %064x, oldspent %x, newblk %064x" % (txhash, self.getheight(), old_txidx.blkhash, old_txidx.spentmask, txidx.blkhash))
		except KeyError:
			pass
		batch = self.db if batch is None else batch
		batch.Put('tx:'+ser_txhash, hex(txidx.blkhash) + ' ' +
					       hex(txidx.spentmask))

		return True

	def gettxidx(self, txhash):
		ser_txhash = ser_uint256(txhash)
		try:
			ser_value = self.db.Get('tx:'+ser_txhash)
		except KeyError:
			return None

		pos = string.find(ser_value, ' ')

		txidx = TxIdx()
		txidx.blkhash = long(ser_value[:pos], 16)
		txidx.spentmask = long(ser_value[pos+1:], 16)

		return txidx

	def gettx(self, txhash):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return None

		block = self.getblock(txidx.blkhash)
		if block:
			for tx in block.vtx:
				tx.calc_sha256()
				if tx.sha256 == txhash:
					return tx

		self.log.write("ERROR: Missing TX %064x in block %064x" % (txhash, txidx.blkhash))
		return None

	def haveblock(self, blkhash, checkorphans):
		if self.blk_cache.exists(blkhash):
			return True
		if checkorphans and blkhash in self.orphans:
			return True
		ser_hash = ser_uint256(blkhash)
		try: 
			self.db.Get('blocks:'+ser_hash)
			return True
		except KeyError:
			return False

	def have_prevblock(self, block):
		if self.getheight() < 0 and block.sha256 == self.netmagic.block0:
			return True
		if self.haveblock(block.hashPrevBlock, False):
			return True
		return False

	def getblock(self, blkhash):
		block = self.blk_cache.get(blkhash)
		if block is not None:
			return block

		ser_hash = ser_uint256(blkhash)
		try:
			# Lookup the block index, seek in the file
			fpos = long(self.db.Get('blocks:'+ser_hash))
			self.blk_read.seek(fpos)

			# read and decode "block" msg

			recvbuf = self.blk_read.read(4+4)
			if recvbuf[:4] == 'ZLIB':
				msg_len = int(recvbuf[4:8].encode('hex'),16)
				recvbuf = self.blk_read.read(msg_len)
			
				f = cStringIO.StringIO(zlib.decompress(recvbuf))
				msg = message_read(self.netmagic, f)
			else:	
				self.blk_read.seek(fpos)
				msg = message_read(self.netmagic, self.blk_read)
			
			
			if msg is None:
				return None
			block = msg.block
		except KeyError:
			return None

		self.blk_cache.put(blkhash, block)

		return block

	def spend_txout(self, txhash, n_idx, batch=None):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return False

		txidx.spentmask |= (1L << n_idx)
		self.puttxidx(txhash, txidx, batch)

		return True

	def clear_txout(self, txhash, n_idx, batch=None):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return False

		txidx.spentmask &= ~(1L << n_idx)
		self.puttxidx(txhash, txidx, batch)

		return True

	def unique_outpts(self, block):
		outpts = {}
		txmap = {}
		for tx in block.vtx:
			if tx.is_coinbase:
				continue
			txmap[tx.sha256] = tx
			for txin in tx.vin:
				v = (txin.prevout.hash, txin.prevout.n)
				if v in outpts:
					return None

				outpts[v] = False

		return (outpts, txmap)

	def txout_spent(self, txout):
		txidx = self.gettxidx(txout.hash)
		if txidx is None:
			return None

		if txout.n > 100000:	# outpoint index sanity check
			return None

		if txidx.spentmask & (1L << txout.n):
			return True

		return False
Example #43
 def testCase3(self):
     cache = Cache(5)
     cache.Add(1, 'a')
     self.assertEqual(cache._head().key, 1) 
     self.assertEqual(cache._tail().key, 1) 
     cache.Add(2, 'b')
     self.assertEqual(cache._head().key, 1) 
     self.assertEqual(cache._tail().key, 2) 
     cache.Add(3, 'c')
     self.assertEqual(cache._head().key, 1) 
     self.assertEqual(cache._tail().key, 3) 
     cache.Add(4, 'd')
     self.assertEqual(cache._head().key, 1) 
     self.assertEqual(cache._tail().key, 4) 
     cache.Add(5, 'e') # 1->2->3->4->5
     self.assertEqual(cache._head().key, 1) 
     self.assertEqual(cache._tail().key, 5) 
     self.assertEqual(cache.Fetch(2), 'b') # 1->3->4->5->2
     self.assertEqual(cache._head().key, 1) 
     self.assertEqual(cache._tail().key, 2) 
     self.assertEqual(cache.Fetch(1), 'a') # 3->4->5->2->1
     self.assertEqual(cache._head().key, 3) 
     self.assertEqual(cache._tail().key, 1) 
     self.assertEqual(cache.Fetch(1), 'a') # 3->4->5->2->1
     self.assertEqual(cache._head().key, 3) 
     self.assertEqual(cache._tail().key, 1) 
     cache.Remove(3) # 4->5->2->1
     self.assertEqual(cache._head().key, 4) 
     self.assertEqual(cache._tail().key, 1) 
     cache.Remove(1) # 4->5->2
     self.assertEqual(cache._head().key, 4) 
     self.assertEqual(cache._tail().key, 2) 
     self.assertEqual(cache.size(), 3) 
     assert cache.Fetch(5) # 4->2->5
     self.assertEqual(cache._head().key, 4) 
     self.assertEqual(cache._tail().key, 5) 
     self.assertEqual(cache.size(), 3) 
     self.assertEqual(cache.capacity(), 5) 
     cache.Remove(4) # 2->5
     self.assertEqual(cache._head().key, 2) 
     self.assertEqual(cache._tail().key, 5) 
     self.assertEqual(cache.Fetch(4), None) 
     cache.Remove(2)  # 5
     self.assertEqual(cache.Fetch(2), None) 
     self.assertEqual(cache._head().key, 5) 
     self.assertEqual(cache._tail().key, 5) 
     cache.Clear()
     self.assertEqual(cache._head(), None) 
     self.assertEqual(cache._tail(), None) 
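
The test above pins down an LRU-flavored interface: Add appends at the tail (most recent), Fetch moves a hit to the tail, _head() is the oldest entry, and Remove/Clear/size/capacity behave as named. The class under test is not shown; below is a minimal doubly-linked-list sketch that satisfies the head/tail trace annotated in the test's comments. The eviction-on-full branch in Add is an assumption, since the test never overfills the cache.

class _Node(object):
    __slots__ = ('key', 'value', 'prev', 'next')

    def __init__(self, key, value):
        self.key, self.value = key, value
        self.prev = self.next = None

class Cache(object):
    def __init__(self, capacity):
        self._capacity = capacity
        self.Clear()

    def Clear(self):
        self._map = {}
        self._head_node = None          # least recently used
        self._tail_node = None          # most recently used

    def _head(self):
        return self._head_node

    def _tail(self):
        return self._tail_node

    def size(self):
        return len(self._map)

    def capacity(self):
        return self._capacity

    def _unlink(self, node):
        if node.prev is not None:
            node.prev.next = node.next
        else:
            self._head_node = node.next
        if node.next is not None:
            node.next.prev = node.prev
        else:
            self._tail_node = node.prev
        node.prev = node.next = None

    def _append(self, node):
        node.prev = self._tail_node
        if self._tail_node is not None:
            self._tail_node.next = node
        else:
            self._head_node = node
        self._tail_node = node

    def Add(self, key, value):
        if key in self._map:            # refresh an existing entry
            node = self._map[key]
            node.value = value
            self._unlink(node)
        else:
            if len(self._map) >= self._capacity and self._head_node is not None:
                self.Remove(self._head_node.key)   # assumed LRU eviction
            node = _Node(key, value)
            self._map[key] = node
        self._append(node)

    def Fetch(self, key):
        node = self._map.get(key)
        if node is None:
            return None
        self._unlink(node)              # a hit becomes most recent
        self._append(node)
        return node.value

    def Remove(self, key):
        node = self._map.pop(key, None)
        if node is not None:
            self._unlink(node)
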
Example #44
class ChainDb(object):
	def __init__(self, settings, datadir, log, mempool, netmagic,
		     readonly=False, fast_dbm=False):
		self.settings = settings
		self.log = log
		self.mempool = mempool
		self.readonly = readonly
		self.netmagic = netmagic
		self.fast_dbm = fast_dbm
		self.blk_cache = Cache(750)
		self.orphans = {}
		self.orphan_deps = {}
		if readonly:
			mode_str = 'r'
		else:
			mode_str = 'c'
			if fast_dbm:
				self.log.write("Opening database in fast mode")
				mode_str += 'f'
		self.misc = gdbm.open(datadir + '/misc.dat', mode_str)
		self.blocks = gdbm.open(datadir + '/blocks.dat', mode_str)
		self.height = gdbm.open(datadir + '/height.dat', mode_str)
		self.blkmeta = gdbm.open(datadir + '/blkmeta.dat', mode_str)
		self.tx = gdbm.open(datadir + '/tx.dat', mode_str)

		if 'height' not in self.misc:
			self.log.write("INITIALIZING EMPTY BLOCKCHAIN DATABASE")
			self.misc['height'] = str(-1)
			self.misc['msg_start'] = self.netmagic.msg_start
			self.misc['tophash'] = ser_uint256(0L)
			self.misc['total_work'] = hex(0L)

		if 'msg_start' not in self.misc or (self.misc['msg_start'] != self.netmagic.msg_start):
			self.log.write("Database magic number mismatch. Data corruption or incorrect network?")
			raise RuntimeError

	def dbsync(self):
		self.misc.sync()
		self.blocks.sync()
		self.height.sync()
		self.blkmeta.sync()
		self.tx.sync()

	def puttxidx(self, txhash, txidx):
		ser_txhash = ser_uint256(txhash)

		if ser_txhash in self.tx:
			old_txidx = self.gettxidx(txhash)
			self.log.write("WARNING: overwriting duplicate TX %064x, height %d, oldblk %064x, oldspent %x, newblk %064x" % (txhash, self.getheight(), old_txidx.blkhash, old_txidx.spentmask, txidx.blkhash))

		self.tx[ser_txhash] = (hex(txidx.blkhash) + ' ' +
				       hex(txidx.spentmask))

		return True

	def gettxidx(self, txhash):
		ser_txhash = ser_uint256(txhash)
		if ser_txhash not in self.tx:
			return None

		ser_value = self.tx[ser_txhash]
		pos = string.find(ser_value, ' ')

		txidx = TxIdx()
		txidx.blkhash = long(ser_value[:pos], 16)
		txidx.spentmask = long(ser_value[pos+1:], 16)

		return txidx

	def gettx(self, txhash):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return None

		block = self.getblock(txidx.blkhash)
		if block is not None:
			for tx in block.vtx:
				tx.calc_sha256()
				if tx.sha256 == txhash:
					return tx

		self.log.write("ERROR: Missing TX %064x in block %064x" % (txhash, txidx.blkhash))
		return None

	def haveblock(self, blkhash, checkorphans):
		if self.blk_cache.exists(blkhash):
			return True
		if checkorphans and blkhash in self.orphans:
			return True
		ser_hash = ser_uint256(blkhash)
		if ser_hash in self.blocks:
			return True
		return False

	def have_prevblock(self, block):
		if self.getheight() < 0 and block.sha256 == self.netmagic.block0:
			return True
		if self.haveblock(block.hashPrevBlock, False):
			return True
		return False

	def getblock(self, blkhash):
		block = self.blk_cache.get(blkhash)
		if block is not None:
			return block

		ser_hash = ser_uint256(blkhash)
		if ser_hash not in self.blocks:
			return None

		f = cStringIO.StringIO(self.blocks[ser_hash])
		block = CBlock()
		block.deserialize(f)

		self.blk_cache.put(blkhash, block)

		return block

	def spend_txout(self, txhash, n_idx):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return False

		txidx.spentmask |= (1L << n_idx)
		self.puttxidx(txhash, txidx)

		return True

	def clear_txout(self, txhash, n_idx):
		txidx = self.gettxidx(txhash)
		if txidx is None:
			return False

		txidx.spentmask &= ~(1L << n_idx)
		self.puttxidx(txhash, txidx)

		return True

	def unique_outpts(self, block):
		outpts = {}
		txmap = {}
		for tx in block.vtx:
			if tx.is_coinbase():
				continue
			txmap[tx.sha256] = tx
			for txin in tx.vin:
				v = (txin.prevout.hash, txin.prevout.n)
				if v in outpts:
					return None

				outpts[v] = False

		return (outpts, txmap)

	def spent_outpts(self, block):
		# list of outpoints this block wants to spend
		l = self.unique_outpts(block)
		if l is None:
			return None
		outpts = l[0]
		txmap = l[1]
		spendlist = {}

		# pass 1: if outpoint in db, make sure it is unspent
		for k in outpts.iterkeys():
			txidx = self.gettxidx(k[0])
			if txidx is None:
				continue

			if k[1] > 100000:	# outpoint index sanity check
				return None

			if txidx.spentmask & (1L << k[1]):
				return None

			outpts[k] = True	# skip in pass 2

		# pass 2: remaining outpoints must exist in this block
		for k, v in outpts.iteritems():
			if v:
				continue

			if k[0] not in txmap:	# validate txout hash
				return None

			tx = txmap[k[0]]	# validate txout index (n)
			if k[1] >= len(tx.vout):
				return None

			# outpts[k] = True	# not strictly necessary

		return outpts.keys()
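
puttxidx()/gettxidx() above persist a TxIdx as a single space-separated string of two hex fields under the serialized tx hash key. A minimal round-trip sketch of that record format (the values are made up; '%x' and int()/str.find() stand in for the Python 2 hex()/long()/string.find() calls, sidestepping Python 2's trailing 'L' on hex() output):

blkhash = 0x6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000
spentmask = (1 << 0) | (1 << 3)              # outputs 0 and 3 marked spent

# "<hex block hash> <hex spentmask>"
record = '%x %x' % (blkhash, spentmask)

pos = record.find(' ')
assert int(record[:pos], 16) == blkhash
assert int(record[pos + 1:], 16) == spentmask
assert int(record[pos + 1:], 16) & (1 << 3)  # output 3 reads back as spent
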
Example #45
class TSSGRepository(LoggerMixin, TeagleRepository):	
	ROLE_CUSTOMER = LazyRepoEntity(PersonRole, 1)
	ROLE_ADMIN = LazyRepoEntity(PersonRole, 2)
	ROLE_PARTNER = LazyRepoEntity(PersonRole, 3)
	
	VCT_STATE_BOOKED = LazyRepoEntity(VctState, 10)
	
	RESOURCE_INSTANCE_STATE_PROVISIONED = LazyRepoEntity(ResourceInstanceState, 3)
	RESOURCE_INSTANCE_STATE_UNPROVISIONED = LazyRepoEntity(ResourceInstanceState, 8)
	
	def __init__(self, uri, username = None, password = None, lock = None, classes = None, timeout = None, *args, **kw):
		super(TSSGRepository, self).__init__(*args, **kw)
		
		#raise Exception(uri)
		
		if lock is None:
			from ngniutils.threading import RWLock
			lock = RWLock() 
			
		self.timeout = timeout
		self.__lock = lock
		self.__serializer = TSSGSerializer.TSSGSerializer(self)
		self.__executor = RestExecutor(uri = uri, username = username, password = password, content_type = "text/xml")
		self.__cache = Cache(self, classes, timeout = timeout)
		self.__refresh_done = threading.Condition()
		self.__refreshing = False
		
		if classes:
			import teagle.repository.entities
			teagle.repository.entities._classes = classes
		
		self.refresh()
		
	def refresh(self):
		with self.__lock.read_transaction(self.timeout):
			self.__refresh_done.acquire()
			if not self.__refreshing:
				self.__refreshing = True
				self.__refresh_done.release()
				try:
					self._do_refresh()
				finally:
					self.__refreshing = False
					with self.__refresh_done:
						self.__refresh_done.notify_all()
			else:
				self.__refresh_done.wait()
				assert(not self.__refreshing)
				self.__refresh_done.release()
				
	def _do_refresh(self):
		self.__cache.refresh()
				
	def list_entities(self, klass, order_by = None, owns = {}, order_desc = False, **filtr):
		if isinstance(klass, basestring):
			klass = get_entity_class(klass)
		entities = self.__cache.list_entities(klass)
		
		if order_by is None and entities and hasattr(entities[0], "commonName"):
			order_by = "commonName"
		
		for k, v in filtr.iteritems():
			#entities = [ e for e in entities if getattr(e, k) == v ]
			entities = filter(lambda e: getattr(e, k) == v, entities)
			
		for k, v in owns.iteritems():
			#entities = [ e for e in entities if v in getattr(e, k) ]
			entities = filter(lambda e: v in getattr(e, k), entities)
		
		if order_by and order_by[0] != "_":
			try:
				entities.sort(key = lambda x: getattr(x, order_by))
			except AttributeError:
				self.logger.exception("Error sorting result")
			else:
				if order_desc:
					entities.reverse()
		
		return tuple(entities)
	
	def get_unique_entity(self, klass, owns = {}, **filter):
		entities = self.list_entities(klass, order_by = False, owns = owns, **filter)
		
		if not entities:
			self.logger.debug("No unique entity found for %s (owns=%s, filter=%s). retrying after refresh." % (klass.__name__, owns, filter))
			self.refresh()
			entities = self.list_entities(klass, order_by = False, owns = owns, **filter)
			if not entities:
				raise NoEntityFound(klass, filter)
		
		if len(entities) > 1:
			raise MultipleEntitiesFound("Multiple entities found for %s owning %s with filter %s" % (klass, owns, filter))
		
		return entities[0]

	def _get_entity(self, klass, id):
		#print ("get2", klass, id)	

		return self.__cache.get_entity(klass, id)
	
	def _get_entity_unlocked(self, klass, id):
		#print ("get2", klass, id)	

		return self.__cache.get_entity_unlocked(klass, id)

	def get_entity(self, klass, id):
		if isinstance(klass, basestring):
			klass = get_entity_class(klass)
		id = int(id)
		try:
			return self._get_entity(klass, id)
		except NoEntityFound:
			self.logger.debug("No entity found for %s-%s. retrying after refresh." % (klass.__name__, id))
			self.refresh()
			return self._get_entity(klass, id)

	def _do_persist(self, persisting):
		for e in persisting:
			self.__do_persist(e)

	def persist(self, entity):
		self.logger.debug("persist: %s" % (entity, ))
		persisting = []
		considered = set()
		
		with self.__lock.write_transaction(self.timeout):
			self.__persist(entity, persisting, considered)
			
			#classes = set( p.__class__ for p in persisting )
			
			persisting.reverse()
			
			self.logger.debug("need persisting: %s" % (persisting, ))
						
			self._do_persist(persisting)
		
		if persisting:			
			self.refresh()
			
	def delete_entity(self, entity):
		self._do_delete_entity(entity)
		self.refresh()
		
	def _do_delete_entity(self, entity):
		self.__executor.delete(entity)

	def __persist(self, entity, persisting, considered):
		if isinstance(entity, (list, set, tuple, frozenset)):
			for e in entity:
				assert isinstance(e, Entity), "Strange value in collection: %s" % (e, )
				self.__persist_entity(e, persisting, considered)
		else:
			self.__persist_entity(entity, persisting, considered)
	
	def __persist_entity(self, entity, persisting, considered):
		assert(isinstance(entity, Entity))
		
		#self.logger.debug("Considering entity: %s" % (entity, ))
		if entity not in considered:
			considered.add(entity)
			
			if entity.is_updated:
				persisting.append(entity)
			
			for v in entity._get_fields().itervalues():
				#self.logger.debug("Considering fieldvalue: %s" % (v, ))
				if isinstance(v, (Entity, list, set, tuple, frozenset)):
					if isinstance(v, Entity) and not v.is_persistent:
						assert(not isinstance(v, Person))
						entity.set_is_updated(True)
						if entity not in persisting:
							persisting.append(entity)
					self.__persist(v, persisting, considered)
				
	def __do_persist(self, e):
		self.logger.debug("executing persist for %s" % (e, ))
		xml = self.__serializer.serialize(e)
		#logger.debug("xml: %s" % (xml, ))
		if e.is_persistent:
			self.logger.debug("Updating: %s" % (e, ))
			self.__executor.update(e, xml)
		else:
			self.logger.debug("Adding: %s" % (e, ))
			xml = self.__executor.add(e, xml)
			with xml:
				id, values = self.__serializer.unserialize_entity(xml, e.__class__)
			self.logger.debug("Received after add: %s %s" % (id, values))
			e._set_id(id)
			e._set_fields(values)
			e._set_repository(self)
			e.set_is_updated(False)
			self.__cache.put_entity(e)
	
	def _list_data(self, klass):
		with self.__executor.list(klass) as xml:
			for x in self.__serializer.unserialize_values(xml, klass):
				yield x
		
	def make_password(self, password):
		return Password(md5(password).hexdigest())
	
	def check_password(self, password, target):
		if hasattr(target, "password"):
			target = target.password
		return self.make_password(password) == target
		
	def authenticate_user(self, username, password):
		user = self.get_unique_entity(Person, commonName = username)
		
		if not self.check_password(password, user):
			raise AuthenticationError(username)
		
		return user
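
refresh() above is a single-flight pattern: the first caller runs the expensive cache rebuild while concurrent callers block on a Condition until it completes, rather than duplicating the work. A minimal standalone sketch of that coordination (the class and callback names are hypothetical, and the repository's read/write lock is omitted for brevity):

import threading

class SingleFlight(object):
    def __init__(self, do_refresh):
        self._do_refresh = do_refresh       # the expensive rebuild callback
        self._done = threading.Condition()
        self._refreshing = False

    def refresh(self):
        with self._done:
            if self._refreshing:
                # Another thread is already rebuilding; wait for it to
                # finish instead of running a duplicate rebuild.
                self._done.wait()
                return
            self._refreshing = True
        try:
            self._do_refresh()              # run outside the condition lock
        finally:
            with self._done:
                self._refreshing = False
                self._done.notify_all()
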