def store(nodes):
    # hkey, dkey, value, node, policy_in and time_keeper are closed over
    # from the enclosing method
    self.log.info("setting '%s' on %s" % (hkey, map(str, nodes)))
    # if this node is close too, then store here as well
    if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
        chunk = CloudChunk.decode(value)
        if digest(chunk.key) != dkey:
            return {'error': 'key mismatch'}

        def handle_policy(policy):
            time_keeper.stop_clock(ENTRY_FETCH_POLICY)
            # Hack: no chunk id given -> no key checks, the key is in the encoded chunk
            id = time_keeper.start_clock_unique()
            self.storage.store_check_chunk(chunk, None, policy, time_keeper=time_keeper)
            time_keeper.stop_clock_unique(ENTRY_STORE_CHECK, id)

            id = time_keeper.start_clock_unique()
            ds = [self.protocol.callStore(n, dkey, value) for n in nodes]
            return defer.DeferredList(ds).addCallback(_anyRespondSuccess,
                                                      time_keeper, id, ENTRY_STORE_TO_ALL_NODES)

        if policy_in is not None:
            return handle_policy(policy_in)
        time_keeper.start_clock()
        return self.talos_vc.get_policy_with_txid(chunk.get_tag_hex()).addCallback(handle_policy)
    id = time_keeper.start_clock_unique()
    ds = [self.protocol.callStore(n, dkey, value) for n in nodes]
    return defer.DeferredList(ds).addCallback(_anyRespondSuccess,
                                              time_keeper, id, ENTRY_STORE_TO_ALL_NODES)
def render_POST(self, request):
    time_keeper = TimeKeeper()
    time_id = time_keeper.start_clock_unique()

    def respond(result):
        time_keeper.stop_clock_unique(ENTRY_TOTAL_ADD_CHUNK, time_id)
        self.log.debug("%s %s %s" % (BENCH_TAG, TYPE_ADD_CHUNK, time_keeper.get_summary()))
        if result is not None:
            request.setResponseCode(200)
            request.write("OK")
        else:
            request.setResponseCode(400)
            request.write("ERROR")
        request.finish()

    encoded_chunk = request.content.read()
    try:
        chunk = CloudChunk.decode(encoded_chunk)
        self.dhtstorage.store_chunk(chunk, time_keeper=time_keeper).addCallback(respond)
        return NOT_DONE_YET
    except InvalidChunkError:
        request.setResponseCode(400)
        return "ERROR: Invalid Chunk"
    except TalosVCRestClientError:
        request.setResponseCode(400)
        return "ERROR: No Policy found"
    except Exception:
        request.setResponseCode(400)
        return "ERROR"
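# A minimal client-side sketch for driving render_POST above. The endpoint
# path "/store_chunk" and port 14000 are illustrative assumptions, not taken
# from the source; the body is the raw binary encoding of a CloudChunk.
import requests

def add_chunk(encoded_chunk, host="localhost", port=14000):
    # the server answers 200/"OK" on success and 400/"ERROR" otherwise
    res = requests.post("http://%s:%d/store_chunk" % (host, port), data=encoded_chunk)
    return res.status_code == 200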
def get_chunk(self, chunk_key, time_keeper=TimeKeeper(), do_plain=True):
    # Note: the default TimeKeeper() is created once at definition time and
    # shared across all calls that omit the argument.
    time_keeper.start_clock()
    chunk = get_data_s3(self.s3, binascii.hexlify(chunk_key), self.bucket_name)
    if do_plain:
        chunk = ChunkData.decode(chunk)
    else:
        chunk = CloudChunk.decode(chunk)
    time_keeper.stop_clock("time_s3_get_chunk")
    return chunk
def store_chunk(chunkid):
    encoded_chunk = request.get_data()
    try:
        chunk = CloudChunk.decode(encoded_chunk)
        policy = get_policy_with_txid(chunk.get_tag_hex())
        storage = get_storage()
        storage.store_check_chunk(chunk, chunkid, policy)
        return "OK", 200
    except InvalidChunkError:
        return "ERROR Invalid chunk", 400
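# A sketch of how the store_chunk view above could be registered, assuming a
# standard Flask app with the chunk id taken from the URL (the route string
# is an assumption, not taken from the source).
from flask import Flask

app = Flask(__name__)
app.add_url_rule("/store_chunk/<chunkid>", "store_chunk", store_chunk, methods=["POST"])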
def _get_chunk(self, chunk_key):
    try:
        encoded = self.db.Get(chunk_key)
    except KeyError:
        return None
    _, bin_chunk = get_time_and_chunk(encoded)

    def store_update():
        # refresh the access timestamp without blocking the caller
        self.db.Put(chunk_key, add_time_chunk(bin_chunk))

    threads.deferToThread(store_update)
    return CloudChunk.decode(bin_chunk)
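# _get_chunk above keeps a last-access timestamp next to each chunk and
# refreshes it on every read via deferToThread. A minimal sketch of what the
# helpers could look like, assuming an 8-byte big-endian unix-time prefix
# (the actual wire format of get_time_and_chunk/add_time_chunk is not shown
# in the source):
import struct
import time

def add_time_chunk_sketch(bin_chunk):
    # prepend the current unix time to the raw chunk bytes
    return struct.pack(">Q", int(time.time())) + bin_chunk

def get_time_and_chunk_sketch(encoded):
    # split the fixed-width timestamp prefix from the chunk payload
    timestamp, = struct.unpack(">Q", encoded[:8])
    return timestamp, encoded[8:]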
def test_get_image(self):
    block_id = 1
    owner = PRIVATE_KEY.public_key().address()
    stream_ident = DataStreamIdentifier(owner, 3, NONCE, TXID)
    chunk = self._test_get_chunk_for_blockid(owner, stream_ident, block_id)
    print len(chunk)
    chunk_ = CloudChunk.decode(str(chunk))
    data = get_chunk_data_from_cloud_chunk(chunk_, "a" * 16)
    print chunk_.get_key_hex()
    print data.entries[0].metadata
    print hash(data.entries[0].picture_data)
    img = Image.open(StringIO(data.entries[0].picture_data))
    img.show()
def fetch_chunk(self, block_id, private_key, stream_identifier, time_keeper=TimeKeeper()):
    # Step 1: resolve the address of the peer holding the chunk
    time_keeper.start_clock()
    reason, code, address = get_chunk_addr(self.session,
                                           stream_identifier.get_key_for_blockid(block_id),
                                           self.dhtip, self.dhtport)
    time_keeper.stop_clock(TIME_FETCH_ADDRESS)
    if code != 200:
        raise DHTRestClientException("Fetch chunk location error", code, reason, address)
    [ip, port] = address.split(':')
    if self.tlsport != -1:
        port = self.tlsport

    # Step 2: fetch a fresh nonce from the peer for the access token
    time_keeper.start_clock()
    reason, code, nonce = get_nonce_peer(self.session, ip, int(port))
    time_keeper.stop_clock(TIME_FETCH_NONCE)
    if code != 200:
        raise DHTRestClientException("Fetch nonce error", code, reason, nonce)
    nonce = str(nonce)

    # Step 3: sign a query token over the nonce and fetch the chunk itself
    token = generate_token(block_id, private_key, stream_identifier, nonce)
    time_keeper.start_clock()
    reason, code, chunk = get_chunk_peer(self.session, token.to_json(), ip, int(port))
    time_keeper.stop_clock(TIME_FETCH_CHUNK)
    if code != 200:
        raise DHTRestClientException("Fetch chunk error", code, reason, chunk)
    return CloudChunk.decode(chunk)
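# A usage sketch for fetch_chunk above. The client class name DHTRestClient
# and its constructor arguments are assumptions (inferred from
# DHTRestClientException); PRIVATE_KEY, NONCE and TXID mirror the test code
# elsewhere in this file set.
client = DHTRestClient(dhtip="127.0.0.1", dhtport=14000)
owner = PRIVATE_KEY.public_key().address()
stream_ident = DataStreamIdentifier(owner, 3, NONCE, TXID)
try:
    chunk = client.fetch_chunk(1, PRIVATE_KEY, stream_ident)
    print chunk.get_key_hex()
except DHTRestClientException as e:
    print "fetch failed: %s" % e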
def _get_chunk(self, chunk_key):
    return CloudChunk.decode(get_data_s3(self.s3, binascii.hexlify(chunk_key), self.bucket_name))
return d.addCallback(handle_response)

for count in range(num_servers):
    storage = TalosLevelDBDHTStorage("./db/leveldb%d" % (count + 1,))
    server = TalosDHTServer(ksize=4, storage=storage)
    server.listen(start_port + count)
    servers.append(server)

def have_chunk(result, (client, old_chunk, block_id)):
    if result is None:
        print "No value :("
        reactor.stop()
        return
    result = CloudChunk.decode(result)
    if isinstance(result, CloudChunk):
        print "Result %s" % result.get_base64_encoded()
    else:
        print result
    reactor.stop()

def have_nonce(nonce, (client, ip, port, chunk, block_id)):
    token = generate_token(block_id, nonce)
    print "with token %s \n" % token.to_json()
    return client.get_chunk(ip, int(port), token).addCallback(have_chunk, (client, chunk, block_id))
def rpc_sayhi(self, sender, chunk, token):
    token = QueryToken.from_json(token)
    # This could return a Deferred as well. sender is (ip, port)
    chunk_orig = CloudChunk.decode(chunk)
    return "Tag is %s you live at %s:%i and token is %s" % (
        chunk_orig.get_tag_hex(), sender[0], sender[1], token.owner)
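# A sketch of invoking rpc_sayhi above from a peer, assuming the rpcudp
# convention used by kademlia-style protocols: a method named rpc_xyz on the
# receiver is called as self.xyz(address, ...) on the sender and returns a
# Deferred firing with a (success, response) pair. chunk.get_encoded() is an
# assumed inverse of CloudChunk.decode, not confirmed by the source.
def call_sayhi(self, node_address, chunk, token):
    d = self.sayhi(node_address, chunk.get_encoded(), token.to_json())
    return d.addCallback(lambda (ok, resp): resp if ok else None)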