Example #1
    def datagramReceived(self, datagram, address):
        time_keeper = TimeKeeper()

        if self.noisy:
            log.msg("received datagram from %s" % repr(address))
        if len(datagram) < 22:
            log.msg("received datagram too small from %s, ignoring" %
                    repr(address))
            return

        msgID = datagram[1:21]
        time_keeper.start_clock()
        data = umsgpack.unpackb(datagram[21:])
        time_keeper.stop_clock("time_unpack_msg")

        # self.log.debug("[BENCH] LOW RPC RECEIVE -> %s " % (time_keeper.get_summary(),))

        if datagram[:1] == b'\x00':
            self._acceptRequest(msgID, data, address)
        elif datagram[:1] == b'\x01':
            self._acceptResponse(msgID, data, address)
        else:
            # otherwise, don't know the format, don't do anything
            log.msg("Received unknown message from %s, ignoring" %
                    repr(address))
Example #2
        def func(address, node_id, *args):
            time_keeper = TimeKeeper()
            msgID = sha1(os.urandom(32)).digest()
            assert len(node_id) == 20
            time_keeper.start_clock()
            data = umsgpack.packb([str(name), node_id, args])
            time_keeper.stop_clock("time_data_pack")

            if len(data) > self.max_packet_size:
                msg = "Total length of function name and arguments cannot exceed 8K"
                raise MalformedMessage(msg)
            txdata = b'\x00' + msgID + data
            if self.noisy:
                log.msg("calling remote function %s on %s (msgid %s)" %
                        (name, address, b64encode(msgID)))
            time_keeper.start_clock()
            self.transport.write(txdata, address)
            time_keeper.stop_clock("time_write_socket")

            # self.log.debug("[BENCH] LOW RPC SEND TIMES %s -> %s " % (str(name), time_keeper.get_summary()))

            d = defer.Deferred()
            timeout = reactor.callLater(self._waitTimeout, self._timeout,
                                        msgID)
            self._outstanding[msgID] = (d, timeout)
            return d
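Examples #1 and #2 together imply a simple datagram layout: one type byte (b'\x00' request, b'\x01' response), a 20-byte SHA-1 message id, then a msgpack-encoded body, which is why the receiver rejects anything shorter than 22 bytes. A self-contained round trip under that reading (pack_request and unpack_datagram are illustrative names, not part of the original protocol class):

import os
from hashlib import sha1

import umsgpack

REQUEST, RESPONSE = b'\x00', b'\x01'


def pack_request(name, node_id, args):
    # 1 type byte + 20-byte message id + msgpack([name, node_id, args])
    msg_id = sha1(os.urandom(32)).digest()
    return msg_id, REQUEST + msg_id + umsgpack.packb([name, node_id, list(args)])


def unpack_datagram(datagram):
    if len(datagram) < 22:
        raise ValueError("datagram too small")
    return datagram[:1], datagram[1:21], umsgpack.unpackb(datagram[21:])


msg_id, wire = pack_request("ping", b'\x01' * 20, ())
kind, echoed_id, payload = unpack_datagram(wire)
assert kind == REQUEST and echoed_id == msg_id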
Example #3
    def render_POST(self, request):
        if len(request.prepath) < 4:
            request.setResponseCode(400)
            return json.dumps({'error': "Illegal URL"})
        try:
            time_keeper = TimeKeeper()
            total_time_id = time_keeper.start_clock_unique()

            nodeid = unhexlify(request.prepath[1])
            source_ip = request.client.host
            source_port = int(request.prepath[2])
            kad_key = unhexlify(request.prepath[3])

            source = Node(nodeid, source_ip, source_port)

            time_keeper.start_clock()
            self.rpc_protocol.welcomeIfNewNode(source)
            time_keeper.stop_clock(ENTRY_TIME_WELCOME_NODE)

            encoded_chunk = request.content.read()

            chunk = CloudChunk.decode(encoded_chunk)

            if not digest(chunk.key) == kad_key:
                request.setResponseCode(400)
                return json.dumps({'error': "key missmatch"})

            def handle_policy(policy):
                time_keeper.stop_clock(ENTRY_FETCH_POLICY)

                store_id = time_keeper.start_clock_unique()
                self.storage.store_check_chunk(chunk,
                                               None,
                                               policy,
                                               time_keeper=time_keeper)
                time_keeper.stop_clock_unique(ENTRY_STORE_CHECK, store_id)

                time_keeper.stop_clock_unique(ENTRY_TOTAL_STORE_LOCAL,
                                              total_time_id)
                self.log.debug("%s %s %s" % (BENCH_TAG, TYPE_STORE_CHUNK_LOCAL,
                                             time_keeper.get_summary()))
                request.write(json.dumps({'value': "ok"}))
                request.finish()

            time_keeper.start_clock()
            self.talos_vc.get_policy_with_txid(
                chunk.get_tag_hex()).addCallback(handle_policy)
            return NOT_DONE_YET
        except InvalidChunkError as e:
            request.setResponseCode(400)
            return json.dumps({'error': e.value})
        except TalosVCRestClientError:
            request.setResponseCode(400)
            return "ERROR: No policy found"
        except Exception:
            request.setResponseCode(400)
            return json.dumps({'error': "Error occurred"})
Example #4
def run_benchmark_fetch_par(num_rounds, data_path, num_entries, granularity, fetch_granularity, num_threads, out_logger,
                            private_key=BitcoinVersionedPrivateKey(PRIVATE_KEY),
                            policy_nonce=base64.b64decode(NONCE), stream_id=STREAMID,
                            txid=TXID, ip=IP, port=PORT):
    key = os.urandom(32)
    identifier = DataStreamIdentifier(private_key.public_key().address(), stream_id, policy_nonce, txid)

    dht_api_client = DHTRestClient(dhtip=ip, dhtport=port)

    for block_id, chunk_data in enumerate(extract_eth_smartmeter_data(data_path, granularity, max_entries=num_entries)):
        try:
            chunk = generate_cloud_chunk(chunk_data, private_key, block_id, identifier, key=key)
            dht_api_client.store_chunk(chunk)
            print "Store chunk %d Num entries: %d" % (block_id, len(chunk_data.entries))
        except DHTRestClientException as e:
            print "Store round %d error: %s" % (block_id, e)

    for x in fetch_granularity:
        num_fetches = x / granularity
        if not x % granularity == 0:
            num_fetches += 1

        for round in range(num_rounds):
            time_keeper = TimeKeeper()
            results = [[] for _ in range(num_threads)]

            threads_for_round = min(num_fetches, num_threads)
            if threads_for_round < num_threads:
                print "Num fetches: %d temp threads: %d" % (num_fetches, threads_for_round)
            threads = [Fetchjob(idx, results, DHTRestClient(dhtip=ip, dhtport=port), block_id, private_key, identifier)
                       for idx, block_id in enumerate(splitting(range(num_fetches), threads_for_round))]
            time_keeper.start_clock()
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            time_keeper.stop_clock("time_fetch_all")
            time_keeper.store_value("num_entries", x)
            time_keeper.store_value("num_blocks", num_fetches)
            time_keeper.store_value("round", round)
            chunks = [item for sublist in results for item in sublist]
            if len(chunks) == num_fetches:
                print "Round %d ok Num results: %d" % (round, num_fetches)
            else:
                print "Round %d error Num results: %d" % (round, len(chunks))
            for idx, chunk in enumerate(chunks):
                if chunk is None:
                    print "No result for chunk %d " % idx
            out_logger.log_times_keeper(time_keeper)
        out_logger.flush_to_db()
    print "DONE"
Example #5
def run_benchmark_fetch_par(
        num_rounds,
        num_fetches,
        num_threads,
        out_logger,
        private_key=BitcoinVersionedPrivateKey(PRIVATE_KEY),
        policy_nonce=base64.b64decode(NONCE),
        stream_id=STREAMID,
        txid=TXID,
        ip=IP,
        port=PORT,
        chunk_size=100000):
    key = os.urandom(32)
    identifier = DataStreamIdentifier(private_key.public_key().address(),
                                      stream_id, policy_nonce, txid)

    dht_api_client = DHTRestClient(dhtip=ip, dhtport=port)

    for round_bench in range(num_fetches):
        try:
            chunk = generate_random_chunk(private_key,
                                          round_bench,
                                          identifier,
                                          key=key,
                                          size=chunk_size)
            dht_api_client.store_chunk(chunk)
            print "Store chunk %d" % (round_bench, )
        except DHTRestClientException as e:
            print "Store round %d error: %s" % (round_bench, e)

    for round in range(num_rounds):
        time_keeper = TimeKeeper()
        results = [[] for _ in range(num_threads)]
        threads = [
            Fetchjob(idx, results, DHTRestClient(dhtip=ip, dhtport=port),
                     block_id, private_key, identifier)
            for idx, block_id in enumerate(
                splitting(range(num_fetches), num_threads))
        ]
        time_keeper.start_clock()
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        time_keeper.stop_clock("time_fetch_all")
        chunks = [item for sublist in results for item in sublist]
        if len(chunks) == num_fetches:
            print "Round %d ok Num results: %d" % (round, num_fetches)
        else:
            print "Round %d error Num results: %d" % (round, len(chunks))
        for idx, chunk in enumerate(chunks):
            if chunk is None:
                print "No result for chunk %d " % idx
        out_logger.log_times_keeper(time_keeper)
    out_logger.flush_to_db()
    print "DONE"
Example #6
    def render_POST(self, request):
        msg = json.loads(request.content.read())
        timekeeper = TimeKeeper()
        total_time_id = timekeeper.start_clock_unique()
        try:
            timekeeper.start_clock()
            token = get_and_check_query_token(msg)
            check_query_token_valid(token)
            timekeeper.stop_clock(ENTRY_CHECK_TOKEN_VALID)

            # Check nonce ok
            if not self._check_cache(token.nonce):
                raise InvalidQueryToken("Nonce not valid")

            def handle_policy(policy):
                timekeeper.stop_clock(ENTRY_FETCH_POLICY)
                if policy is None:
                    request.setResponseCode(400)
                    request.write("No Policy Found")
                    request.finish()
                    return
                # check policy for correctness
                check_id = timekeeper.start_clock_unique()
                chunk = self.storage.get_check_chunk(token.chunk_key,
                                                     token.pubkey,
                                                     policy,
                                                     time_keeper=timekeeper)
                timekeeper.stop_clock_unique(ENTRY_GET_AND_CHECK, check_id)
                timekeeper.stop_clock_unique(ENTRY_TOTAL_LOCAL_QUERY,
                                             total_time_id)

                self.log.debug("%s %s %s" % (BENCH_TAG, TYPE_QUERY_CHUNK_LOCAL,
                                             timekeeper.get_summary()))
                request.write(chunk.encode())
                request.finish()

            timekeeper.start_clock()
            self.talos_vc.get_policy(token.owner,
                                     token.streamid).addCallback(handle_policy)
            return NOT_DONE_YET
        except InvalidQueryToken:
            request.setResponseCode(400)
            return "ERROR: token verification failure"
        except TalosVCRestClientError:
            request.setResponseCode(400)
            return "ERROR: No policy found"
        except Exception:
            request.setResponseCode(400)
            return "ERROR: error occurred"
Example #7
    def rpc_store(self, sender, nodeid, key, value):
        source = Node(nodeid, sender[0], sender[1])
        time_keeper = TimeKeeper()
        total_time_id = time_keeper.start_clock_unique()

        time_keeper.start_clock()
        self.welcomeIfNewNode(source)
        time_keeper.stop_clock(ENTRY_TIME_WELCOME_NODE)

        self.log.debug("got a store request from %s, storing value" %
                       str(sender))
        try:

            chunk = CloudChunk.decode(value)

            if not digest(chunk.key) == key:
                return {'error': 'key mismatch'}

            def handle_policy(policy):
                time_keeper.stop_clock(ENTRY_FETCH_POLICY)

                # Hack no chunk id given -> no key checks, key is in the encoded chunk
                store_id = time_keeper.start_clock_unique()
                self.storage.store_check_chunk(chunk,
                                               None,
                                               policy,
                                               time_keeper=time_keeper)
                time_keeper.stop_clock_unique(ENTRY_STORE_CHECK, store_id)

                time_keeper.stop_clock_unique(ENTRY_TOTAL_STORE_LOCAL,
                                              total_time_id)
                self.log.debug("%s %s %s" % (BENCH_TAG, TYPE_STORE_CHUNK_LOCAL,
                                             time_keeper.get_summary()))
                return {'value': 'ok'}

            time_keeper.start_clock()
            return self.talos_vc.get_policy_with_txid(
                chunk.get_tag_hex()).addCallback(handle_policy)
        except InvalidChunkError as e:
            return {'error': e.value}
        except TalosVCRestClientError:
            return {'error': "No policy found"}
Example #8
def run_benchmark_s3_talos_fetch(
        num_rounds,
        num_gets,
        out_logger,
        bucket_name,
        private_key=BitcoinVersionedPrivateKey(PRIVATE_KEY),
        policy_nonce=base64.b64decode(NONCE),
        stream_id=STREAMID,
        txid=TXID,
        chunk_size=100000,
        num_threads=None,
        do_store=True,
        do_delete=True,
        avoid_token_create=True):
    key = os.urandom(32)
    owner = private_key.public_key().address()
    identifier = DataStreamIdentifier(owner, stream_id, policy_nonce, txid)
    vc_client = TalosVCRestClient()
    storage = TalosS3Storage(bucket_name)

    num_threads = num_threads or num_gets
    if do_store:
        print "Store in S3"
        for block_id in range(num_gets):
            chunk = generate_random_chunk(private_key,
                                          block_id,
                                          identifier,
                                          key=key,
                                          size=chunk_size)
            store_chunk(storage, vc_client, chunk)

    if avoid_token_create:
        token_storage = []
        for block_id in range(num_gets):
            token = generate_query_token(
                identifier.owner, identifier.streamid, str(bytearray(16)),
                identifier.get_key_for_blockid(block_id), private_key)
            token_storage.append(token)
    else:
        token_storage = None

    for round in range(num_rounds):
        try:
            time_keeper = TimeKeeper()
            results = [[] for _ in range(num_threads)]
            threads = [
                FetchTalosThread(idx,
                                 results,
                                 TalosS3Storage(bucket_name),
                                 block_id,
                                 private_key,
                                 identifier,
                                 vc_client,
                                 token_store=token_storage)
                for idx, block_id in enumerate(
                    splitting(range(num_gets), num_threads))
            ]
            time_keeper.start_clock()
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            time_keeper.stop_clock("time_fetch_all")
            chunks = [item for sublist in results for item in sublist]
            if len(chunks) == num_gets:
                print "Round %d ok Num results: %d" % (round, num_gets)
            else:
                print "Round %d error Num results: %d" % (round, len(chunks))
            out_logger.log_times_keeper(time_keeper)
        except Exception as e:
            print "Round %d error: %s" % (round, e)
    print "DONE"
    if do_delete:
        clean_bucket(storage.s3, bucket_name)
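clean_bucket is handed storage.s3, which suggests TalosS3Storage keeps a boto connection around. A sketch of emptying the bucket under that assumption, using the boto 2 API (boto3 would look different):

def clean_bucket(s3_connection, bucket_name):
    # Hypothetical helper: delete every key in the bucket (boto 2 API).
    bucket = s3_connection.get_bucket(bucket_name)
    result = bucket.delete_keys([key.key for key in bucket.list()])
    return len(result.deleted)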