def _test_get_chunk_for_blockid(self, owner, stream_ident, block_id):
    # Resolve the storage address of the chunk for this block id.
    _, status, address = get_chunk_addr(
        stream_ident.get_key_for_blockid(block_id))
    self.assertEqual(status, 200)
    self.assertIsNotNone(address)
    ip, port = address.split(':')

    # Ask the peer for a fresh nonce to bind into the query token.
    _, status, nonce = get_nonce_peer(ip, int(port))
    self.assertEqual(status, 200)
    self.assertIsNotNone(nonce)
    nonce = str(nonce)

    # Build a signed query token and fetch the chunk from the peer.
    token = generate_query_token(
        owner, STREAMID, nonce, stream_ident.get_key_for_blockid(block_id),
        PRIVATE_KEY)
    _, status, chunk = get_chunk_peer(token.to_json(), ip, int(port))
    self.assertEqual(status, 200)
    self.assertIsNotNone(chunk)
    return chunk
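# A minimal sketch of how the helper above might be driven from a test
# case; the block id range and the NONCE/TXID constants reuse the
# module-level fixtures and are assumptions, not the original test:
def test_fetch_first_blocks(self):
    owner = PRIVATE_KEY.public_key().address()
    stream_ident = DataStreamIdentifier(owner, STREAMID, NONCE, TXID)
    for block_id in range(10):
        chunk = self._test_get_chunk_for_blockid(owner, stream_ident,
                                                 block_id)
        self.assertIsNotNone(chunk)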
Example #2
def run(self):
    for block_id in self.blockids:
        try:
            key = self.stream_identifier.get_key_for_blockid(block_id)
            if self.token_store is None:
                # No pre-generated tokens: sign a fresh query token with a
                # zero nonce (16 zero bytes).
                token = generate_query_token(
                    self.stream_identifier.owner,
                    self.stream_identifier.streamid, str(bytearray(16)),
                    key, self.private_key)
            else:
                # Reuse a token generated ahead of time.
                token = self.token_store[block_id]
            chunk = fetch_chunk(self.connection, self.vc_client, token)
            # aes_key is assumed to be a module-level constant in the
            # original source.
            data = chunk.get_and_check_chunk_data(aes_key,
                                                  compression_used=False,
                                                  do_decode=False)
            self.result_store[self.my_id].append(data)
        except Exception:
            # Record the failure so the caller can detect missing chunks.
            self.result_store[self.my_id].append(None)
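# The run method above only works on a threading.Thread subclass. A
# hypothetical constructor consistent with the attributes it uses and with
# how Example #3 instantiates FetchTalosThread (field names are guesses):
import threading

class FetchTalosThread(threading.Thread):
    def __init__(self, my_id, result_store, connection, blockids,
                 private_key, stream_identifier, vc_client, token_store=None):
        threading.Thread.__init__(self)
        self.my_id = my_id                  # index into result_store
        self.result_store = result_store    # shared list of per-thread lists
        self.connection = connection        # storage backend, e.g. TalosS3Storage
        self.blockids = blockids            # block ids assigned to this thread
        self.private_key = private_key
        self.stream_identifier = stream_identifier
        self.vc_client = vc_client
        self.token_store = token_store      # optional pre-generated tokens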
Example #3
def run_benchmark_s3_talos_fetch(
        num_rounds,
        num_gets,
        out_logger,
        bucket_name,
        private_key=BitcoinVersionedPrivateKey(PRIVATE_KEY),
        policy_nonce=base64.b64decode(NONCE),
        stream_id=STREAMID,
        txid=TXID,
        chunk_size=100000,
        num_threads=None,
        do_store=True,
        do_delete=True,
        avoid_token_create=True):
    key = os.urandom(32)
    owner = private_key.public_key().address()
    identifier = DataStreamIdentifier(owner, stream_id, policy_nonce, txid)
    vc_client = TalosVCRestClient()
    storage = TalosS3Storage(bucket_name)

    # Default to one fetch thread per chunk.
    num_threads = num_threads or num_gets
    if do_store:
        print "Store in S3"
        # Write num_gets random chunks so there is something to fetch.
        for block_id in range(num_gets):
            chunk = generate_random_chunk(private_key,
                                          block_id,
                                          identifier,
                                          key=key,
                                          size=chunk_size)
            store_chunk(storage, vc_client, chunk)

    if avoid_token_create:
        # Pre-generate one query token per block so token creation does not
        # count toward the measured fetch time.
        token_storage = []
        for block_id in range(num_gets):
            token = generate_query_token(
                identifier.owner, identifier.streamid, str(bytearray(16)),
                identifier.get_key_for_blockid(block_id), private_key)
            token_storage.append(token)
    else:
        token_storage = None

    for round_bench in range(num_rounds):
        try:
            time_keeper = TimeKeeper()
            # One independent result list per thread; [[]] * num_threads
            # would alias a single list across all threads.
            results = [[] for _ in range(num_threads)]
            threads = [
                FetchTalosThread(idx,
                                 results,
                                 TalosS3Storage(bucket_name),
                                 block_ids,
                                 private_key,
                                 identifier,
                                 vc_client,
                                 token_store=token_storage)
                for idx, block_ids in enumerate(
                    splitting(range(num_gets), num_threads))
            ]
            time_keeper.start_clock()
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            time_keeper.stop_clock("time_fetch_all")
            chunks = [item for sublist in results for item in sublist]
            if len(chunks) == num_gets:
                print "Round %d ok Num results: %d" % (round_bench, num_gets)
            else:
                print "Round %d incomplete: %d of %d results" % (
                    round_bench, len(chunks), num_gets)
            out_logger.log_times_keeper(time_keeper)
        except Exception as e:
            print "Round %d error: %s" % (round_bench, e)
    print "DONE"
    if do_delete:
        clean_bucket(storage.s3, bucket_name)
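# The only interface the benchmark needs from out_logger is a
# log_times_keeper() method. A stand-in logger and a hypothetical
# invocation (bucket name and parameter values are assumptions):
class PrintLogger(object):
    def log_times_keeper(self, time_keeper):
        print time_keeper  # the original logger presumably persists timings

run_benchmark_s3_talos_fetch(num_rounds=10,
                             num_gets=100,
                             out_logger=PrintLogger(),
                             bucket_name="talos-benchmark-bucket",
                             num_threads=10)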
Example #4
def run_benchmark_s3_talos(num_rounds,
                           out_logger,
                           bucket_name,
                           private_key=BitcoinVersionedPrivateKey(PRIVATE_KEY),
                           policy_nonce=base64.b64decode(NONCE),
                           stream_id=STREAMID,
                           txid=TXID,
                           chunk_size=100000,
                           do_delete=True,
                           do_sig=False):
    key = os.urandom(32)
    owner = private_key.public_key().address()
    identifier = DataStreamIdentifier(owner, stream_id, policy_nonce, txid)
    vc_client = TalosVCRestClient()
    storage = TalosS3Storage(bucket_name)

    for round_bench in range(num_rounds):
        try:
            time_keeper = TimeKeeper()
            #chunk_data = generate_data(size=chunk_size)
            chunk_data = DummyData(8500)

            global_id = time_keeper.start_clock_unique()
            chunk = generate_random_chunk_from_data(chunk_data,
                                                    private_key,
                                                    round_bench,
                                                    identifier,
                                                    time_keeper=time_keeper)
            store_chunk(storage,
                        vc_client,
                        chunk,
                        time_keeper=time_keeper,
                        do_sig=do_sig)
            time_keeper.stop_clock_unique("time_s3_store_chunk", global_id)

            token_time_id = time_keeper.start_clock_unique()
            token = generate_query_token(owner, stream_id, str(bytearray(16)),
                                         chunk.key, private_key)
            time_keeper.stop_clock_unique("time_token_create", token_time_id)

            global_id = time_keeper.start_clock_unique()
            chunk = fetch_chunk(storage,
                                vc_client,
                                token,
                                global_id=global_id,
                                time_keeper=time_keeper)
            if chunk is None:
                # fetch_chunk may return None; check before decoding.
                print "Round %d error" % round_bench
            else:
                # aes_key is assumed to be a module-level constant in the
                # original source.
                data = chunk.get_and_check_chunk_data(aes_key,
                                                      compression_used=False,
                                                      time_keeper=time_keeper,
                                                      do_decode=False)
                time_keeper.stop_clock_unique("time_s3_get_chunk", global_id)
                print "Round %d ok Chunk size: %d" % (round_bench,
                                                      len(chunk.encode()))

            out_logger.log_times_keeper(time_keeper)
        except Exception as e:
            print "Round %d error: %s" % (round_bench, e)
    print "DONE"
    if do_delete:
        clean_bucket(storage.s3, bucket_name)
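# Hypothetical invocation, reusing the PrintLogger stub sketched after
# Example #3; the bucket name is an assumption:
run_benchmark_s3_talos(num_rounds=10,
                       out_logger=PrintLogger(),
                       bucket_name="talos-benchmark-bucket",
                       do_sig=True)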
Example #5
def generate_token(block_id, private_key, stream_ident, nonce):
    # Sign a query token bound to the given nonce and the chunk key of the
    # requested block.
    return generate_query_token(stream_ident.owner, stream_ident.streamid,
                                nonce,
                                stream_ident.get_key_for_blockid(block_id),
                                private_key)
Example #6
def generate_token(block_id, nonce):
    # Variant that derives the owner and stream identity from module-level
    # constants (PRIVATE_KEY, STREAMID, NONCE, TXID).
    owner = PRIVATE_KEY.public_key().address()
    stream_ident = DataStreamIdentifier(owner, STREAMID, NONCE, TXID)
    return generate_query_token(owner, STREAMID, nonce,
                                stream_ident.get_key_for_blockid(block_id),
                                PRIVATE_KEY)
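# Minimal usage sketch (the zero nonce mirrors the benchmarks above; the
# block id is an assumption):
token = generate_token(0, str(bytearray(16)))
print token.to_json()  # serialized form, as sent to a storage peer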