def run_benchmark_fetch_par(num_rounds, data_path, num_entries, granularity, fetch_granularity, num_threads, out_logger,
                            private_key=BitcoinVersionedPrivateKey(PRIVATE_KEY),
                            policy_nonce=base64.b64decode(NONCE), stream_id=STREAMID,
                            txid=TXID, ip=IP, port=PORT):
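    """Store num_entries smart-meter entries in the DHT as chunks of `granularity` entries,
    then fetch them back in parallel and log the timings.

    For every value x in fetch_granularity, the chunks covering x entries are fetched
    num_rounds times using up to num_threads Fetchjob worker threads.
    """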
    key = os.urandom(32)
    identifier = DataStreamIdentifier(private_key.public_key().address(), stream_id, policy_nonce, txid)

    dht_api_client = DHTRestClient(dhtip=ip, dhtport=port)

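    # store phase: split the data set into blocks of `granularity` entries and push them to the DHT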
    for block_id, chunk_data in enumerate(extract_eth_smartmeter_data(data_path, granularity, max_entries=num_entries)):
        try:
            chunk = generate_cloud_chunk(chunk_data, private_key, block_id, identifier, key=key)
            dht_api_client.store_chunk(chunk)
            print "Store chunk %d Num entries: %d" % (block_id, len(chunk_data.entries))
        except DHTRestClientException as e:
            print "Store round %d error: %s" % (block_id, e)

    # chunks needed to cover the full data set (recomputed per fetch size below)
    num_fetches = num_entries / granularity
    if num_entries % granularity != 0:
        num_fetches += 1

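    # fetch phase: for each fetch size x, retrieve the covering chunks in parallel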
    for x in fetch_granularity:
        num_fetches = x / granularity
        if x % granularity != 0:
            num_fetches += 1

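        # repeat the parallel fetch num_rounds times and log the timing of each round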
        for round in range(num_rounds):
            time_keeper = TimeKeeper()
            # one result list per worker thread; [[]] * n would alias a single shared list
            results = [[] for _ in range(num_threads)]

            # never start more worker threads than there are chunks to fetch
            num_job_threads = min(num_fetches, num_threads)
            if num_job_threads < num_threads:
                print "Num Fetches: %d Temp_threads %d" % (num_fetches, num_job_threads)
            # each worker gets its own DHT client and a share of the block ids
            threads = [Fetchjob(idx, results, DHTRestClient(dhtip=ip, dhtport=port), block_ids, private_key, identifier)
                       for idx, block_ids in enumerate(splitting(range(num_fetches), num_job_threads))]
            time_keeper.start_clock()
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            time_keeper.stop_clock("time_fetch_all")
            time_keeper.store_value("num_entries", x)
            time_keeper.store_value("num_blocks", num_fetches)
            time_keeper.store_value("round", round)
            chunks = [item for sublist in results for item in sublist]
            if len(chunks) == num_fetches:
                print "Round %d ok Num results: %d" % (round, len(chunks))
            else:
                print "Round %d error: expected %d results, got %d" % (round, num_fetches, len(chunks))
            for idx, chunk in enumerate(chunks):
                if chunk is None:
                    print "No result for chunk %d " % idx
            out_logger.log_times_keeper(time_keeper)
        out_logger.flush_to_db()
    print "DONE"
    # assumption: when no SQLite path is given, fall back to a file-based logger
    # (the matching if-branch is missing from this excerpt; FileBenchmarkLogger is assumed)
    if args.log_db is None:
        logger = FileBenchmarkLogger("%s.log" % (args.name,), LOGGING_FIELDS)
    else:
        logger = SQLLiteBenchmarkLogger(args.log_db, LOGGING_FIELDS, "%s" % (args.name,))

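    # compression benchmark: compare encoded chunk size with its compressed size for each chunk size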
    try:
        for chunk_size in args.chunk_sizes:
            time_keeper = TimeKeeper()
            size_plain = 0
            size_compressed = 0
            print "Chunk size: %d" % chunk_size
            if args.do_smartmeter:
                for chunk in extract_eth_smartmeter_data(args.data_path, chunk_size):
                    encoded = chunk.encode()
                    data_compressed = compress_data(encoded)
                    #print "Before: %d After: %d" % (len(encoded), len(data_compressed))
                    size_plain += len(encoded)
                    size_compressed += len(data_compressed)
            else:
                for chunk in extract_eth_plug_data(args.data_path, chunk_size, 1, 1):
                    encoded = chunk.encode()
                    data_compressed = compress_data(encoded)
                    print "Before: %d After: %d" % (len(encoded), len(data_compressed))
                    size_plain += len(encoded)
                    size_compressed += len(data_compressed)
            print "Chunk size: %d DONE" % chunk_size
            time_keeper.store_value("num_chunk_entries", chunk_size)
            time_keeper.store_value("size_before", size_plain)
            time_keeper.store_value("size_compressed", size_compressed)
            logger.log_times_keeper(time_keeper)
    finally:
        logger.close()