Example #1
import os
import subprocess
import threading
import time

# load_config, init_cluster, spin_up_node and NodesProxy are provided by the
# surrounding cluster test helpers.


def start_cluster(num_nodes,
                  num_observers,
                  num_shards,
                  config,
                  genesis_config_changes,
                  client_config_changes,
                  message_handler=None):
    if not config:
        config = load_config()

    if not os.path.exists(os.path.expanduser("~/.near/test0")):
        near_root, node_dirs = init_cluster(num_nodes, num_observers,
                                            num_shards, config,
                                            genesis_config_changes,
                                            client_config_changes)
    else:
        near_root = config['near_root']
        node_dirs = subprocess.check_output(
            "find ~/.near/test* -maxdepth 0",
            shell=True).decode('utf-8').strip().split('\n')
        node_dirs = list(
            filter(lambda n: not n.endswith('_finished'), node_dirs))
    ret = []

    proxy = NodesProxy(
        message_handler) if message_handler is not None else None

    def spin_up_node_and_push(i, boot_key, boot_addr):
        node = spin_up_node(config,
                            near_root,
                            node_dirs[i],
                            i,
                            boot_key,
                            boot_addr, [],
                            proxy,
                            skip_starting_proxy=True)
        # Busy-wait until every lower-indexed node has been appended, so that
        # ret stays ordered by node index.
        while len(ret) < i:
            time.sleep(0.01)
        ret.append(node)
        return node

    boot_node = spin_up_node_and_push(0, None, None)

    handles = []
    for i in range(1, num_nodes + num_observers):
        handle = threading.Thread(target=spin_up_node_and_push,
                                  args=(i, boot_node.node_key.pk,
                                        boot_node.addr()))
        handle.start()
        handles.append(handle)

    for handle in handles:
        handle.join()

    for node in ret:
        node.start_proxy_if_needed()

    return ret
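
A call to this helper might look like the sketch below; the node counts, the genesis override and the empty client_config_changes are illustrative values, not taken from the example itself.

# Hypothetical invocation of the start_cluster() shown above; argument values
# are illustrative only.
nodes = start_cluster(
    num_nodes=2,
    num_observers=0,
    num_shards=1,
    config=None,  # falls back to load_config()
    genesis_config_changes=[["min_gas_price", 0]],
    client_config_changes={})
boot_node, other_node = nodes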
Example #2
import os
import pathlib
import threading

# load_config, init_cluster, spin_up_node, NodesProxy and BootNode are
# provided by the surrounding cluster test helpers.


def start_cluster(num_nodes,
                  num_observers,
                  num_shards,
                  config,
                  genesis_config_changes,
                  client_config_changes,
                  message_handler=None):
    if not config:
        config = load_config()

    dot_near = pathlib.Path.home() / '.near'
    if (dot_near / 'test0').exists():
        near_root = config['near_root']
        node_dirs = [
            str(dot_near / name) for name in os.listdir(dot_near)
            if name.startswith('test') and not name.endswith('_finished')
        ]
    else:
        near_root, node_dirs = init_cluster(num_nodes, num_observers,
                                            num_shards, config,
                                            genesis_config_changes,
                                            client_config_changes)

    proxy = NodesProxy(
        message_handler) if message_handler is not None else None
    ret = []

    def spin_up_node_and_push(i, boot_node: BootNode):
        single_node = (num_nodes == 1) and (num_observers == 0)
        node = spin_up_node(config,
                            near_root,
                            node_dirs[i],
                            i,
                            boot_node=boot_node,
                            proxy=proxy,
                            skip_starting_proxy=True,
                            single_node=single_node)
        # Record the node together with its index; results are sorted by index
        # below, since threads may finish in any order.
        ret.append((i, node))
        return node

    boot_node = spin_up_node_and_push(0, None)

    handles = []
    for i in range(1, num_nodes + num_observers):
        handle = threading.Thread(target=spin_up_node_and_push,
                                  args=(i, boot_node))
        handle.start()
        handles.append(handle)

    for handle in handles:
        handle.join()

    nodes = [node for _, node in sorted(ret)]
    for node in nodes:
        node.start_proxy_if_needed()

    return nodes
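
This variant also special-cases a one-node cluster: when num_nodes == 1 and num_observers == 0 it passes single_node=True to spin_up_node. A single-node call might look like the sketch below; the argument values are illustrative, not taken from the example.

# Hypothetical single-node invocation of the start_cluster() shown above;
# argument values are illustrative only.
nodes = start_cluster(
    num_nodes=1,
    num_observers=0,
    num_shards=1,
    config=None,
    genesis_config_changes=[["min_gas_price", 0]],
    client_config_changes={})
node = nodes[0]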
Example #3
    def elapsed_seconds(self) -> float:
        return time.monotonic() - self.__start

    def left_seconds(self) -> float:
        return self.__end - time.monotonic()


if __name__ == '__main__':
    manager = multiprocessing.Manager()
    hash_to_metadata = manager.dict()
    requests = manager.dict()
    responses = manager.dict()

    proxy = NodesProxy(
        partial(Handler,
                hash_to_metadata=hash_to_metadata,
                requests=requests,
                responses=responses))

    timeout = Timeout(TIMEOUT)

    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

    config = load_config()
    near_root, node_dirs = init_cluster(
        2,
        3,
        2,
        config,
        [
            ["min_gas_price", 0],

            if msg_kind == 'PartialEncodedChunkRequest':
                if fr == 4:
                    hash_ = msg.Routed.body.PartialEncodedChunkRequest.chunk_hash
                    (height, shard_id) = hash_to_metadata[hash_]
                    print("REQ %s %s %s %s" % (height, shard_id, fr, to))
                    requests[(height, shard_id, to)] = 1

            if msg_kind == 'PartialEncodedChunkResponse':
                if to == 4:
                    hash_ = msg.Routed.body.PartialEncodedChunkResponse.chunk_hash
                    (height, shard_id) = hash_to_metadata[hash_]
                    print("RESP %s %s %s %s" % (height, shard_id, fr, to))
                    responses[(height, shard_id, fr)] = 1

        return True

proxy = NodesProxy(Handler)


started = time.time()

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

config = load_config()
near_root, node_dirs = init_cluster(
    4, 1, 2, config,
    [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", EPOCH_LENGTH],
     ["block_producer_kickout_threshold", 20],
     ["chunk_producer_kickout_threshold", 20],
     ["validators", 0, "amount", "110000000000000000000000000000000"],
     ["validators", 1, "amount", "110000000000000000000000000000000"],
     [