Example #1
0
def check_keys(new_root):
    keychain: Keychain = Keychain()
    all_sks = keychain.get_all_private_keys()
    if len(all_sks) == 0:
        print(
            "No keys are present in the keychain. Generate them with 'chia keys generate'"
        )
        return

    config: Dict = load_config(new_root, "config.yaml")
    pool_child_pubkeys = [
        master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks
    ]
    all_targets = []
    stop_searching_for_farmer = "xch_target_address" not in config["farmer"]
    stop_searching_for_pool = "xch_target_address" not in config["pool"]
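    # Scan the first 500 wallet-derivation indices of every key. If a
    # configured target address is derived from one of our keys, we can stop
    # searching for it; the `i > 0` check guarantees at least one full pass,
    # so all_targets[0] is always populated for the fallbacks below.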
    for i in range(500):
        if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
            break
        for sk, _ in all_sks:
            all_targets.append(
                encode_puzzle_hash(
                    create_puzzlehash_for_pk(
                        master_sk_to_wallet_sk(sk, uint32(i)).get_g1())))
            if all_targets[-1] == config["farmer"].get("xch_target_address"):
                stop_searching_for_farmer = True
            if all_targets[-1] == config["pool"].get("xch_target_address"):
                stop_searching_for_pool = True

    # Set the destinations
    if "xch_target_address" not in config["farmer"]:
        print(
            f"Setting the xch destination address for coinbase fees reward to {all_targets[0]}"
        )
        config["farmer"]["xch_target_address"] = all_targets[0]
    elif config["farmer"]["xch_target_address"] not in all_targets:
        print(
            f"WARNING: farmer using a puzzle hash which we don't have the private"
            f" keys for. Overriding "
            f"{config['farmer']['xch_target_address']} with {all_targets[0]}")
        config["farmer"]["xch_target_address"] = all_targets[0]

    if "pool" not in config:
        config["pool"] = {}
    if "xch_target_address" not in config["pool"]:
        print(
            f"Setting the xch destination address for coinbase reward to {all_targets[0]}"
        )
        config["pool"]["xch_target_address"] = all_targets[0]
    elif config["pool"]["xch_target_address"] not in all_targets:
        print(
            f"WARNING: pool using a puzzle hash which we don't have the private"
            f" keys for. Overriding "
            f"{config['pool']['xch_target_address']} with {all_targets[0]}")
        config["pool"]["xch_target_address"] = all_targets[0]

    # Set the pool pks in the farmer
    pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys)
    if "pool_public_keys" in config["farmer"]:
        for pk_hex in config["farmer"]["pool_public_keys"]:
            # Add original ones in config
            pool_pubkeys_hex.add(pk_hex)

    config["farmer"]["pool_public_keys"] = pool_pubkeys_hex
    save_config(new_root, "config.yaml", config)
Example #2
0
    async def test1(self, simulation):
        test_rpc_port = uint16(21522)
        test_rpc_port_2 = uint16(21523)
        harvester, farmer = simulation

        def stop_node_cb():
            pass

        def stop_node_cb_2():
            pass

        config = load_config(bt.root_path, "config.yaml")
        hostname = config["self_hostname"]
        daemon_port = config["daemon_port"]

        farmer_rpc_api = FarmerRpcApi(farmer)
        harvester_rpc_api = HarvesterRpcApi(harvester)

        rpc_cleanup = await start_rpc_server(
            farmer_rpc_api,
            hostname,
            daemon_port,
            test_rpc_port,
            stop_node_cb,
            connect_to_daemon=False,
        )
        rpc_cleanup_2 = await start_rpc_server(
            harvester_rpc_api,
            hostname,
            daemon_port,
            test_rpc_port_2,
            stop_node_cb_2,
            connect_to_daemon=False,
        )

        try:
            client = await FarmerRpcClient.create("localhost", test_rpc_port)
            client_2 = await HarvesterRpcClient.create("localhost",
                                                       test_rpc_port_2)

            async def have_connections():
                return len(await client.get_connections()) > 0

            await time_out_assert(5, have_connections, True)

            await client.get_latest_challenges()

            async def have_challenges():
                return len(await client.get_latest_challenges()) > 0

            await time_out_assert(5, have_challenges, True)

            async def have_plots():
                return len((await client_2.get_plots())["plots"]) > 0

            await time_out_assert(5, have_plots, True)

            res = await client_2.get_plots()
            num_plots = len(res["plots"])
            assert num_plots > 0
            plot_dir = get_plot_dir() / "subdir"
            plot_dir.mkdir(parents=True, exist_ok=True)
            plotter = DiskPlotter()
            filename = "test_farmer_harvester_rpc_plot.plot"
            plotter.create_plot_disk(
                str(plot_dir),
                str(plot_dir),
                str(plot_dir),
                filename,
                18,
                stream_plot_info(bt.pool_pk, bt.farmer_pk,
                                 AugSchemeMPL.key_gen(bytes([4] * 32))),
                token_bytes(32),
                128,
            )

            res_2 = await client_2.get_plots()
            assert len(res_2["plots"]) == num_plots

            print(await client_2.get_plot_directories())
            assert len(await client_2.get_plot_directories()) == 1

            await client_2.add_plot_directory(str(plot_dir))

            assert len(await client_2.get_plot_directories()) == 2

            res_2 = await client_2.get_plots()
            assert len(res_2["plots"]) == num_plots + 1

            await client_2.delete_plot(str(plot_dir / filename))
            res_3 = await client_2.get_plots()
            assert len(res_3["plots"]) == num_plots

            await client_2.remove_plot_directory(str(plot_dir))
            print(await client_2.get_plot_directories())
            assert len(await client_2.get_plot_directories()) == 1

        except AssertionError:
            # Checks that the RPC manages to stop the node
            client.close()
            client_2.close()
            await client.await_closed()
            await client_2.await_closed()
            await rpc_cleanup()
            await rpc_cleanup_2()
            raise

        client.close()
        client_2.close()
        await client.await_closed()
        await client_2.await_closed()
        await rpc_cleanup()
        await rpc_cleanup_2()
Example #3
0
 def __init__(self, args):
     self.args = args
     self.config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
Example #4
0
    def __init__(
        self,
        root_path,
        api: Any,
        node_type: NodeType,
        advertised_port: int,
        service_name: str,
        server_listen_ports: List[int] = [],
        connect_peers: List[PeerInfo] = [],
        auth_connect_peers: bool = True,
        on_connect_callback: Optional[OnConnectFunc] = None,
        rpc_info: Optional[Tuple[type, int]] = None,
        start_callback: Optional[Callable] = None,
        stop_callback: Optional[Callable] = None,
        await_closed_callback: Optional[Callable] = None,
        periodic_introducer_poll: Optional[Tuple[PeerInfo, int, int]] = None,
        parse_cli_args=True,
    ):
        net_config = load_config(root_path, "config.yaml")
        ping_interval = net_config.get("ping_interval")
        network_id = net_config.get("network_id")
        self.self_hostname = net_config.get("self_hostname")
        self.daemon_port = net_config.get("daemon_port")
        assert ping_interval is not None
        assert network_id is not None

        self._node_type = node_type
        self._service_name = service_name

        proctitle_name = f"chia_{service_name}"
        setproctitle(proctitle_name)
        self._log = logging.getLogger(service_name)
        if parse_cli_args:
            config = load_config_cli(root_path, "config.yaml", service_name)
        else:
            config = load_config(root_path, "config.yaml", service_name)
        initialize_logging(service_name, config["logging"], root_path)

        self._rpc_info = rpc_info

        self._server = ChiaServer(
            advertised_port,
            api,
            node_type,
            ping_interval,
            network_id,
            root_path,
            config,
            name=f"{service_name}_server",
        )
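        # The API object may expose its server setter as either `set_server`
        # or `_set_server`; call whichever one is present.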
        for _ in ["set_server", "_set_server"]:
            f = getattr(api, _, None)
            if f:
                f(self._server)

        self._connect_peers = connect_peers
        self._auth_connect_peers = auth_connect_peers
        self._server_listen_ports = server_listen_ports

        self._api = api
        self._task = None
        self._is_stopping = False
        self._stopped_by_rpc = False

        self._periodic_introducer_poll = periodic_introducer_poll
        self._on_connect_callback = on_connect_callback
        self._start_callback = start_callback
        self._stop_callback = stop_callback
        self._await_closed_callback = await_closed_callback
        self._advertised_port = advertised_port
        self._server_sockets: List = []
Example #5
0
def get_plot_directories(root_path: Path) -> List[str]:
    config = load_config(root_path, "config.yaml")
    return [
        str(Path(str_path).resolve())
        for str_path in config["harvester"]["plot_directories"]
    ]
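# A minimal usage sketch (hypothetical root path; assumes a standard chia
# config.yaml with a harvester section exists under that root):
#
#     from pathlib import Path
#     for plot_dir in get_plot_directories(Path.home() / ".chia" / "mainnet"):
#         print(plot_dir)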
Example #6
0
def migrate_from(
    old_root: Path,
    new_root: Path,
    manifest: List[str],
    do_not_migrate_settings: List[str],
):
    """
    Copy all the files in "manifest" to the new config directory.
    """
    if old_root == new_root:
        print("same as new path, exiting")
        return 1
    if not old_root.is_dir():
        print(f"{old_root} not found - this is ok if you did not install this version.")
        return 0
    print(f"\n{old_root} found")
    print(f"Copying files from {old_root} to {new_root}\n")
    not_found = []
    for f in manifest:
        old_path = old_root / f
        new_path = new_root / f
        if old_path.is_file():
            print(f"{new_path}")
            mkdir(new_path.parent)
            shutil.copy(old_path, new_path)
        else:
            not_found.append(f)
            print(f"{old_path} not found, skipping")
    # update config yaml with new keys
    config: Dict = load_config(new_root, "config.yaml")
    config_str: str = initial_config_file("config.yaml")
    default_config: Dict = yaml.safe_load(config_str)
    flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings})
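    # Merge the packaged default config into the migrated one so that any
    # newly introduced keys get their default values.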
    dict_add_new_default(config, default_config, flattened_keys)

    save_config(new_root, "config.yaml", config)

    # migrate plots
    # for now, we simply leave them where they are
    # and make what may have been relative paths absolute
    if "config/trusted.key" in not_found or "config/trusted.key" in not_found:
        initialize_ssl(new_root)

    plots_config: Dict = load_config(new_root, "plots.yaml")

    plot_root = (
        load_config(new_root, "config.yaml").get("harvester", {}).get("plot_root", ".")
    )

    old_plots_root: Path = path_from_root(old_root, plot_root)
    new_plots_root: Path = path_from_root(new_root, plot_root)

    old_plot_paths = plots_config.get("plots", {})
    if len(old_plot_paths) == 0:
        print("no plots found, no plots migrated")
        return 1

    print("\nmigrating plots.yaml")

    new_plot_paths: Dict = {}
    for path, values in old_plot_paths.items():
        old_path_full = path_from_root(old_plots_root, path)
        new_path_relative = make_path_relative(old_path_full, new_plots_root)
        print(f"rewriting {path}\n as {new_path_relative}")
        new_plot_paths[str(new_path_relative)] = values
    plots_config_new: Dict = {"plots": new_plot_paths}
    save_config(new_root, "plots.yaml", plots_config_new)
    print("\nUpdated plots.yaml to point to where your existing plots are.")
    print(
        "\nYour plots have not been moved so be careful deleting old preferences folders."
    )

    print("\nIf you want to move your plot files, you should also modify")
    print(f"{config_path_for_filename(new_root, 'plots.yaml')}")
    return 1
Example #7
0
async def netstorge_async(rpc_port: int, delta_block_height: str,
                          start: str) -> None:
    """
    Calculates the estimated space on the network given two block header hashes.
    """
    client = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        if rpc_port is None:
            rpc_port = config["full_node"]["rpc_port"]
        client = await FullNodeRpcClient.create(self_hostname,
                                                uint16(rpc_port),
                                                DEFAULT_ROOT_PATH, config)

        if delta_block_height:
            if start == "":
                blockchain_state = await client.get_blockchain_state()
                if blockchain_state["peak"] is None:
                    print("No blocks in blockchain")
                    client.close()
                    await client.await_closed()
                    return None

                newer_block_height = blockchain_state["peak"].height
            else:
                newer_block = await client.get_block_record(
                    hexstr_to_bytes(start))
                if newer_block is None:
                    print("Block header hash", start, "not found.")
                    client.close()
                    await client.await_closed()
                    return None
                else:
                    print("newer_height", newer_block.height)
                    newer_block_height = newer_block.height

            newer_block_header = await client.get_block_record_by_height(
                newer_block_height)
            older_block_height = max(
                0, newer_block_height - int(delta_block_height))
            older_block_header = await client.get_block_record_by_height(
                older_block_height)
            network_space_bytes_estimate = await client.get_network_space(
                newer_block_header.header_hash, older_block_header.header_hash)
            print("Older Block\n"
                  f"Block Height: {older_block_header.height}\n"
                  f"Weight:           {older_block_header.weight}\n"
                  f"VDF Iterations:   {older_block_header.total_iters}\n"
                  f"Header Hash:      0x{older_block_header.header_hash}\n")
            print("Newer Block\n"
                  f"Block Height: {newer_block_header.height}\n"
                  f"Weight:           {newer_block_header.weight}\n"
                  f"VDF Iterations:   {newer_block_header.total_iters}\n"
                  f"Header Hash:      0x{newer_block_header.header_hash}\n")
            network_space_terabytes_estimate = network_space_bytes_estimate / 1024**4
            if network_space_terabytes_estimate > 1024:
                print(
                    f"The network has an estimated {network_space_terabytes_estimate / 1024:.3f} PiB"
                )
            else:
                print(
                    f"The network has an estimated {network_space_terabytes_estimate:.3f} TiB"
                )

    except Exception as e:
        if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
            print(
                f"Connection error. Check if full node rpc is running at {rpc_port}"
            )
        else:
            print(f"Exception {e}")

    if client is not None:
        client.close()
        await client.await_closed()
Example #8
0
def create_plots(args,
                 root_path,
                 use_datetime=True,
                 test_private_keys: Optional[List] = None):
    config_filename = config_path_for_filename(root_path, "config.yaml")

    if args.tmp2_dir is None:
        args.tmp2_dir = args.final_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(
            bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_default_farmer_public_key()

    pool_public_key: G1Element
    if args.pool_public_key is not None:
        pool_public_key = G1Element.from_bytes(
            bytes.fromhex(args.pool_public_key))
    else:
        pool_public_key = get_default_pool_public_key()
    if args.num is not None:
        num = args.num
    else:
        num = 1
    log.info(
        f"Creating {num} plots of size {args.size}, pool public key:  "
        f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
    )

    mkdir(args.tmp_dir)
    mkdir(args.tmp2_dir)
    mkdir(args.final_dir)
    finished_filenames = []
    config = load_config(root_path, config_filename)
    plot_filenames = get_plot_filenames(config["harvester"])
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = PrivateKey.from_seed(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), farmer_public_key)

        # The plot id is based on the harvester, farmer, and pool keys
        plot_id: bytes32 = ProofOfSpace.calculate_plot_id(
            pool_public_key, plot_public_key)
        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        if args.final_dir.resolve() not in plot_filenames:
            if (str(args.final_dir.resolve())
                    not in config["harvester"]["plot_directories"]):
                # Adds the directory to the plot directories if it is not present
                config = add_plot_directory(str(args.final_dir.resolve()),
                                            root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                stream_plot_info(pool_public_key, farmer_public_key, sk),
                plot_id,
                args.buffer,
            )
            finished_filenames.append(filename)
        else:
            log.info(f"Plot {filename} already exists")

    log.info("Summary:")
    try:
        args.tmp_dir.rmdir()
    except Exception:
        log.info(
            f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty."
        )
    try:
        args.tmp2_dir.rmdir()
    except Exception:
        log.info(
            f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty."
        )
    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)
Example #9
0
def create_plots(args,
                 root_path,
                 use_datetime=True,
                 test_private_keys: Optional[List] = None):
    config_filename = config_path_for_filename(root_path, "config.yaml")
    config = load_config(root_path, config_filename)

    if args.tmp2_dir is None:
        args.tmp2_dir = args.tmp_dir

    farmer_public_key: G1Element
    if args.farmer_public_key is not None:
        farmer_public_key = G1Element.from_bytes(
            bytes.fromhex(args.farmer_public_key))
    else:
        farmer_public_key = get_farmer_public_key(args.alt_fingerprint)

    pool_public_key: G1Element
    if args.pool_public_key is not None:
        pool_public_key = G1Element.from_bytes(
            bytes.fromhex(args.pool_public_key))
    else:
        pool_public_key = get_pool_public_key(args.alt_fingerprint)
    if args.num is not None:
        num = args.num
    else:
        num = 1

    if args.size < config["min_mainnet_k_size"]:
        log.warning(
            f"CREATING PLOTS WITH SIZE k={args.size}, which is less than the minimum required for mainnet"
        )
    log.info(
        f"Creating {num} plots of size {args.size}, pool public key:  "
        f"{bytes(pool_public_key).hex()} farmer public key: {bytes(farmer_public_key).hex()}"
    )

    tmp_dir_created = False
    if not args.tmp_dir.exists():
        mkdir(args.tmp_dir)
        tmp_dir_created = True

    tmp2_dir_created = False
    if not args.tmp2_dir.exists():
        mkdir(args.tmp2_dir)
        tmp2_dir_created = True

    mkdir(args.final_dir)

    finished_filenames = []
    for i in range(num):
        # Generate a random master secret key
        if test_private_keys is not None:
            assert len(test_private_keys) == num
            sk: PrivateKey = test_private_keys[i]
        else:
            sk = AugSchemeMPL.key_gen(token_bytes(32))

        # The plot public key is the combination of the harvester and farmer keys
        plot_public_key = ProofOfSpace.generate_plot_public_key(
            master_sk_to_local_sk(sk).get_g1(), farmer_public_key)

        # The plot id is based on the harvester, farmer, and pool keys
        plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(
            pool_public_key, plot_public_key)
        if args.plotid is not None:
            log.info(f"Debug plot ID: {args.plotid}")
            plot_id = bytes32(bytes.fromhex(args.plotid))

        plot_memo: bytes = stream_plot_info(pool_public_key,
                                            farmer_public_key, sk)
        if args.memo is not None:
            log.info(f"Debug memo: {args.memo}")
            plot_memo = bytes.fromhex(args.memo)

        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        if use_datetime:
            filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
        else:
            filename = f"plot-k{args.size}-{plot_id}.plot"
        full_path: Path = args.final_dir / filename

        resolved_final_dir: str = str(Path(args.final_dir).resolve())
        plot_directories_list: str = config["harvester"]["plot_directories"]

        if args.exclude_final_dir:
            log.info(
                f"NOT adding directory {resolved_final_dir} to harvester for farming"
            )
            if resolved_final_dir in plot_directories_list:
                log.warning(
                    f"Directory {resolved_final_dir} already exists for harvester, please remove it manually"
                )
        else:
            if resolved_final_dir not in plot_directories_list:
                # Adds the directory to the plot directories if it is not present
                log.info(
                    f"Adding directory {resolved_final_dir} to harvester for farming"
                )
                config = add_plot_directory(resolved_final_dir, root_path)

        if not full_path.exists():
            log.info(f"Starting plot {i + 1}/{num}")
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                plot_memo,
                plot_id,
                args.buffer,
                args.buckets,
                args.stripe_size,
                args.num_threads,
                args.nobitfield,
            )
            finished_filenames.append(filename)
        else:
            log.info(f"Plot {filename} already exists")

    log.info("Summary:")

    if tmp_dir_created:
        try:
            args.tmp_dir.rmdir()
        except Exception:
            log.info(
                f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty."
            )

    if tmp2_dir_created:
        try:
            args.tmp2_dir.rmdir()
        except Exception:
            log.info(
                f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty."
            )

    log.info(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log.info(filename)
Example #10
0
async def netstorge_async(args, parser):
    """
    Calculates the estimated space on the network given two block header hashes.
    # TODO: add help on failure/no args
    """
    client = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        if "rpc_port" not in args or args.rpc_port is None:
            rpc_port = config["full_node"]["rpc_port"]
        else:
            rpc_port = args.rpc_port
        client = await FullNodeRpcClient.create(self_hostname, rpc_port)

        # print (args.blocks)
        if args.delta_block_height:
            # Get lca or newer block
            if args.start == "":
                blockchain_state = await client.get_blockchain_state()
                newer_block_height = blockchain_state["lca"].data.height
            else:
                newer_block_height = int(args.start)  # Starting block height in args
            newer_block_header = await client.get_header_by_height(newer_block_height)
            older_block_height = newer_block_height - int(args.delta_block_height)
            older_block_header = await client.get_header_by_height(older_block_height)
            newer_block_header_hash = str(newer_block_header.get_hash())
            older_block_header_hash = str(older_block_header.get_hash())
            elapsed_time = (
                newer_block_header.data.timestamp - older_block_header.data.timestamp
            )
            newer_block_time_string = human_local_time(
                newer_block_header.data.timestamp
            )
            older_block_time_string = human_local_time(
                older_block_header.data.timestamp
            )
            time_delta = datetime.timedelta(seconds=elapsed_time)
            network_space_bytes_estimate = await client.get_network_space(
                newer_block_header_hash, older_block_header_hash
            )
            print(
                f"Older Block: {older_block_header.data.height}\n"
                f"Header Hash: 0x{older_block_header_hash}\n"
                f"Timestamp:   {older_block_time_string}\n"
                f"Weight:      {older_block_header.data.weight}\n"
                f"Total VDF\n"
                f"Iterations:  {older_block_header.data.total_iters}\n"
            )
            print(
                f"Newer Block: {newer_block_header.data.height}\n"
                f"Header Hash: 0x{newer_block_header_hash}\n"
                f"Timestamp:   {newer_block_time_string}\n"
                f"Weight:      {newer_block_header.data.weight}\n"
                f"Total VDF\n"
                f"Iterations:  {newer_block_header.data.total_iters}\n"
            )
            network_space_terabytes_estimate = network_space_bytes_estimate / 1024 ** 4
            print(
                f"The elapsed time between blocks is reported as {time_delta}.\n"
                f"The network has an estimated {network_space_terabytes_estimate:.2f}TiB"
            )

    except Exception as e:
        if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
            print(f"Connection error. Check if full node is running at {args.rpc_port}")
        else:
            print(f"Exception {e}")

    if client is not None:
        client.close()
        await client.await_closed()
Example #11
0
    async def start_wallet(self, public_key_fingerprint: Optional[int] = None) -> bool:
        private_keys = self.keychain.get_all_private_keys()
        if len(private_keys) == 0:
            self.log.info("No keys")
            return False

        private_key = None
        if public_key_fingerprint is not None:
            for sk, _ in private_keys:
                if sk.get_public_key().get_fingerprint() == public_key_fingerprint:
                    private_key = sk
                    break
        else:
            private_key = private_keys[0][0]

        if private_key is None:
            self.log.info("No keys")
            return False

        if self.config["testing"] is True:
            log.info("Websocket server in testing mode")
            self.wallet_node = await WalletNode.create(
                self.config,
                private_key,
                self.root_path,
                override_constants=test_constants,
                local_test=True,
            )
        else:
            log.info("Not Testing")
            self.wallet_node = await WalletNode.create(
                self.config, private_key, self.root_path
            )

        if self.wallet_node is None:
            return False

        self.trade_manager = await TradeManager.create(
            self.wallet_node.wallet_state_manager
        )
        self.wallet_node.wallet_state_manager.set_callback(self.state_changed_callback)

        net_config = load_config(self.root_path, "config.yaml")
        ping_interval = net_config.get("ping_interval")
        network_id = net_config.get("network_id")
        assert ping_interval is not None
        assert network_id is not None

        server = ChiaServer(
            self.config["port"],
            self.wallet_node,
            NodeType.WALLET,
            ping_interval,
            network_id,
            self.root_path,
            self.config,
        )
        self.wallet_node.set_server(server)

        self.wallet_node._start_bg_tasks()

        return True
Example #12
0
async def async_main():
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "timelord")
    initialize_logging("Timelord %(name)-23s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    setproctitle("chia_timelord")

    timelord = Timelord(config, constants)
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        timelord,
        NodeType.TIMELORD,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )

    timelord_shutdown_task: Optional[asyncio.Task] = None

    coro = asyncio.start_server(
        timelord._handle_client,
        config["vdf_server"]["host"],
        config["vdf_server"]["port"],
        loop=asyncio.get_running_loop(),
    )

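    # On SIGINT/SIGTERM, close the server sockets and begin an orderly
    # timelord shutdown; loop.add_signal_handler raises NotImplementedError
    # on Windows, which is handled below.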
    def signal_received():
        nonlocal timelord_shutdown_task
        server.close_all()
        timelord_shutdown_task = asyncio.create_task(timelord._shutdown())

    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT,
                                                      signal_received)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM,
                                                      signal_received)
    except NotImplementedError:
        log.info("signal handlers unsupported")

    await asyncio.sleep(10)  # Allow the full node to start up first

    timelord.set_server(server)
    timelord._start_bg_tasks()

    vdf_server = asyncio.ensure_future(coro)

    async for msg in timelord._manage_discriminant_queue():
        server.push_message(msg)

    log.info("Closed discriminant queue.")
    if timelord_shutdown_task is not None:
        await timelord_shutdown_task
    log.info("Shutdown timelord.")

    await server.await_closed()
    vdf_server.cancel()
    log.info("Timelord fully closed.")
def main():
    """
    Allows regenerating the harvester, pool, and wallet keys; each option defaults to True.
    """

    root_path = DEFAULT_ROOT_PATH
    keys_yaml = "keys.yaml"
    parser = argparse.ArgumentParser(description="Chia key generator script.")
    parser.add_argument(
        "-a",
        "--harvester",
        type=str2bool,
        nargs="?",
        const=True,
        default=True,
        help="Regenerate plot key seed",
    )
    parser.add_argument(
        "-p",
        "--pool",
        type=str2bool,
        nargs="?",
        const=True,
        default=True,
        help="Regenerate pool keys",
    )
    parser.add_argument(
        "-w",
        "--wallet",
        type=str2bool,
        nargs="?",
        const=True,
        default=True,
        help="Regenerate wallet keys",
    )
    args = parser.parse_args()

    key_config_filename = config_path_for_filename(root_path, keys_yaml)
    if key_config_filename.exists():
        # If the file exists, warn the user
        yn = input(
            f"The keys file {key_config_filename} already exists. Are you sure"
            f" you want to override the keys? Plots might become invalid. (y/n): "
        )
        if not (yn.lower() == "y" or yn.lower() == "yes"):
            quit()
    else:
        # Create the file if it doesn't exist
        mkdir(key_config_filename.parent)
        open(key_config_filename, "a").close()

    key_config = load_config(root_path, keys_yaml)
    if key_config is None:
        key_config = {}

    wallet_target = None
    if args.wallet:
        wallet_sk = ExtendedPrivateKey.from_seed(token_bytes(32))
        wallet_target = create_puzzlehash_for_pk(
            BLSPublicKey(bytes(wallet_sk.public_child(0).get_public_key())))
        key_config["wallet_sk"] = bytes(wallet_sk).hex()
        key_config["wallet_target"] = wallet_target.hex()
        save_config(root_path, keys_yaml, key_config)
    if args.harvester:
        # Replaces the harvester's sk seed. Used to generate plot private keys, which are
        # used to sign farmed blocks.
        key_config["sk_seed"] = token_bytes(32).hex()
        save_config(root_path, keys_yaml, key_config)
    if args.pool:
        # Replaces the pool's keys and targets. Only useful if running a pool, or doing
        # solo farming. The pool target allows spending of the coinbase.
        pool_sks = [PrivateKey.from_seed(token_bytes(32)) for _ in range(2)]
        if wallet_target is None:
            pool_target = create_puzzlehash_for_pk(
                BLSPublicKey(bytes(pool_sks[0].get_public_key())))
        else:
            pool_target = wallet_target
        key_config["pool_sks"] = [bytes(pool_sk).hex() for pool_sk in pool_sks]
        key_config["pool_target"] = pool_target.hex()
        save_config(root_path, keys_yaml, key_config)
Example #14
0
    def __init__(
        self,
        root_path,
        node: Any,
        peer_api: Any,
        node_type: NodeType,
        advertised_port: int,
        service_name: str,
        network_id: bytes32,
        upnp_ports: List[int] = [],
        server_listen_ports: List[int] = [],
        connect_peers: List[PeerInfo] = [],
        auth_connect_peers: bool = True,
        on_connect_callback: Optional[Callable] = None,
        rpc_info: Optional[Tuple[type, int]] = None,
        parse_cli_args=True,
        connect_to_daemon=True,
    ):
        self.root_path = root_path
        self.config = load_config(root_path, "config.yaml")
        ping_interval = self.config.get("ping_interval")
        self.self_hostname = self.config.get("self_hostname")
        self.daemon_port = self.config.get("daemon_port")
        assert ping_interval is not None
        self._connect_to_daemon = connect_to_daemon
        self._node_type = node_type
        self._service_name = service_name
        self._rpc_task = None
        self._network_id: bytes32 = network_id

        proctitle_name = f"chia_{service_name}"
        setproctitle(proctitle_name)
        self._log = logging.getLogger(service_name)

        if parse_cli_args:
            service_config = load_config_cli(root_path, "config.yaml",
                                             service_name)
        else:
            service_config = load_config(root_path, "config.yaml",
                                         service_name)
        initialize_logging(service_name, service_config["logging"], root_path)

        self._rpc_info = rpc_info
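        # Two certificate authorities are used for TLS: a per-installation
        # private CA for trusted connections between this node's own services,
        # and the shared chia network CA for public peers.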
        private_ca_crt, private_ca_key = private_ssl_ca_paths(
            root_path, self.config)
        chia_ca_crt, chia_ca_key = chia_ssl_ca_paths(root_path, self.config)
        self._server = ChiaServer(
            advertised_port,
            node,
            peer_api,
            node_type,
            ping_interval,
            network_id,
            root_path,
            service_config,
            (private_ca_crt, private_ca_key),
            (chia_ca_crt, chia_ca_key),
            name=f"{service_name}_server",
        )
        f = getattr(node, "set_server", None)
        if f:
            f(self._server)
        else:
            self._log.warning(f"No set_server method for {service_name}")

        self._connect_peers = connect_peers
        self._auth_connect_peers = auth_connect_peers
        self._upnp_ports = upnp_ports
        self._server_listen_ports = server_listen_ports

        self._api = peer_api
        self._node = node
        self._did_start = False
        self._is_stopping = asyncio.Event()
        self._stopped_by_rpc = False

        self._on_connect_callback = on_connect_callback
        self._advertised_port = advertised_port
        self._reconnect_tasks: List[asyncio.Task] = []
Example #15
0
async def show_async(
    rpc_port: int,
    state: bool,
    show_connections: bool,
    exit_node: bool,
    add_connection: str,
    remove_connection: str,
    block_header_hash_by_height: str,
    block_by_header_hash: str,
) -> None:

    client = None
    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        if rpc_port is None:
            rpc_port = config["full_node"]["rpc_port"]
        client = await FullNodeRpcClient.create(self_hostname,
                                                uint16(rpc_port),
                                                DEFAULT_ROOT_PATH, config)

        if state:
            blockchain_state = await client.get_blockchain_state()
            if blockchain_state is None:
                print("There is no blockchain found yet. Try again shortly")
                return
            peak: Optional[BlockRecord] = blockchain_state["peak"]
            difficulty = blockchain_state["difficulty"]
            sub_slot_iters = blockchain_state["sub_slot_iters"]
            synced = blockchain_state["sync"]["synced"]
            sync_mode = blockchain_state["sync"]["sync_mode"]
            total_iters = peak.total_iters if peak is not None else 0
            num_blocks: int = 10

            if sync_mode:
                sync_max_block = blockchain_state["sync"]["sync_tip_height"]
                sync_current_block = blockchain_state["sync"][
                    "sync_progress_height"]
                print(
                    "Current Blockchain Status: Full Node syncing to block",
                    sync_max_block,
                    "\nCurrently synced to block:",
                    sync_current_block,
                )
            if synced:
                print("Current Blockchain Status: Full Node Synced")
                print("\nPeak: Hash:",
                      peak.header_hash if peak is not None else "")
            elif peak is not None:
                print(
                    f"Current Blockchain Status: Not Synced. Peak height: {peak.height}"
                )
            else:
                print("\nSearching for an initial chain\n")
                print(
                    "You may be able to expedite with 'chia show -a host:port' using a known node.\n"
                )

            if peak is not None:
                if peak.is_transaction_block:
                    peak_time = peak.timestamp
                else:
                    peak_hash = peak.header_hash
                    curr = await client.get_block_record(peak_hash)
                    while curr is not None and not curr.is_transaction_block:
                        curr = await client.get_block_record(curr.prev_hash)
                    peak_time = curr.timestamp
                peak_time_struct = struct_time(localtime(peak_time))

                print(
                    "      Time:",
                    f"{time.strftime('%a %b %d %Y %T %Z', peak_time_struct)}",
                    f"                 Height: {peak.height:>10}\n",
                )

                print("Estimated network space: ", end="")
                network_space_human_readable = blockchain_state[
                    "space"] / 1024**4
                if network_space_human_readable >= 1024:
                    network_space_human_readable = network_space_human_readable / 1024
                    print(f"{network_space_human_readable:.3f} PiB")
                else:
                    print(f"{network_space_human_readable:.3f} TiB")
                print(f"Current difficulty: {difficulty}")
                print(f"Current VDF sub_slot_iters: {sub_slot_iters}")
                print("Total iterations since the start of the blockchain:",
                      total_iters)
                print("")
                print("  Height: |   Hash:")

                added_blocks: List[BlockRecord] = []
                curr = await client.get_block_record(peak.header_hash)
                while curr is not None and len(
                        added_blocks) < num_blocks and curr.height > 0:
                    added_blocks.append(curr)
                    curr = await client.get_block_record(curr.prev_hash)

                for b in added_blocks:
                    print(f"{b.height:>9} | {b.header_hash}")
            else:
                print("Blockchain has no blocks yet")

            # if called together with show_connections, leave a blank line
            if show_connections:
                print("")
        if show_connections:
            connections = await client.get_connections()
            print("Connections:")
            print(
                "Type      IP                                     Ports       NodeID      Last Connect"
                + "      MiB Up|Dwn")
            for con in connections:
                last_connect_tuple = struct_time(
                    localtime(con["last_message_time"]))
                last_connect = time.strftime("%b %d %T", last_connect_tuple)
                mb_down = con["bytes_read"] / (1024 * 1024)
                mb_up = con["bytes_written"] / (1024 * 1024)

                host = con["peer_host"]
                # Strip IPv6 brackets
                if host[0] == "[":
                    host = host[1:39]
                # Nodetype length is 9 because INTRODUCER will be deprecated
                if NodeType(con["type"]) is NodeType.FULL_NODE:
                    peak_height = con["peak_height"]
                    peak_hash = con["peak_hash"]
                    if peak_hash is None:
                        peak_hash = "No Info"
                    if peak_height is None:
                        peak_height = 0
                    con_str = (
                        f"{NodeType(con['type']).name:9} {host:38} "
                        f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
                        f" {con['node_id'].hex()[:8]}... "
                        f"{last_connect}  "
                        f"{mb_up:7.1f}|{mb_down:<7.1f}"
                        f"\n                                                 "
                        f"-SB Height: {peak_height:8.0f}    -Hash: {peak_hash[2:10]}..."
                    )
                else:
                    con_str = (
                        f"{NodeType(con['type']).name:9} {host:38} "
                        f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
                        f" {con['node_id'].hex()[:8]}... "
                        f"{last_connect}  "
                        f"{mb_up:7.1f}|{mb_down:<7.1f}")
                print(con_str)
            # if called together with state, leave a blank line
            if state:
                print("")
        if exit_node:
            node_stop = await client.stop_node()
            print(node_stop, "Node stopped")
        if add_connection:
            if ":" not in add_connection:
                print(
                    "Enter a valid IP and port in the following format: 10.5.4.3:8000"
                )
            else:
                ip, port = (
                    ":".join(add_connection.split(":")[:-1]),
                    add_connection.split(":")[-1],
                )
                print(f"Connecting to {ip}, {port}")
                try:
                    await client.open_connection(ip, int(port))
                except Exception:
                    print(f"Failed to connect to {ip}:{port}")
        if remove_connection:
            result_txt = ""
            if len(remove_connection) != 8:
                result_txt = "Invalid NodeID. Do not include '.'"
            else:
                connections = await client.get_connections()
                for con in connections:
                    if remove_connection == con["node_id"].hex()[:8]:
                        print("Attempting to disconnect", "NodeID",
                              remove_connection)
                        try:
                            await client.close_connection(con["node_id"])
                        except Exception:
                            result_txt = f"Failed to disconnect NodeID {remove_connection}"
                        else:
                            result_txt = f"NodeID {remove_connection}... {NodeType(con['type']).name} "
                            f"{con['peer_host']} disconnected"
                    elif result_txt == "":
                        result_txt = f"NodeID {remove_connection}... not found"
            print(result_txt)
        if block_header_hash_by_height != "":
            block_header = await client.get_block_record_by_height(
                block_header_hash_by_height)
            if block_header is not None:
                print(f"Header hash of block {block_header_hash_by_height}: "
                      f"{block_header.header_hash.hex()}")
            else:
                print("Block height", block_header_hash_by_height, "not found")
        if block_by_header_hash != "":
            block: Optional[BlockRecord] = await client.get_block_record(
                hexstr_to_bytes(block_by_header_hash))
            full_block: Optional[FullBlock] = await client.get_block(
                hexstr_to_bytes(block_by_header_hash))
            # Would like to have a verbose flag for this
            if block is not None:
                assert full_block is not None
                prev_b = await client.get_block_record(block.prev_hash)
                if prev_b is not None:
                    difficulty = block.weight - prev_b.weight
                else:
                    difficulty = block.weight
                if block.is_transaction_block:
                    assert full_block.transactions_info is not None
                    block_time = struct_time(
                        localtime(
                            full_block.foliage_transaction_block.timestamp
                            if full_block.foliage_transaction_block else None))
                    block_time_string = time.strftime("%a %b %d %Y %T %Z",
                                                      block_time)
                    cost = str(full_block.transactions_info.cost)
                    tx_filter_hash = "Not a transaction block"
                    if full_block.foliage_transaction_block:
                        tx_filter_hash = full_block.foliage_transaction_block.filter_hash
                else:
                    block_time_string = "Not a transaction block"
                    cost = "Not a transaction block"
                    tx_filter_hash = "Not a transaction block"
                print("Block at height", block.height, ":")
                address_prefix = config["network_overrides"]["config"][
                    config["selected_network"]]["address_prefix"]
                farmer_address = encode_puzzle_hash(block.farmer_puzzle_hash,
                                                    address_prefix)
                pool_address = encode_puzzle_hash(block.pool_puzzle_hash,
                                                  address_prefix)
                print(
                    f"Header Hash            0x{block.header_hash.hex()}\n"
                    f"Timestamp              {block_time_string}\n"
                    f"Block Height       {block.height}\n"
                    f"Weight                 {block.weight}\n"
                    f"Previous Block         0x{block.prev_hash.hex()}\n"
                    f"Difficulty             {difficulty}\n"
                    f"Sub-slot iters         {block.sub_slot_iters}\n"
                    f"Cost                   {cost}\n"
                    f"Total VDF Iterations   {block.total_iters}\n"
                    f"Is a Transaction Block?{block.is_transaction_block}\n"
                    f"Deficit                {block.deficit}\n"
                    f"PoSpace 'k' Size       {full_block.reward_chain_block.proof_of_space.size}\n"
                    f"Plot Public Key        0x{full_block.reward_chain_block.proof_of_space.plot_public_key}\n"
                    f"Pool Public Key        0x{full_block.reward_chain_block.proof_of_space.pool_public_key}\n"
                    f"Pool Public Key        "
                    f"0x{full_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash}\n"
                    f"{full_block.reward_chain_block.proof_of_space.pool_contract_puzzle_hash}\n"
                    f"Tx Filter Hash         {tx_filter_hash}\n"
                    f"Farmer Address         {farmer_address}\n"
                    f"Pool Address           {pool_address}\n"
                    f"Fees Amount            {block.fees}\n")
            else:
                print("Block with header hash", block_header_hash_by_height,
                      "not found")

    except Exception as e:
        if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
            print(
                f"Connection error. Check if full node rpc is running at {rpc_port}"
            )
            print("This is normal if full node is still starting up")
        else:
            tb = traceback.format_exc()
            print(f"Exception from 'show' {tb}")
    finally:
        # Close the RPC client on every path, including the early return above
        if client is not None:
            client.close()
            await client.await_closed()
Example #16
0
    async def test1(self, two_nodes):
        num_blocks = 5
        test_rpc_port = uint16(21522)
        full_node_1, full_node_2, server_1, server_2 = two_nodes
        blocks = bt.get_consecutive_blocks(test_constants, num_blocks, [], 10)

        for i in range(1, num_blocks):
            async for _ in full_node_1.respond_unfinished_block(
                    full_node_protocol.RespondUnfinishedBlock(blocks[i])):
                pass
            async for _ in full_node_1.respond_block(
                    full_node_protocol.RespondBlock(blocks[i])):
                pass

        def stop_node_cb():
            full_node_1._close()
            server_1.close_all()

        full_node_rpc_api = FullNodeRpcApi(full_node_1)

        config = load_config(bt.root_path, "config.yaml")
        hostname = config["self_hostname"]
        daemon_port = config["daemon_port"]

        rpc_cleanup = await start_rpc_server(
            full_node_rpc_api,
            hostname,
            daemon_port,
            test_rpc_port,
            stop_node_cb,
            connect_to_daemon=False,
        )

        try:
            client = await FullNodeRpcClient.create("localhost", test_rpc_port)
            state = await client.get_blockchain_state()
            assert state["lca"].header_hash is not None
            assert not state["sync"]["sync_mode"]
            assert len(state["tips"]) > 0
            assert state["difficulty"] > 0
            assert state["ips"] > 0
            assert state["min_iters"] > 0

            block = await client.get_block(state["lca"].header_hash)
            assert block == blocks[2]
            assert (await client.get_block(bytes([1] * 32))) is None

            unf_block_headers = await client.get_unfinished_block_headers(4)
            assert len(unf_block_headers) == 1
            assert unf_block_headers[0] == blocks[4].header

            header = await client.get_header(state["lca"].header_hash)
            assert header == blocks[2].header

            assert (await client.get_header_by_height(2)) == blocks[2].header

            assert (await client.get_header_by_height(100)) is None

            coins = await client.get_unspent_coins(
                blocks[-1].get_coinbase().puzzle_hash, blocks[-1].header_hash)
            assert len(coins) == 3
            coins_lca = await client.get_unspent_coins(
                blocks[-1].get_coinbase().puzzle_hash)
            assert len(coins_lca) == 3

            assert len(await client.get_connections()) == 0

            await client.open_connection("localhost", server_2._port)

            async def num_connections():
                return len(await client.get_connections())

            await time_out_assert(10, num_connections, 1)
            connections = await client.get_connections()

            await client.close_connection(connections[0]["node_id"])
            await time_out_assert(10, num_connections, 0)
        except AssertionError:
            # Checks that the RPC manages to stop the node
            client.close()
            await client.await_closed()
            await rpc_cleanup()
            raise

        client.close()
        await client.await_closed()
        await rpc_cleanup()
Example #17
0
def check_plots(args, root_path):
    config = load_config(root_path, "config.yaml")
    if args.num is not None:
        num = args.num
    else:
        num = 20

    v = Verifier()
    log.info("Loading plots in config.yaml using plot_tools loading code\n")
    kc: Keychain = Keychain()
    pks = [
        master_sk_to_farmer_sk(sk).get_g1()
        for sk, _ in kc.get_all_private_keys()
    ]
    pool_public_keys = [
        G1Element.from_bytes(bytes.fromhex(pk))
        for pk in config["farmer"]["pool_public_keys"]
    ]
    _, provers, failed_to_open_filenames, no_key_filenames = load_plots(
        {},
        set(),
        pks,
        pool_public_keys,
        root_path,
        open_no_key_filenames=True,
    )
    if len(provers) > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0

    for plot_path, plot_info in provers.items():
        pr = plot_info.prover
        log.info(f"Testing plot {plot_path} k={pr.get_size()}")
        log.info(f"\tPool public key: {plot_info.pool_public_key}")
        log.info(f"\tFarmer public key: {plot_info.farmer_public_key}")
        log.info(f"\tLocal sk: {plot_info.local_sk}")
        total_proofs = 0
        try:
            for i in range(num):
                challenge = std_hash(i.to_bytes(32, "big"))
                for index, quality_str in enumerate(
                        pr.get_qualities_for_challenge(challenge)):
                    proof = pr.get_full_proof(challenge, index)
                    total_proofs += 1
                    ver_quality_str = v.validate_proof(pr.get_id(),
                                                       pr.get_size(),
                                                       challenge, proof)
                    assert quality_str == ver_quality_str
        except BaseException as e:
            if isinstance(e, KeyboardInterrupt):
                log.warning("Interrupted, closing")
                return
            log.error(
                f"{type(e)}: {e} error in proving/verifying for plot {plot_path}"
            )
        if total_proofs > 0:
            log.info(
                f"\tProofs {total_proofs} / {num}, {round(total_proofs/float(num), 4)}"
            )
            total_good_plots[pr.get_size()] += 1
            total_size += plot_path.stat().st_size
        else:
            total_bad_plots += 1
            log.error(
                f"\tProofs {total_proofs} / {num}, {round(total_proofs/float(num), 4)}"
            )
    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(
        f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TB"
    )
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots")
    if len(no_key_filenames) > 0:
        log.warning(
            f"There are {len(no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
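
# A minimal usage sketch: check_plots only reads `num` from its args namespace,
# so a bare SimpleNamespace is enough to drive it from a script. The import
# path for DEFAULT_ROOT_PATH is an assumption and may differ between versions.
#
#     from types import SimpleNamespace
#     from src.util.default_root import DEFAULT_ROOT_PATH  # assumed path
#     check_plots(SimpleNamespace(num=30), DEFAULT_ROOT_PATH)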
Example #18
0
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    new_provers: Dict[Path, PlotInfo] = {}

    for filename in all_filenames:
        if filename.exists():
            if (filename in failed_to_open_filenames and
                (time.time() - failed_to_open_filenames[filename]) < 1200):
                # Try once every 20 minutes to open the file
                continue
            if filename in provers:
                stat_info = filename.stat()
                if stat_info.st_mtime == provers[filename].time_modified:
                    total_size += stat_info.st_size
                    new_provers[filename] = provers[filename]
                    continue
            try:
                prover = DiskProver(str(filename))
                (
                    pool_public_key,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())
                # Only use plots that have the correct keys associated with them
                if (farmer_public_keys is not None
                        and farmer_public_key not in farmer_public_keys):
                    log.warning(
                        f"Plot {filename} has a farmer public key that is not in the farmer's pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                if (pool_public_keys is not None
                        and pool_public_key not in pool_public_keys):
                    log.warning(
                        f"Plot {filename} has a pool public key that is not in the farmer's pool pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key)
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    farmer_public_key,
                    plot_public_key,
                    local_sk,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {filename}. {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                continue
            log.info(
                f"Found plot {filename} of size {new_provers[filename].prover.get_size()}"
            )

    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB"
    )
    return (changed, new_provers, failed_to_open_filenames, no_key_filenames)
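
# A minimal usage sketch, assuming this function's own module context: because
# load_plots returns the refreshed prover map and the carried-over failure
# timestamps, a harvester-style caller can poll by feeding each result back
# into the next call, so unchanged plots are reused rather than reopened.
# Passing None for both key lists disables the key filtering above.
#
#     provers, failed = {}, {}
#     changed, provers, failed, no_keys = load_plots(
#         provers, failed, None, None, root_path)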
Example #19
0
async def setup_full_node_simulator(db_name,
                                    port,
                                    introducer_port=None,
                                    dic=None):
    # SETUP
    # Avoid a shared mutable default argument
    if dic is None:
        dic = {}
    test_constants_copy = test_constants.copy()
    for k in dic.keys():
        test_constants_copy[k] = dic[k]

    db_path = Path(db_name)
    connection = await aiosqlite.connect(db_path)
    store_1 = await FullNodeStore.create(connection)
    await store_1._clear_database()
    unspent_store_1 = await CoinStore.create(connection)
    await unspent_store_1._clear_database()
    mempool_1 = MempoolManager(unspent_store_1, test_constants_copy)

    b_1: Blockchain = await Blockchain.create(unspent_store_1, store_1,
                                              test_constants_copy)
    await mempool_1.new_tips(await b_1.get_full_tips())

    await store_1.add_block(
        FullBlock.from_bytes(test_constants_copy["GENESIS_BLOCK"]))

    net_config = load_config(root_path, "config.yaml")
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")

    config = load_config(root_path, "config.yaml", "full_node")

    if introducer_port is not None:
        config["introducer_peer"]["host"] = "127.0.0.1"
        config["introducer_peer"]["port"] = introducer_port
    full_node_1 = FullNodeSimulator(
        store_1,
        b_1,
        config,
        mempool_1,
        unspent_store_1,
        f"full_node_{port}",
        test_constants_copy,
    )
    assert ping_interval is not None
    assert network_id is not None
    server_1 = ChiaServer(
        port,
        full_node_1,
        NodeType.FULL_NODE,
        ping_interval,
        network_id,
        "full-node-simulator-server",
    )
    _ = await server_1.start_server(full_node_1._on_connect)
    full_node_1._set_server(server_1)

    yield (full_node_1, server_1)

    # TEARDOWN
    full_node_1._shutdown()
    server_1.close_all()
    await server_1.await_closed()
    await connection.close()
    Path(db_name).unlink()
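
# A minimal usage sketch: the setup helper is an async generator, so a test
# drives it with __anext__() to run SETUP, then exhausts it to run TEARDOWN.
# The db name and port are arbitrary here.
#
#     node_iter = setup_full_node_simulator("sim-test-db", 21234)
#     full_node, server = await node_iter.__anext__()
#     ...  # interact with full_node / server
#     async for _ in node_iter:  # resume the generator to trigger teardown
#         pass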
Example #20
0
    10,
    "PROPAGATION_DELAY_THRESHOLD": 20,
    "TX_PER_SEC": 1,
    "MEMPOOL_BLOCK_BUFFER": 10,
    "MIN_ITERS_STARTING": 50 * 1,
    "NUMBER_ZERO_BITS_CHALLENGE_SIG": 1,
    "CLVM_COST_RATIO_CONSTANT": 108,
})

global_config = load_config(bt.root_path, "config.yaml")
self_hostname = global_config["self_hostname"]


def constants_for_dic(dic):
    return test_constants.replace(**dic)


async def _teardown_nodes(node_aiters: List) -> None:
    awaitables = [node_iter.__anext__() for node_iter in node_aiters]
    for sublist_awaitable in asyncio.as_completed(awaitables):
        try:
            await sublist_awaitable
        except StopAsyncIteration:
            pass
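
# Note: asyncio.as_completed advances all the generators concurrently, so
# tearing down N nodes costs roughly the slowest individual teardown rather
# than the sum, e.g.:
#
#     await _teardown_nodes([node_iter_1, node_iter_2])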
Example #21
0
def check_plots(root_path, num, challenge_start, grep_string, list_duplicates,
                debug_show_memo):
    config = load_config(root_path, "config.yaml")
    if num is not None:
        if num == 0:
            log.warning("Not opening plot files")
        else:
            if num < 5:
                log.warning(
                    f"{num} challenges is too low, setting it to the minimum of 5"
                )
                num = 5
            if num < 30:
                log.warning(
                    "Use 30 challenges (our default) for a balance of speed and accuracy"
                )
    else:
        num = 30

    if challenge_start is not None:
        num_start = challenge_start
        num_end = num_start + num
    else:
        num_start = 0
        num_end = num
    challenges = num_end - num_start

    match_str = grep_string
    show_memo: bool = debug_show_memo

    if list_duplicates:
        log.warning("Checking for duplicate Plot IDs")
        log.info("Plot filenames expected to end with -[64 char plot ID].plot")
        plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(
            config["harvester"])
        all_filenames: List[Path] = []
        for paths in plot_filenames.values():
            all_filenames += paths
        find_duplicate_plot_IDs(all_filenames)

    if num == 0:
        return

    v = Verifier()
    log.info("Loading plots in config.yaml using plot_tools loading code\n")
    kc: Keychain = Keychain()
    pks = [
        master_sk_to_farmer_sk(sk).get_g1()
        for sk, _ in kc.get_all_private_keys()
    ]
    pool_public_keys = [
        G1Element.from_bytes(bytes.fromhex(pk))
        for pk in config["farmer"]["pool_public_keys"]
    ]
    _, provers, failed_to_open_filenames, no_key_filenames = load_plots(
        {},
        {},
        pks,
        pool_public_keys,
        match_str,
        show_memo,
        root_path,
        open_no_key_filenames=True,
    )
    if len(provers) > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0

    for plot_path, plot_info in provers.items():
        pr = plot_info.prover
        log.info(f"Testing plot {plot_path} k={pr.get_size()}")
        log.info(f"\tPool public key: {plot_info.pool_public_key}")
        log.info(f"\tFarmer public key: {plot_info.farmer_public_key}")
        log.info(f"\tLocal sk: {plot_info.local_sk}")
        total_proofs = 0
        for i in range(num_start, num_end):
            challenge = std_hash(i.to_bytes(32, "big"))
            for index, quality_str in enumerate(
                    pr.get_qualities_for_challenge(challenge)):
                try:
                    proof = pr.get_full_proof(challenge, index)
                    total_proofs += 1
                    ver_quality_str = v.validate_proof(pr.get_id(),
                                                       pr.get_size(),
                                                       challenge, proof)
                    assert quality_str == ver_quality_str
                except BaseException as e:
                    if isinstance(e, KeyboardInterrupt):
                        log.warning("Interrupted, closing")
                        return
                    log.error(
                        f"{type(e)}: {e} error in proving/verifying for plot {plot_path}"
                    )
        if total_proofs > 0:
            log.info(
                f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}"
            )
            total_good_plots[pr.get_size()] += 1
            total_size += plot_path.stat().st_size
        else:
            total_bad_plots += 1
            log.error(
                f"\tProofs {total_proofs} / {challenges}, {round(total_proofs/float(challenges), 4)}"
            )
    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(
        f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB"
    )
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots")
    if len(no_key_filenames) > 0:
        log.warning(
            f"There are {len(no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
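
# A minimal usage sketch: the positional parameters map onto CLI options; e.g.
# to test only plots whose path contains "k32", with 30 challenges starting at
# index 100, duplicate detection on, and memos hidden:
#
#     check_plots(DEFAULT_ROOT_PATH, 30, 100, "k32", True, False)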
Example #22
0
async def setup_wallet_node(
    port,
    consensus_constants: ConsensusConstants,
    full_node_port=None,
    introducer_port=None,
    key_seed=None,
    starting_height=None,
):
    config = load_config(bt.root_path, "config.yaml", "wallet")
    if starting_height is not None:
        config["starting_height"] = starting_height
    config["initial_num_public_keys"] = 5

    entropy = token_bytes(32)
    keychain = Keychain(entropy.hex(), True)
    keychain.add_private_key(bytes_to_mnemonic(entropy), "")
    first_pk = keychain.get_first_public_key()
    assert first_pk is not None
    db_path_key_suffix = str(first_pk.get_fingerprint())
    db_name = f"test-wallet-db-{port}"
    db_path = bt.root_path / f"test-wallet-db-{port}-{db_path_key_suffix}"
    if db_path.exists():
        db_path.unlink()
    config["database_path"] = str(db_name)

    api = WalletNode(
        config,
        keychain,
        bt.root_path,
        consensus_constants=consensus_constants,
        name="wallet1",
    )
    periodic_introducer_poll = None
    if introducer_port is not None:
        periodic_introducer_poll = (
            PeerInfo(self_hostname, introducer_port),
            30,
            config["target_peer_count"],
        )
    connect_peers: List[PeerInfo] = []
    if full_node_port is not None:
        connect_peers = [PeerInfo(self_hostname, full_node_port)]

    started = asyncio.Event()

    async def start_callback():
        await api._start(new_wallet=True)
        nonlocal started
        started.set()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    service = Service(
        root_path=bt.root_path,
        api=api,
        node_type=NodeType.WALLET,
        advertised_port=port,
        service_name="wallet",
        server_listen_ports=[port],
        connect_peers=connect_peers,
        auth_connect_peers=False,
        on_connect_callback=api._on_connect,
        start_callback=start_callback,
        stop_callback=stop_callback,
        await_closed_callback=await_closed_callback,
        periodic_introducer_poll=periodic_introducer_poll,
        parse_cli_args=False,
    )

    run_task = asyncio.create_task(service.run())
    await started.wait()

    yield api, api.server

    service.stop()
    await run_task
    if db_path.exists():
        db_path.unlink()
    keychain.delete_all_keys()
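
# A minimal usage sketch: wrapped in a pytest-asyncio fixture, the single yield
# of setup_wallet_node gives symmetric setup/teardown. The fixture name, port,
# and constants object are illustrative assumptions.
#
#     @pytest.fixture
#     async def wallet_node():
#         async for api, server in setup_wallet_node(21530, test_constants):
#             yield api, server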
Example #23
0
async def main():
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "full_node")
    setproctitle("chia_full_node_simulator")
    initialize_logging("FullNode %(name)-23s", config["logging"], root_path)

    log = logging.getLogger(__name__)
    server_closed = False

    db_path = path_from_root(root_path, config["simulator_database_path"])
    mkdir(db_path.parent)

    config["database_path"] = config["simulator_database_path"]
    full_node = await FullNodeSimulator.create(
        config,
        root_path=root_path,
        override_constants=test_constants,
    )

    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")

    # Starts the full node server (which full nodes can connect to)
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        full_node,
        NodeType.FULL_NODE,
        ping_interval,
        network_id,
        DEFAULT_ROOT_PATH,
        config,
    )
    full_node._set_server(server)
    server_socket = await start_server(server, full_node._on_connect)
    rpc_cleanup = None

    def stop_all():
        nonlocal server_closed
        if not server_closed:
            # Called by the UI, when node is closed, or when a signal is sent
            log.info("Closing all connections, and server...")
            server.close_all()
            server_socket.close()
            server_closed = True

    # Starts the RPC server
    rpc_cleanup = await start_full_node_rpc_server(full_node, stop_all,
                                                   config["rpc_port"])

    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT, stop_all)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM, stop_all)
    except NotImplementedError:
        log.info("signal handlers unsupported")

    # Awaits for server and all connections to close
    await server_socket.wait_closed()
    await server.await_closed()
    log.info("Closed all node servers.")

    # Stops the full node and closes DBs
    await full_node._await_closed()

    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")

    await asyncio.get_running_loop().shutdown_asyncgens()
    log.info("Node fully closed.")
Example #24
0
async def setup_full_node(
    consensus_constants: ConsensusConstants,
    db_name,
    port,
    introducer_port=None,
    simulator=False,
    send_uncompact_interval=30,
):
    db_path = bt.root_path / f"{db_name}"
    if db_path.exists():
        db_path.unlink()

    config = load_config(bt.root_path, "config.yaml", "full_node")
    config["database_path"] = db_name
    config["send_uncompact_interval"] = send_uncompact_interval
    periodic_introducer_poll = None
    if introducer_port is not None:
        periodic_introducer_poll = (
            PeerInfo(self_hostname, introducer_port),
            30,
            config["target_peer_count"],
        )
    if not simulator:
        api: FullNode = FullNode(
            config=config,
            root_path=bt.root_path,
            consensus_constants=consensus_constants,
            name=f"full_node_{port}",
        )
    else:
        api = FullNodeSimulator(
            config=config,
            root_path=bt.root_path,
            consensus_constants=consensus_constants,
            name=f"full_node_sim_{port}",
            bt=bt,
        )

    started = asyncio.Event()

    async def start_callback():
        await api._start()
        nonlocal started
        started.set()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    service = Service(
        root_path=bt.root_path,
        api=api,
        node_type=NodeType.FULL_NODE,
        advertised_port=port,
        service_name="full_node",
        server_listen_ports=[port],
        auth_connect_peers=False,
        on_connect_callback=api._on_connect,
        start_callback=start_callback,
        stop_callback=stop_callback,
        await_closed_callback=await_closed_callback,
        periodic_introducer_poll=periodic_introducer_poll,
        parse_cli_args=False,
    )

    run_task = asyncio.create_task(service.run())
    await started.wait()

    yield api, api.server

    service.stop()
    await run_task
    if db_path.exists():
        db_path.unlink()
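
# A minimal usage sketch: several of these setup generators can be started
# together and later passed to _teardown_nodes (Example #20) to close them
# concurrently. Ports and db names are arbitrary here.
#
#     node_iters = [
#         setup_full_node(test_constants, "db-1", 21240),
#         setup_full_node(test_constants, "db-2", 21241),
#     ]
#     nodes = [await it.__anext__() for it in node_iters]
#     ...
#     await _teardown_nodes(node_iters)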
Example #25
0
def load_plots(
    provers: Dict[Path, PlotInfo],
    failed_to_open_filenames: Dict[Path, int],
    farmer_public_keys: Optional[List[G1Element]],
    pool_public_keys: Optional[List[G1Element]],
    match_str: Optional[str],
    show_memo: bool,
    root_path: Path,
    open_no_key_filenames=False,
) -> Tuple[bool, Dict[Path, PlotInfo], Dict[Path, int], Set[Path]]:
    start_time = time.time()
    config_file = load_config(root_path, "config.yaml", "harvester")
    changed = False
    no_key_filenames: Set[Path] = set()
    log.info(f'Searching directories {config_file["plot_directories"]}')

    plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(config_file)
    all_filenames: List[Path] = []
    for paths in plot_filenames.values():
        all_filenames += paths
    total_size = 0
    new_provers: Dict[Path, PlotInfo] = {}
    plot_ids: Set[bytes32] = set()

    if match_str is not None:
        log.info(
            f'Only loading plots that contain "{match_str}" in the file or directory name'
        )

    for filename in all_filenames:
        filename_str = str(filename)
        if match_str is not None and match_str not in filename_str:
            continue
        if filename.exists():
            if filename in failed_to_open_filenames and (
                    time.time() - failed_to_open_filenames[filename]) < 1200:
                # Try once every 20 minutes to open the file
                continue
            if filename in provers:
                try:
                    stat_info = filename.stat()
                except Exception as e:
                    log.error(f"Failed to open file {filename}. {e}")
                    continue
                if stat_info.st_mtime == provers[filename].time_modified:
                    total_size += stat_info.st_size
                    new_provers[filename] = provers[filename]
                    plot_ids.add(provers[filename].prover.get_id())
                    continue
            try:
                prover = DiskProver(str(filename))

                expected_size = _expected_plot_size(
                    prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = filename.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {filename}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    continue

                if prover.get_id() in plot_ids:
                    log.warning(
                        f"Have multiple copies of the plot {filename}, not adding it."
                    )
                    continue

                (
                    pool_public_key_or_puzzle_hash,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())

                # Only use plots that have the correct keys associated with them
                if farmer_public_keys is not None and farmer_public_key not in farmer_public_keys:
                    log.warning(
                        f"Plot {filename} has a farmer public key that is not in the farmer's pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                    pool_public_key = pool_public_key_or_puzzle_hash
                    pool_contract_puzzle_hash = None
                else:
                    assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                    pool_public_key = None
                    pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

                if (pool_public_keys is not None
                        and pool_public_key is not None
                        and pool_public_key not in pool_public_keys):
                    log.warning(
                        f"Plot {filename} has a pool public key that is not in the farmer's pool pk list."
                    )
                    no_key_filenames.add(filename)
                    if not open_no_key_filenames:
                        continue

                stat_info = filename.stat()
                local_sk = master_sk_to_local_sk(local_master_sk)
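                # The plot public key is derived from both the plot-local key and the farmer public key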
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key)
                new_provers[filename] = PlotInfo(
                    prover,
                    pool_public_key,
                    pool_contract_puzzle_hash,
                    farmer_public_key,
                    plot_public_key,
                    local_sk,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )
                plot_ids.add(prover.get_id())
                total_size += stat_info.st_size
                changed = True
            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {filename}. {e} {tb}")
                failed_to_open_filenames[filename] = int(time.time())
                continue
            log.info(
                f"Found plot {filename} of size {new_provers[filename].prover.get_size()}"
            )

            if show_memo:
                plot_memo: bytes32
                if pool_contract_puzzle_hash is None:
                    plot_memo = stream_plot_info_pk(pool_public_key,
                                                    farmer_public_key,
                                                    local_master_sk)
                else:
                    plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash,
                                                    farmer_public_key,
                                                    local_master_sk)
                plot_memo_str: str = plot_memo.hex()
                log.info(f"Memo: {plot_memo_str}")

    log.info(
        f"Loaded a total of {len(new_provers)} plots of size {total_size / (1024 ** 4)} TiB, in"
        f" {time.time()-start_time} seconds")
    return changed, new_provers, failed_to_open_filenames, no_key_filenames
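
# A minimal usage sketch: compared with the earlier load_plots, this version
# adds substring filtering and memo printing; e.g. to reload only plots whose
# path mentions "k32" and log each plot's memo:
#
#     _, provers, failed, no_keys = load_plots(
#         {}, {}, None, None, "k32", True, root_path)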
Example #26
0
    def __init__(
        self,
        root_path: Path = TEST_ROOT_PATH,
        real_plots: bool = False,
    ):
        create_default_chia_config(root_path)
        initialize_ssl(root_path)
        self.root_path = root_path
        self.wallet_sk: PrivateKey = PrivateKey.from_seed(b"coinbase")
        self.coinbase_target = std_hash(bytes(self.wallet_sk.get_public_key()))
        self.fee_target = std_hash(bytes(self.wallet_sk.get_public_key()))
        self.n_wesolowski = uint8(0)

        if not real_plots:
            # No real plots supplied, so we will use the small test plots
            self.use_any_pos = True
            self.plot_config: Dict = {"plots": {}}
            # Can't go much lower than 19, since plots start having no solutions
            k: uint8 = uint8(19)
            # Uses many plots for testing, in order to guarantee proofs of space at every height
            num_plots = 40
            # Use the empty string as the seed for the private key
            pool_sk: PrivateKey = PrivateKey.from_seed(b"")
            pool_pk: PublicKey = pool_sk.get_public_key()
            plot_sks: List[PrivateKey] = [
                PrivateKey.from_seed(pn.to_bytes(4, "big"))
                for pn in range(num_plots)
            ]
            plot_pks: List[PublicKey] = [
                sk.get_public_key() for sk in plot_sks
            ]

            plot_seeds: List[bytes32] = [
                ProofOfSpace.calculate_plot_seed(pool_pk, plot_pk)
                for plot_pk in plot_pks
            ]
            plot_dir = root_path / "plots"
            mkdir(plot_dir)
            filenames: List[str] = [
                f"genesis-plots-{k}{std_hash(int.to_bytes(i, 4, 'big')).hex()}.dat"
                for i in range(num_plots)
            ]
            done_filenames = set()
            temp_dir = plot_dir / "plot.tmp"
            mkdir(temp_dir)
            try:
                for pn, filename in enumerate(filenames):
                    if not (plot_dir / filename).exists():
                        plotter = DiskPlotter()
                        plotter.create_plot_disk(
                            str(plot_dir),
                            str(plot_dir),
                            filename,
                            k,
                            b"genesis",
                            plot_seeds[pn],
                        )
                        done_filenames.add(filename)
                    self.plot_config["plots"][str(plot_dir / filename)] = {
                        "pool_pk": bytes(pool_pk).hex(),
                        "sk": bytes(plot_sks[pn]).hex(),
                        "pool_sk": bytes(pool_sk).hex(),
                    }
            except KeyboardInterrupt:
                for filename in filenames:
                    if (filename not in done_filenames
                            and (plot_dir / filename).exists()):
                        (plot_dir / filename).unlink()
                sys.exit(1)
        else:
            # Real plots supplied, so we will use these instead of the test plots
            config = load_config_cli(root_path, "config.yaml", "harvester")
            try:
                key_config = load_config(root_path, "keys.yaml")
            except FileNotFoundError:
                raise RuntimeError(
                    "Keys not generated. Run `chia generate keys`")
            try:
                plot_config = load_config(root_path, "plots.yaml")
            except FileNotFoundError:
                raise RuntimeError(
                    "Plots not generated. Run chia-create-plots")

            pool_sks: List[PrivateKey] = [
                PrivateKey.from_bytes(bytes.fromhex(ce))
                for ce in key_config["pool_sks"]
            ]

            for key, value in plot_config["plots"].items():
                for pool_sk in pool_sks:
                    if bytes(pool_sk.get_public_key()).hex() == value["pool_pk"]:
                        plot_config["plots"][key]["pool_sk"] = bytes(
                            pool_sk).hex()

            self.plot_config = plot_config
            self.use_any_pos = False
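
# A minimal usage sketch: this fragment omits the class name; in the test suite
# a single shared instance is created once and reused, e.g. (class name assumed
# from the `bt` references elsewhere):
#
#     bt = BlockTools()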
Example #27
0
async def show_async(args, parser):

    try:
        config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
        self_hostname = config["self_hostname"]
        if "rpc_port" not in args or args.rpc_port is None:
            rpc_port = config["full_node"]["rpc_port"]
        else:
            rpc_port = args.rpc_port
        client = await FullNodeRpcClient.create(self_hostname, rpc_port)

        if args.state:
            blockchain_state = await client.get_blockchain_state()
            lca_block = blockchain_state["lca"]
            tips = blockchain_state["tips"]
            difficulty = blockchain_state["difficulty"]
            ips = blockchain_state["ips"]
            sync_mode = blockchain_state["sync"]["sync_mode"]
            total_iters = lca_block.data.total_iters
            num_blocks: int = 10

            if sync_mode:
                sync_max_block = blockchain_state["sync"]["sync_tip_height"]
                sync_current_block = blockchain_state["sync"][
                    "sync_progress_height"]
                print(
                    "Current Blockchain Status: Full Node syncing to",
                    sync_max_block,
                    "\nCurrently synched to tip:",
                    sync_current_block,
                )
            else:
                print("Current Blockchain Status: Full Node Synced")
            print("Latest Common Ancestor:\n    ", lca_block.header_hash)
            lca_time = struct_time(localtime(lca_block.data.timestamp))
            # Should automatically right-align the LCA height
            print(
                "     LCA time:",
                time.strftime("%a %b %d %Y %T %Z", lca_time),
                "       LCA height:",
                lca_block.height,
            )
            print("Heights of tips: " + str([h.height for h in tips]))
            print(f"Current difficulty: {difficulty}")
            print(f"Current VDF iterations per second: {ips:.0f}")
            print("Total iterations since genesis:", total_iters)
            print("")
            heads: List[HeaderBlock] = tips
            added_blocks: List[HeaderBlock] = []
            while len(added_blocks) < num_blocks and len(heads) > 0:
                heads = sorted(heads, key=lambda b: b.height, reverse=True)
                max_block = heads[0]
                if max_block not in added_blocks:
                    added_blocks.append(max_block)
                heads.remove(max_block)
                prev: Optional[HeaderBlock] = await client.get_header(
                    max_block.prev_header_hash)
                if prev is not None:
                    heads.append(prev)

            latest_blocks_labels = []
            for i, b in enumerate(added_blocks):
                latest_blocks_labels.append(
                    f"{b.height}:{b.header_hash}"
                    f" {'LCA' if b.header_hash == lca_block.header_hash else ''}"
                    f" {'TIP' if b.header_hash in [h.header_hash for h in tips] else ''}"
                )
            for i in range(len(latest_blocks_labels)):
                if i < 2:
                    print(latest_blocks_labels[i])
                elif i == 2:
                    print(
                        latest_blocks_labels[i],
                        "\n",
                        "                                -----",
                    )
                else:
                    print("", latest_blocks_labels[i])
            # if called together with connections, leave a blank line
            if args.connections:
                print("")
        if args.connections:
            connections = await client.get_connections()
            print("Connections")
            print(
                "Type      IP                                      Ports      NodeID        Last Connect"
                + "       MiB Up|Dwn")
            for con in connections:
                last_connect_tuple = struct_time(
                    localtime(con["last_message_time"]))
                last_connect = time.strftime("%b %d %T", last_connect_tuple)
                mb_down = con["bytes_read"] / 1024
                mb_up = con["bytes_written"] / 1024
                con_str = (
                    f"{NodeType(con['type']).name:9} {con['peer_host']:39} "
                    f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
                    f"{con['node_id'].hex()[:10]}... "
                    f"{last_connect}  "
                    f"{mb_down:7.1f}|{mb_up:<7.1f}")
                print(con_str)
            # if called together with state, leave a blank line
            if args.state:
                print("")
        if args.exit_node:
            node_stop = await client.stop_node()
            print(node_stop, "Node stopped.")
        if args.add_connection:
            if ":" not in args.add_connection:
                print(
                    "Enter a valid IP and port in the following format: 10.5.4.3:8000"
                )
            else:
                ip, port = (
                    ":".join(args.add_connection.split(":")[:-1]),
                    args.add_connection.split(":")[-1],
                )
            print(f"Connecting to {ip}, {port}")
            try:
                await client.open_connection(ip, int(port))
            except Exception:
                # TODO: catch right exception
                print(f"Failed to connect to {ip}:{port}")
        if args.remove_connection:
            result_txt = ""
            if len(args.remove_connection) != 10:
                result_txt = "Invalid NodeID"
            else:
                connections = await client.get_connections()
                for con in connections:
                    if args.remove_connection == con["node_id"].hex()[:10]:
                        print("Attempting to disconnect", "NodeID",
                              args.remove_connection)
                        try:
                            await client.close_connection(con["node_id"])
                        except Exception:
                            result_txt = (
                                f"Failed to disconnect NodeID {args.remove_connection}"
                            )
                        else:
                            result_txt = (
                                f"NodeID {args.remove_connection}... {NodeType(con['type']).name} "
                                f"{con['peer_host']} disconnected.")
                    elif result_txt == "":
                        result_txt = f"NodeID {args.remove_connection}... not found."
            print(result_txt)
        if args.block_header_hash_by_height != "":
            block_header = await client.get_header_by_height(
                args.block_header_hash_by_height)
            if block_header is not None:
                block_header_string = str(block_header.get_hash())
                print(
                    f"Header hash of block {args.block_header_hash_by_height}: {block_header_string}"
                )
            else:
                print("Block height", args.block_header_hash_by_height,
                      "not found.")
        if args.block_by_header_hash != "":
            block = await client.get_block(
                hexstr_to_bytes(args.block_by_header_hash))
            # Would like to have a verbose flag for this
            if block is not None:
                prev_block_header_hash = block.header.data.prev_header_hash
                prev_block_header = await client.get_block(
                    prev_block_header_hash)
                block_time = struct_time(localtime(
                    block.header.data.timestamp))
                block_time_string = time.strftime("%a %b %d %Y %T %Z",
                                                  block_time)
                aggregated_signature = block.header.data.aggregated_signature
                print("Block", block.header.data.height, ":")
                print(
                    f"Header Hash            0x{args.block_by_header_hash}\n"
                    f"Timestamp              {block_time_string}\n"
                    f"Height                 {block.header.data.height}\n"
                    f"Weight                 {block.header.data.weight}\n"
                    f"Previous Block         0x{block.header.data.prev_header_hash}\n"
                    f"Cost                   {block.header.data.cost}\n"
                    f"Difficulty             {block.header.data.weight-prev_block_header.header.data.weight}\n"
                    f"Total VDF Iterations   {block.header.data.total_iters}\n"
                    f"Block VDF Iterations   {block.proof_of_time.number_of_iterations}\n"
                    f"PoTime Witness Type    {block.proof_of_time.witness_type}\n"
                    f"PoSpace 'k' Size       {block.proof_of_space.size}\n"
                    f"Plot Public Key        0x{block.proof_of_space.plot_public_key}\n"
                    f"Pool Public Key        0x{block.proof_of_space.pool_public_key}\n"
                    f"Tx Filter Hash         {b'block.transactions_filter'.hex()}\n"
                    f"Tx Generator Hash      {block.transactions_generator}\n"
                    f"Coinbase Amount        {block.get_coinbase().amount/1000000000000}\n"
                    f"Coinbase Puzzle Hash   0x{block.get_coinbase().puzzle_hash}\n"
                    f"Fees Amount            {block.get_fees_coin().amount/1000000000000}\n"
                    f"Fees Puzzle Hash       0x{block.get_fees_coin().puzzle_hash}\n"
                    f"Aggregated Signature   {aggregated_signature}")
            else:
                print("Block with header hash", args.block_by_header_hash,
                      "not found.")

        if args.wallet_balances:
            if "wallet_rpc_port" not in args or args.wallet_rpc_port is None:
                wallet_rpc_port = config["wallet"]["rpc_port"]
            else:
                wallet_rpc_port = args.wallet_rpc_port
            wallet_client = await WalletRpcClient.create(
                self_hostname, wallet_rpc_port)
            summaries_response = await wallet_client.get_wallet_summaries()
            if "wallet_summaries" not in summaries_response:
                print("Wallet summary cannot be displayed")
            else:
                print("Balances")
                for wallet_id, summary in summaries_response[
                        "wallet_summaries"].items():
                    balances_response = await wallet_client.get_wallet_balance(
                        wallet_id)
                    if "balances" not in balances_response:
                        print("Balances cannot be displayed")
                        continue
                    balances = balances_response["balances"]
                    if "name" in summary:
                        print(
                            f"Wallet ID {wallet_id} type {summary['type']} {summary['name']}"
                        )
                        print(
                            f"   -Confirmed: {balances['confirmed_wallet_balance']/units['colouredcoin']}"
                        )
                        print(
                            f"   -Unconfirmed: {balances['unconfirmed_wallet_balance']/units['colouredcoin']}"
                        )
                        print(
                            f"   -Spendable: {balances['spendable_balance']/units['colouredcoin']}"
                        )
                        print(
                            f"   -Frozen: {balances['frozen_balance']/units['colouredcoin']}"
                        )
                        print(
                            f"   -Pending change: {balances['pending_change']/units['colouredcoin']}"
                        )
                    else:
                        print(f"Wallet ID {wallet_id} type {summary['type']}")
                        print(
                            f"   -Confirmed: {balances['confirmed_wallet_balance']/units['chia']} TXCH"
                        )
                        print(
                            f"   -Unconfirmed: {balances['unconfirmed_wallet_balance']/units['chia']} TXCH"
                        )
                        print(
                            f"   -Spendable: {balances['spendable_balance']/units['chia']} TXCH"
                        )
                        print(
                            f"   -Frozen: {balances['frozen_balance']/units['chia']} TXCH"
                        )
                        print(
                            f"   -Pending change: {balances['pending_change']/units['chia']} TXCH"
                        )
            wallet_client.close()
            await wallet_client.await_closed()

    except Exception as e:
        if isinstance(e, aiohttp.client_exceptions.ClientConnectorError):
            print(
                f"Connection error. Check if full node is running at {args.rpc_port}"
            )
        else:
            print(f"Exception from 'show' {e}")

    client.close()
    await client.await_closed()
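
# A minimal usage sketch: show_async reads a fixed set of attributes from args;
# an argparse.Namespace carrying only the fields the body touches is enough
# (argparse.Namespace supports the `in` checks above; parser is unused in the
# shown body, so None is passed for it).
#
#     import argparse, asyncio
#     args = argparse.Namespace(
#         rpc_port=None, state=True, connections=False, exit_node=False,
#         add_connection=None, remove_connection=None,
#         block_header_hash_by_height="", block_by_header_hash="",
#         wallet_balances=False)
#     asyncio.run(show_async(args, parser=None))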
Example #28
0
def main():
    """
    Script for creating plots.yaml from a directory (output file name ==> plots-generated.yaml).
    Copy script to ~/chia-blockchain/src/cmds
    Execute by running: python generate_plots_yaml_file.py 

    Without any parameters the default plots.yaml and keys.yaml locations will be used

    python generate_plots_yaml_file.py -d /mnt/bigdisk/plots             #will scan the specified drive and will create plots-generated.yaml

    python generate_plots_yaml_file.py -a True -d /mnt/bigdisk/plots     #will append entries to lots-generated.yaml, for 2nd, 3rd drives
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")
    key_config_filename = config_path_for_filename(root_path, "keys.yaml")

    parser = argparse.ArgumentParser(description="Chia plots.yaml generator")

    parser.add_argument("-a",
                        "--append",
                        help="Append to an existing output file",
                        type=bool,
                        default=False)

    new_plots_root = path_from_root(
        root_path,
        load_config(root_path,
                    "config.yaml").get("harvester",
                                       {}).get("new_plot_root", "plots"),
    )

    parser.add_argument(
        "-d",
        "--final_dir",
        help="Directory of plots",
        type=Path,
        default=Path(new_plots_root),
    )

    # We need the keys file to access the pool keys (if they exist) and the sk_seed.
    args = parser.parse_args()
    if not key_config_filename.exists():
        raise RuntimeError("Can not find keys.yaml.")

    # The seed is what will be used to generate a private key for each plot
    key_config = load_config(root_path, key_config_filename)
    sk_seed: bytes = bytes.fromhex(key_config["sk_seed"])

    pool_pk: PublicKey
    # Use the pool public key from the config, useful for solo farming
    pool_sk = PrivateKey.from_bytes(bytes.fromhex(key_config["pool_sks"][0]))
    pool_pk = pool_sk.get_public_key()

    paths = Path(args.final_dir)
    if not paths.exists():
        raise RuntimeError("Path does not exist.")

    if args.append:
        outfile = open(
            str(plot_config_filename)[0:-5] + "-generated.yaml", "a+")
    else:
        outfile = open(
            str(plot_config_filename)[0:-5] + "-generated.yaml", "w+")
        outfile.write("plots:\n")

    pathlist = Path(args.final_dir).glob('*.dat')
    pathlist = sorted(pathlist)
    for path in pathlist:
        # Get only the filename from the full path
        filename = path.name

        # Split the filename into index, size, and plot_seed
        tmp = filename.split('-')
        index = int(tmp[1])
        size = int(tmp[2])
        plot_seed = tmp[3]

        # Remove the file extension
        plot_seed = plot_seed[0:-4]
        sk: PrivateKey = PrivateKey.from_seed(sk_seed +
                                              size.to_bytes(1, "big") +
                                              index.to_bytes(4, "big"))
        outfile.write("  " + str(path) + ":\n")
        outfile.write("    pool_pk: " + bytes(pool_pk).hex() + "\n")
        outfile.write("    sk: " + bytes(sk).hex() + "\n")

    outfile.close()
    print("plots-generated.yaml created in the config directory")