Example #1
def sign(args):
    if args.message is None:
        print("Please specify the message argument -d")
        quit()

    if args.fingerprint is None or args.hd_path is None:
        print(
            "Please specify the fingerprint argument -f and hd_path argument -t"
        )
        quit()

    message = args.message
    assert message is not None

    k = Keychain()
    private_keys = k.get_all_private_keys()

    fingerprint = args.fingerprint
    assert fingerprint is not None
    hd_path = args.hd_path
    assert hd_path is not None
    path: List[uint32] = [
        uint32(int(i)) for i in hd_path.split("/") if i != "m"
    ]
    for sk, _ in private_keys:
        if sk.get_g1().get_fingerprint() == fingerprint:
            for c in path:
                sk = AugSchemeMPL.derive_child_sk(sk, c)
            print("Public key:", sk.get_g1())
            print("Signature:", AugSchemeMPL.sign(sk, bytes(message, "utf-8")))
            return
    print(f"Fingerprint {fingerprint} not found in keychain")
Example #2
async def setup_wallet_node(port,
                            introducer_port=None,
                            key_seed=b"setup_wallet_node",
                            dic={}):
    config = load_config(root_path, "config.yaml", "wallet")
    if "starting_height" in dic:
        config["starting_height"] = dic["starting_height"]

    keychain = Keychain(key_seed.hex(), True)
    keychain.add_private_key_seed(key_seed)
    private_key = keychain.get_all_private_keys()[0][0]
    test_constants_copy = test_constants.copy()
    for k in dic.keys():
        test_constants_copy[k] = dic[k]
    db_path = root_path / f"test-wallet-db-{port}.db"
    if db_path.exists():
        db_path.unlink()
    config["database_path"] = str(db_path)

    net_config = load_config(root_path, "config.yaml")
    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")

    wallet = await WalletNode.create(
        config,
        private_key,
        root_path,
        override_constants=test_constants_copy,
        name="wallet1",
    )
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        port,
        wallet,
        NodeType.WALLET,
        ping_interval,
        network_id,
        root_path,
        config,
        "wallet-server",
    )
    wallet.set_server(server)

    yield (wallet, server)

    server.close_all()
    await wallet.wallet_state_manager.clear_all_stores()
    await wallet.wallet_state_manager.close_all_stores()
    wallet.wallet_state_manager.unlink_db()
    await server.await_closed()
Example #3
def sign(message: str, fingerprint: int, hd_path: str):
    k = Keychain()
    private_keys = k.get_all_private_keys()

    path: List[uint32] = [
        uint32(int(i)) for i in hd_path.split("/") if i != "m"
    ]
    for sk, _ in private_keys:
        if sk.get_g1().get_fingerprint() == fingerprint:
            for c in path:
                sk = AugSchemeMPL.derive_child_sk(sk, c)
            print("Public key:", sk.get_g1())
            print("Signature:", AugSchemeMPL.sign(sk, bytes(message, "utf-8")))
            return
    print(f"Fingerprint {fingerprint} not found in keychain")
def main():
    config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    config_pool = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", "pool")
    keychain = Keychain()
    kwargs = service_kwargs_for_farmer(DEFAULT_ROOT_PATH, config, config_pool,
                                       keychain, DEFAULT_CONSTANTS)
    return run_service(**kwargs)
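For reference, a hedged usage sketch of the refactored sign() above; the fingerprint and HD path values below are placeholders and must correspond to a key that is actually present in the keychain.

# Hypothetical invocation of sign(); replace the fingerprint with one of your own keys.
sign(
    message="hello chia",
    fingerprint=1234567890,        # placeholder fingerprint
    hd_path="m/12381/8444/2/0",    # placeholder derivation path; "m" is skipped by sign()
)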
Example #5
    def test_basic_add_delete(self):
        kc: Keychain = Keychain(testing=True)
        kc.delete_all_keys()

        assert kc._get_free_private_key_seed_index() == 0
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 0

        mnemonic = generate_mnemonic()
        seed = seed_from_mnemonic(mnemonic)
        mnemonic_2 = generate_mnemonic()
        seed_2 = seed_from_mnemonic(mnemonic_2)

        kc.add_private_key_seed(seed)
        assert kc._get_free_private_key_seed_index() == 1
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 1

        kc.add_private_key_seed(seed_2)
        kc.add_private_key_seed(seed_2)  # duplicate seeds should not be added
        assert kc._get_free_private_key_seed_index() == 2
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 2

        raw = ExtendedPrivateKey.from_seed(b"123")
        kc.add_private_key(raw)
        kc.add_private_key(raw)
        kc.add_private_key(raw)
        kc.add_private_key(raw)  # duplicate keys should not be added
        raw_2 = ExtendedPrivateKey.from_seed(b"1234")
        kc.add_private_key(raw_2)

        assert kc._get_free_private_key_seed_index() == 2
        assert kc._get_free_private_key_index() == 2
        assert len(kc.get_all_private_keys()) == 4
        assert len(kc.get_all_public_keys()) == 4
        assert raw in [k for (k, s) in kc.get_all_private_keys()]

        kc.delete_key_by_fingerprint(raw_2.get_public_key().get_fingerprint())
        assert kc._get_free_private_key_index() == 1
        assert len(kc.get_all_private_keys()) == 3

        seed_key_2 = ExtendedPrivateKey.from_seed(seed_2)
        kc.delete_key_by_fingerprint(seed_key_2.get_public_key().get_fingerprint())
        assert kc._get_free_private_key_seed_index() == 1
        assert len(kc.get_all_private_keys()) == 2

        kc.delete_all_keys()
        assert kc._get_free_private_key_seed_index() == 0
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 0

        kc.add_private_key_not_extended(raw_2.get_private_key())
        assert kc._get_free_private_key_seed_index() == 0
        assert kc._get_free_private_key_index() == 1
        assert len(kc.get_all_private_keys()) == 1
        assert raw_2 not in [k for (k, s) in kc.get_all_private_keys()]
        assert raw_2.get_private_key() in [
            k.get_private_key() for (k, s) in kc.get_all_private_keys()
        ]
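Distilled from the test above, a minimal sketch of the Keychain lifecycle it exercises (testing keychain, add a seed, enumerate, delete); only calls that already appear in the test are used, and the same imports are assumed.

# Minimal Keychain lifecycle, assuming the same imports as the test above.
kc = Keychain(testing=True)
kc.delete_all_keys()                       # start from an empty testing keychain

mnemonic = generate_mnemonic()
seed = seed_from_mnemonic(mnemonic)
kc.add_private_key_seed(seed)              # add one seed-backed key
assert len(kc.get_all_private_keys()) == 1

sk = ExtendedPrivateKey.from_seed(seed)
kc.delete_key_by_fingerprint(sk.get_public_key().get_fingerprint())
assert len(kc.get_all_private_keys()) == 0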
Example #6
def service_kwargs_for_farmer(root_path):
    service_name = "farmer"
    config = load_config_cli(root_path, "config.yaml", service_name)
    keychain = Keychain()

    connect_peers = [
        PeerInfo(config["full_node_peer"]["host"],
                 config["full_node_peer"]["port"])
    ]

    # TODO: Remove once we have pool server
    config_pool = load_config_cli(root_path, "config.yaml", "pool")
    api = Farmer(config, config_pool, keychain, constants)

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.FARMER,
        advertised_port=config["port"],
        service_name=service_name,
        server_listen_ports=[config["port"]],
        connect_peers=connect_peers,
        auth_connect_peers=False,
        on_connect_callback=api._on_connect,
    )
    if config["start_rpc_server"]:
        kwargs["rpc_info"] = (FarmerRpcApi, config["rpc_port"])
    return kwargs
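A hedged sketch of how these kwargs are typically consumed, mirroring the run_service(**kwargs) pattern used by the main() entry points elsewhere in these examples; DEFAULT_ROOT_PATH and run_service are assumed to be importable from the surrounding project.

# Assumed entry point, following the run_service pattern from the other examples.
def main():
    kwargs = service_kwargs_for_farmer(DEFAULT_ROOT_PATH)
    return run_service(**kwargs)


if __name__ == "__main__":
    main()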
Example #7
async def async_main():
    root_path = DEFAULT_ROOT_PATH
    net_config = load_config(root_path, "config.yaml")
    config = load_config_cli(root_path, "config.yaml", "farmer")

    # TODO: Remove once we have pool server
    config_pool = load_config_cli(root_path, "config.yaml", "pool")

    initialize_logging("Farmer %(name)-25s", config["logging"], root_path)
    log = logging.getLogger(__name__)
    setproctitle("chia_farmer")

    keychain = Keychain()

    farmer = Farmer(config, config_pool, keychain)

    ping_interval = net_config.get("ping_interval")
    network_id = net_config.get("network_id")
    assert ping_interval is not None
    assert network_id is not None
    server = ChiaServer(
        config["port"],
        farmer,
        NodeType.FARMER,
        ping_interval,
        network_id,
        root_path,
        config,
    )

    try:
        asyncio.get_running_loop().add_signal_handler(signal.SIGINT,
                                                      server.close_all)
        asyncio.get_running_loop().add_signal_handler(signal.SIGTERM,
                                                      server.close_all)
    except NotImplementedError:
        log.info("signal handlers unsupported")

    _ = await server.start_server(farmer._on_connect)
    farmer.set_server(server)

    rpc_cleanup = None
    if config["start_rpc_server"]:
        # Starts the RPC server
        rpc_cleanup = await start_farmer_rpc_server(farmer, server.close_all,
                                                    config["rpc_port"])

    await asyncio.sleep(10)  # Allows full node to startup
    farmer._start_bg_tasks()

    await server.await_closed()
    farmer._shut_down = True

    # Waits for the rpc server to close
    if rpc_cleanup is not None:
        await rpc_cleanup()
    log.info("Closed RPC server.")

    log.info("Farmer fully closed.")
Example #8
def check_keys(new_root):
    keychain: Keychain = Keychain()
    all_sks = keychain.get_all_private_keys()
    if len(all_sks) == 0:
        print("No keys are present in the keychain. Generate them with 'chia keys generate'")
        return

    config: Dict = load_config(new_root, "config.yaml")
    pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks]
    all_targets = []
    stop_searching_for_farmer = "xch_target_address" not in config["farmer"]
    stop_searching_for_pool = "xch_target_address" not in config["pool"]
    number_of_ph_to_search = 500
    selected = config["selected_network"]
    prefix = config["network_overrides"]["config"][selected]["address_prefix"]
    for i in range(number_of_ph_to_search):
        if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
            break
        for sk, _ in all_sks:
            all_targets.append(
                encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix)
            )
            if all_targets[-1] == config["farmer"].get("xch_target_address"):
                stop_searching_for_farmer = True
            if all_targets[-1] == config["pool"].get("xch_target_address"):
                stop_searching_for_pool = True

    # Set the destinations
    if "xch_target_address" not in config["farmer"]:
        print(f"Setting the xch destination address for coinbase fees reward to {all_targets[0]}")
        config["farmer"]["xch_target_address"] = all_targets[0]
    elif config["farmer"]["xch_target_address"] not in all_targets:
        print(
            f"WARNING: using a farmer address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            f"{config['farmer']['xch_target_address']} with {all_targets[0]}"
        )

    if "pool" not in config:
        config["pool"] = {}
    if "xch_target_address" not in config["pool"]:
        print(f"Setting the xch destination address for coinbase reward to {all_targets[0]}")
        config["pool"]["xch_target_address"] = all_targets[0]
    elif config["pool"]["xch_target_address"] not in all_targets:
        print(
            f"WARNING: using a pool address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            f"{config['pool']['xch_target_address']} with {all_targets[0]}"
        )

    # Set the pool pks in the farmer
    pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys)
    if "pool_public_keys" in config["farmer"]:
        for pk_hex in config["farmer"]["pool_public_keys"]:
            # Add original ones in config
            pool_pubkeys_hex.add(pk_hex)

    config["farmer"]["pool_public_keys"] = pool_pubkeys_hex
    save_config(new_root, "config.yaml", config)
def get_default_pool_public_key() -> G1Element:
    keychain: Keychain = Keychain()
    sk_ent = keychain.get_first_private_key()
    if sk_ent is None:
        raise RuntimeError(
            "No keys, please run 'chia keys generate' or provide a public key with -f"
        )
    return master_sk_to_pool_sk(sk_ent[0]).get_g1()
Example #10
async def setup_wallet_node(
    port,
    consensus_constants: ConsensusConstants,
    full_node_port=None,
    introducer_port=None,
    key_seed=None,
    starting_height=None,
):
    config = bt.config["wallet"]
    config["port"] = port
    config["rpc_port"] = port + 1000
    if starting_height is not None:
        config["starting_height"] = starting_height
    config["initial_num_public_keys"] = 5

    entropy = token_bytes(32)
    keychain = Keychain(entropy.hex(), True)
    keychain.add_private_key(bytes_to_mnemonic(entropy), "")
    first_pk = keychain.get_first_public_key()
    assert first_pk is not None
    db_path_key_suffix = str(first_pk.get_fingerprint())
    db_name = f"test-wallet-db-{port}"
    db_path = bt.root_path / f"test-wallet-db-{port}-{db_path_key_suffix}"
    if db_path.exists():
        db_path.unlink()
    config["database_path"] = str(db_name)
    config["testing"] = True

    config["introducer_peer"]["host"] = self_hostname
    if introducer_port is not None:
        config["introducer_peer"]["port"] = introducer_port
        config["peer_connect_interval"] = 10

    if full_node_port is not None:
        config["full_node_peer"] = {}
        config["full_node_peer"]["host"] = self_hostname
        config["full_node_peer"]["port"] = full_node_port
    else:
        del config["full_node_peer"]

    kwargs = service_kwargs_for_wallet(bt.root_path, config,
                                       consensus_constants, keychain)
    kwargs.update(
        parse_cli_args=False,
        connect_to_daemon=False,
    )

    service = Service(**kwargs)

    await service.start(new_wallet=True)

    yield service._node, service._node.server

    service.stop()
    await service.wait_closed()
    if db_path.exists():
        db_path.unlink()
    keychain.delete_all_keys()
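Because setup_wallet_node is an async generator (setup runs before the yield, teardown after it), a caller outside a pytest fixture has to advance it twice; a hedged sketch using only standard async-generator semantics, with placeholder arguments:

# Hypothetical manual driver for the async-generator fixture above.
async def run_with_wallet_node():
    node_iter = setup_wallet_node(21234, test_constants)  # port/constants are placeholders
    wallet_node, server = await node_iter.__anext__()      # runs everything before `yield`
    try:
        pass  # interact with wallet_node / server here
    finally:
        # Exhaust the generator so the cleanup code after `yield` runs.
        try:
            await node_iter.__anext__()
        except StopAsyncIteration:
            pass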
Example #11
def get_pool_public_key(alt_fingerprint: Optional[int] = None) -> G1Element:
    sk_ent: Optional[Tuple[PrivateKey, bytes]]
    keychain: Keychain = Keychain()
    if alt_fingerprint is not None:
        sk_ent = keychain.get_private_key_by_fingerprint(alt_fingerprint)
    else:
        sk_ent = keychain.get_first_private_key()
    if sk_ent is None:
        raise RuntimeError("No keys, please run 'chia keys add', 'chia keys generate' or provide a public key with -p")
    return master_sk_to_pool_sk(sk_ent[0]).get_g1()
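A short usage sketch for get_pool_public_key(); the fingerprint value is a placeholder, and omitting it falls back to the first key in the keychain.

# Default: use the first private key in the keychain.
pool_pk: G1Element = get_pool_public_key()

# Or select a specific key by fingerprint (placeholder value).
pool_pk = get_pool_public_key(alt_fingerprint=1234567890)
print("Pool public key:", bytes(pool_pk).hex())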
Example #12
async def start_websocket_server():
    """
    Starts WalletNode, WebSocketServer, and ChiaServer
    """

    setproctitle("chia-wallet")
    keychain = Keychain(testing=False)
    websocket_server = WebSocketServer(keychain, DEFAULT_ROOT_PATH)
    await websocket_server.start()
    log.info("Wallet fully closed")
    def __init__(
        self,
        constants: ConsensusConstants = test_constants,
        root_path: Optional[Path] = None,
    ):
        self._tempdir = None
        if root_path is None:
            self._tempdir = tempfile.TemporaryDirectory()
            root_path = Path(self._tempdir.name)

        self.root_path = root_path
        self.constants = constants
        create_default_chia_config(root_path)
        self.keychain = Keychain("testing-1.8.0", True)
        self.keychain.delete_all_keys()
        self.farmer_master_sk = self.keychain.add_private_key(
            bytes_to_mnemonic(std_hash(b"block_tools farmer key")), ""
        )
        self.pool_master_sk = self.keychain.add_private_key(bytes_to_mnemonic(std_hash(b"block_tools pool key")), "")
        self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1()
        self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()
        self.init_plots(root_path)

        initialize_ssl(root_path)
        self.farmer_ph: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1()
        )
        self.pool_ph: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1()
        )

        self.all_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
        self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk in self.all_sks]

        farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk in self.all_sks]
        if len(self.pool_pubkeys) == 0 or len(farmer_pubkeys) == 0:
            raise RuntimeError("Keys not generated. Run `chia generate keys`")

        _, loaded_plots, _, _ = load_plots({}, {}, farmer_pubkeys, self.pool_pubkeys, None, root_path)
        self.plots: Dict[Path, PlotInfo] = loaded_plots
        self._config = load_config(self.root_path, "config.yaml")
Example #14
def main():
    config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    # This is the simulator
    local_test = config["testing"]
    if local_test is True:
        constants = test_constants
        current = config["database_path"]
        config["database_path"] = f"{current}_simulation"
    else:
        constants = DEFAULT_CONSTANTS
    keychain = Keychain(testing=False)
    kwargs = service_kwargs_for_wallet(DEFAULT_ROOT_PATH, config, constants,
                                       keychain)
    return run_service(**kwargs)
Example #15
    def test_bip39_eip2333_test_vector(self):
        kc: Keychain = Keychain(testing=True)
        kc.delete_all_keys()

        mnemonic = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"
        passphrase = "TREZOR"
        print("entropy to seed:", mnemonic_to_seed(mnemonic, passphrase).hex())
        master_sk = kc.add_private_key(mnemonic, passphrase)
        tv_master_int = 5399117110774477986698372024995405256382522670366369834617409486544348441851
        tv_child_int = 11812940737387919040225825939013910852517748782307378293770044673328955938106
        assert master_sk == PrivateKey.from_bytes(
            tv_master_int.to_bytes(32, "big"))
        child_sk = AugSchemeMPL.derive_child_sk(master_sk, 0)
        assert child_sk == PrivateKey.from_bytes(
            tv_child_int.to_bytes(32, "big"))
def service_kwargs_for_wallet(root_path):
    service_name = "wallet"
    config = load_config_cli(root_path, "config.yaml", service_name)
    keychain = Keychain(testing=False)

    wallet_constants = consensus_constants
    if config["testing"] is True:
        config["database_path"] = "test_db_wallet.db"
        wallet_constants = test_constants

    api = WalletNode(config,
                     keychain,
                     root_path,
                     consensus_constants=wallet_constants)

    if "full_node_peer" in config:
        connect_peers = [
            PeerInfo(config["full_node_peer"]["host"],
                     config["full_node_peer"]["port"])
        ]
    else:
        connect_peers = []

    async def start_callback():
        await api._start()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.WALLET,
        advertised_port=config["port"],
        service_name=service_name,
        server_listen_ports=[config["port"]],
        on_connect_callback=api._on_connect,
        stop_callback=stop_callback,
        start_callback=start_callback,
        await_closed_callback=await_closed_callback,
        rpc_info=(WalletRpcApi, config["rpc_port"]),
        connect_peers=connect_peers,
        auth_connect_peers=False,
    )
    return kwargs
Example #17
def check_keys(new_root):
    keychain: Keychain = Keychain()
    all_pubkeys = keychain.get_all_public_keys()
    if len(all_pubkeys) == 0:
        print(
            "No keys are present in the keychain. Generate them with 'chia keys generate_and_add'"
        )
        return
    all_targets = [
        create_puzzlehash_for_pk(
            BLSPublicKey(bytes(epk.public_child(0).get_public_key()))
        ).hex()
        for epk in all_pubkeys
    ]

    config: Dict = load_config(new_root, "config.yaml")
    # Set the destinations
    if (
        "xch_target_puzzle_hash" not in config["farmer"]
        or config["farmer"]["xch_target_puzzle_hash"] not in all_targets
    ):
        print(
            f"Setting the xch destination address for coinbase fees reward to {all_targets[0]}"
        )
        config["farmer"]["xch_target_puzzle_hash"] = all_targets[0]

    if "pool" in config:
        if (
            "xch_target_puzzle_hash" not in config["pool"]
            or config["pool"]["xch_target_puzzle_hash"] not in all_targets
        ):
            print(
                f"Setting the xch destination address for coinbase reward to {all_targets[0]}"
            )
            config["pool"]["xch_target_puzzle_hash"] = all_targets[0]

    # Set the pool pks in the farmer
    all_pubkeys_hex = set([bytes(pk.get_public_key()).hex() for pk in all_pubkeys])
    if "pool_public_keys" in config["farmer"]:
        for pk_hex in config["farmer"]["pool_public_keys"]:
            # Add original ones in config
            all_pubkeys_hex.add(pk_hex)

    config["farmer"]["pool_public_keys"] = all_pubkeys_hex
    save_config(new_root, "config.yaml", config)
Example #18
def main():
    config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    # This is the simulator
    local_test = config["testing"]
    if local_test is True:
        constants = test_constants
        current = config["database_path"]
        config["database_path"] = f"{current}_simulation"
    else:
        constants = DEFAULT_CONSTANTS
        genesis_challenge = bytes32(
            bytes.fromhex(config["network_genesis_challenges"][
                config["selected_network"]]))
        constants = constants.replace(GENESIS_CHALLENGE=genesis_challenge)
    keychain = Keychain(testing=False)
    kwargs = service_kwargs_for_wallet(DEFAULT_ROOT_PATH, config, constants,
                                       keychain)
    return run_service(**kwargs)
Example #19
def migrate_to_keychain(old_root, new_root):
    # Transfer the keys from the old root config folder into the keychain.
    # Also set the right public keys in the config files for farming.

    print("\nMigrating keys.yaml to keychain")
    keychain: Keychain = Keychain()

    # Migrate wallet sk
    try:
        keys_config = load_config(old_root, "keys.yaml", exit_on_error=False)
        wallet_key_bytes = bytes.fromhex(keys_config["wallet_sk"])
        wallet_sk = ExtendedPrivateKey.from_bytes(wallet_key_bytes)
        keychain.add_private_key(wallet_sk)

        # Migrate pool sks
        pool_sks_bytes = [bytes.fromhex(h) for h in keys_config["pool_sks"]]
        for k_bytes in pool_sks_bytes:
            keychain.add_private_key_not_extended(PrivateKey.from_bytes(k_bytes))
    except ValueError:
        print("No keys.yaml to migrate from.")

    check_keys(new_root)
Example #20
        type=int,
        default=None,
        help="Enter the fingerprint of the key you want to delete",
    )

    parser.add_argument(
        "command",
        help=f"Command can be any one of {command_list}",
        type=str,
        nargs="?",
    )
    parser.set_defaults(function=handler)
    parser.print_help = lambda self=parser: help_message()


keychain: Keychain = Keychain()


def generate_and_print():
    """
    Generates a seed for a private key, and prints the mnemonic to the terminal.
    """

    mnemonic = generate_mnemonic()
    mnemonics_string = mnemonic_to_string(mnemonic)
    print("Generating private key. Mnemonic:")
    print(mnemonics_string)
    print(
        "Note that this key has not been added to the keychain. Run chia keys add_seed -m [MNEMONICS] to add"
    )
    return mnemonic
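A hedged follow-up sketch: generate a mnemonic with generate_and_print() and then add it to the keychain programmatically, using only the seed_from_mnemonic/add_private_key_seed/ExtendedPrivateKey calls that appear in the other examples in this section; whether those helpers are importable here is an assumption.

# Hypothetical programmatic equivalent of `chia keys add_seed -m [MNEMONICS]`.
mnemonic = generate_and_print()
seed = seed_from_mnemonic(mnemonic)
keychain.add_private_key_seed(seed)  # `keychain` is the module-level Keychain above
print(
    "Added key with fingerprint:",
    ExtendedPrivateKey.from_seed(seed).get_public_key().get_fingerprint(),
)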
Example #21
def check_plots(args, root_path):
    config = load_config(root_path, "config.yaml")
    if args.num is not None:
        num = args.num
    else:
        num = 20
    if args.grep_string is not None:
        match_str = args.grep_string
    else:
        match_str = None

    v = Verifier()
    log.info("Loading plots in config.yaml using plot_tools loading code\n")
    kc: Keychain = Keychain()
    pks = [
        master_sk_to_farmer_sk(sk).get_g1()
        for sk, _ in kc.get_all_private_keys()
    ]
    pool_public_keys = [
        G1Element.from_bytes(bytes.fromhex(pk))
        for pk in config["farmer"]["pool_public_keys"]
    ]
    _, provers, failed_to_open_filenames, no_key_filenames = load_plots(
        {},
        {},
        pks,
        pool_public_keys,
        match_str,
        root_path,
        open_no_key_filenames=True,
    )
    if len(provers) > 0:
        log.info("")
        log.info("")
        log.info(f"Starting to test each plot with {num} challenges each\n")
    total_good_plots: Counter = Counter()
    total_bad_plots = 0
    total_size = 0

    for plot_path, plot_info in provers.items():
        pr = plot_info.prover
        log.info(f"Testing plot {plot_path} k={pr.get_size()}")
        log.info(f"\tPool public key: {plot_info.pool_public_key}")
        log.info(f"\tFarmer public key: {plot_info.farmer_public_key}")
        log.info(f"\tLocal sk: {plot_info.local_sk}")
        total_proofs = 0
        try:
            for i in range(num):
                challenge = std_hash(i.to_bytes(32, "big"))
                for index, quality_str in enumerate(
                        pr.get_qualities_for_challenge(challenge)):
                    proof = pr.get_full_proof(challenge, index)
                    total_proofs += 1
                    ver_quality_str = v.validate_proof(pr.get_id(),
                                                       pr.get_size(),
                                                       challenge, proof)
                    assert quality_str == ver_quality_str
        except BaseException as e:
            if isinstance(e, KeyboardInterrupt):
                log.warning("Interrupted, closing")
                return
            log.error(
                f"{type(e)}: {e} error in proving/verifying for plot {plot_path}"
            )
        if total_proofs > 0:
            log.info(
                f"\tProofs {total_proofs} / {num}, {round(total_proofs/float(num), 4)}"
            )
            total_good_plots[pr.get_size()] += 1
            total_size += plot_path.stat().st_size
        else:
            total_bad_plots += 1
            log.error(
                f"\tProofs {total_proofs} / {num}, {round(total_proofs/float(num), 4)}"
            )
    log.info("")
    log.info("")
    log.info("Summary")
    total_plots: int = sum(list(total_good_plots.values()))
    log.info(
        f"Found {total_plots} valid plots, total size {total_size / (1024 * 1024 * 1024 * 1024):.5f} TiB"
    )
    for (k, count) in sorted(dict(total_good_plots).items()):
        log.info(f"{count} plots of size {k}")
    grand_total_bad = total_bad_plots + len(failed_to_open_filenames)
    if grand_total_bad > 0:
        log.warning(f"{grand_total_bad} invalid plots")
    if len(no_key_filenames) > 0:
        log.warning(
            f"There are {len(no_key_filenames)} plots with a farmer or pool public key that "
            f"is not on this machine. The farmer private key must be in the keychain in order to "
            f"farm them, use 'chia keys' to transfer keys. The pool public keys must be in the config.yaml"
        )
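A hedged sketch of invoking check_plots() directly with a simple namespace; the argument values are placeholders and DEFAULT_ROOT_PATH is assumed to be importable.

# Hypothetical direct invocation of check_plots() above.
from argparse import Namespace

args = Namespace(num=30, grep_string=None)  # placeholder values for the two fields used above
check_plots(args, DEFAULT_ROOT_PATH)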
Example #22
class BlockTools:
    """
    Tools to generate blocks for testing.
    """
    def __init__(
        self,
        root_path: Optional[Path] = None,
        real_plots: bool = False,
    ):
        self._tempdir = None
        if root_path is None:
            self._tempdir = tempfile.TemporaryDirectory()
            root_path = Path(self._tempdir.name)
        self.root_path = root_path
        self.real_plots = real_plots

        if not real_plots:
            create_default_chia_config(root_path)
            initialize_ssl(root_path)
            # No real plots supplied, so we will use the small test plots
            self.use_any_pos = True
            self.keychain = Keychain("testing-1.8.0", True)
            self.keychain.delete_all_keys()
            self.farmer_master_sk = self.keychain.add_private_key(
                bytes_to_mnemonic(std_hash(b"block_tools farmer key")), "")
            self.pool_master_sk = self.keychain.add_private_key(
                bytes_to_mnemonic(std_hash(b"block_tools pool key")), "")
            self.farmer_pk = master_sk_to_farmer_sk(
                self.farmer_master_sk).get_g1()
            self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()

            plot_dir = get_plot_dir()
            mkdir(plot_dir)
            temp_dir = plot_dir / "tmp"
            mkdir(temp_dir)
            args = Namespace()
            # Can't go much lower than 18, since plots start having no solutions
            args.size = 18
            # Uses many plots for testing, in order to guarantee proofs of space at every height
            args.num = 40
            args.buffer = 32
            args.farmer_public_key = bytes(self.farmer_pk).hex()
            args.pool_public_key = bytes(self.pool_pk).hex()
            args.tmp_dir = temp_dir
            args.tmp2_dir = plot_dir
            args.final_dir = plot_dir
            args.plotid = None
            args.memo = None
            test_private_keys = [
                AugSchemeMPL.key_gen(std_hash(bytes([i])))
                for i in range(args.num)
            ]
            try:
                # No datetime in the filename, to get deterministic filenames and not replot
                create_plots(
                    args,
                    root_path,
                    use_datetime=False,
                    test_private_keys=test_private_keys,
                )
            except KeyboardInterrupt:
                shutil.rmtree(plot_dir, ignore_errors=True)
                sys.exit(1)
        else:
            initialize_ssl(root_path)
            self.keychain = Keychain()
            self.use_any_pos = False
            sk_and_ent = self.keychain.get_first_private_key()
            assert sk_and_ent is not None
            self.farmer_master_sk = sk_and_ent[0]
            self.pool_master_sk = sk_and_ent[0]

        self.farmer_ph = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1())
        self.pool_ph = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1())

        self.all_sks = self.keychain.get_all_private_keys()
        self.pool_pubkeys: List[G1Element] = [
            master_sk_to_pool_sk(sk).get_g1() for sk, _ in self.all_sks
        ]
        farmer_pubkeys: List[G1Element] = [
            master_sk_to_farmer_sk(sk).get_g1() for sk, _ in self.all_sks
        ]
        if len(self.pool_pubkeys) == 0 or len(farmer_pubkeys) == 0:
            raise RuntimeError("Keys not generated. Run `chia generate keys`")
        _, self.plots, _, _ = load_plots({}, {}, farmer_pubkeys,
                                         self.pool_pubkeys, root_path)

    def get_plot_signature(self, header_data: HeaderData,
                           plot_pk: G1Element) -> Optional[G2Element]:
        """
        Returns the plot signature of the header data.
        """
        farmer_sk = master_sk_to_farmer_sk(self.all_sks[0][0])
        for _, plot_info in self.plots.items():
            agg_pk = ProofOfSpace.generate_plot_public_key(
                plot_info.local_sk.get_g1(), plot_info.farmer_public_key)
            if agg_pk == plot_pk:
                m = header_data.get_hash()
                harv_share = AugSchemeMPL.sign(plot_info.local_sk, m, agg_pk)
                farm_share = AugSchemeMPL.sign(farmer_sk, m, agg_pk)
                return AugSchemeMPL.aggregate([harv_share, farm_share])

        return None

    def get_pool_key_signature(self, pool_target: PoolTarget,
                               pool_pk: G1Element) -> Optional[G2Element]:
        for sk, _ in self.all_sks:
            sk_child = master_sk_to_pool_sk(sk)
            if sk_child.get_g1() == pool_pk:
                return AugSchemeMPL.sign(sk_child, bytes(pool_target))
        return None

    def get_farmer_wallet_tool(self):
        return WalletTool(self.farmer_master_sk)

    def get_pool_wallet_tool(self):
        return WalletTool(self.pool_master_sk)

    def get_consecutive_blocks(
            self,
            test_constants: ConsensusConstants,
            num_blocks: int,
            block_list: List[FullBlock] = [],
            seconds_per_block=None,
            seed: bytes = b"",
            reward_puzzlehash: bytes32 = None,
            transaction_data_at_height: Dict[int, Tuple[Program,
                                                        G2Element]] = None,
            fees: uint64 = uint64(0),
    ) -> List[FullBlock]:
        if transaction_data_at_height is None:
            transaction_data_at_height = {}
        if seconds_per_block is None:
            seconds_per_block = test_constants.BLOCK_TIME_TARGET

        if len(block_list) == 0:
            block_list.append(
                FullBlock.from_bytes(test_constants.GENESIS_BLOCK))
            prev_difficulty = test_constants.DIFFICULTY_STARTING
            curr_difficulty = prev_difficulty
            curr_min_iters = test_constants.MIN_ITERS_STARTING
        elif len(block_list) < (test_constants.DIFFICULTY_EPOCH +
                                test_constants.DIFFICULTY_DELAY):
            # First epoch (+delay), so just get first difficulty
            prev_difficulty = block_list[0].weight
            curr_difficulty = block_list[0].weight
            assert test_constants.DIFFICULTY_STARTING == prev_difficulty
            curr_min_iters = test_constants.MIN_ITERS_STARTING
        else:
            curr_difficulty = block_list[-1].weight - block_list[-2].weight
            prev_difficulty = (
                block_list[-1 - test_constants.DIFFICULTY_EPOCH].weight -
                block_list[-2 - test_constants.DIFFICULTY_EPOCH].weight)
            assert block_list[-1].proof_of_time is not None
            curr_min_iters = calculate_min_iters_from_iterations(
                block_list[-1].proof_of_space,
                curr_difficulty,
                block_list[-1].proof_of_time.number_of_iterations,
                test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
            )

        starting_height = block_list[-1].height + 1
        timestamp = block_list[-1].header.data.timestamp
        for next_height in range(starting_height,
                                 starting_height + num_blocks):
            if (next_height > test_constants.DIFFICULTY_EPOCH
                    and next_height % test_constants.DIFFICULTY_EPOCH
                    == test_constants.DIFFICULTY_DELAY):
                # Calculates new difficulty
                height1 = uint64(next_height -
                                 (test_constants.DIFFICULTY_EPOCH +
                                  test_constants.DIFFICULTY_DELAY) - 1)
                height2 = uint64(next_height -
                                 (test_constants.DIFFICULTY_EPOCH) - 1)
                height3 = uint64(next_height -
                                 (test_constants.DIFFICULTY_DELAY) - 1)
                if height1 >= 0:
                    block1 = block_list[height1]
                    iters1 = block1.header.data.total_iters
                    timestamp1 = block1.header.data.timestamp
                else:
                    block1 = block_list[0]
                    timestamp1 = uint64(block1.header.data.timestamp -
                                        test_constants.BLOCK_TIME_TARGET)
                    iters1 = uint64(0)
                timestamp2 = block_list[height2].header.data.timestamp
                timestamp3 = block_list[height3].header.data.timestamp

                block3 = block_list[height3]
                iters3 = block3.header.data.total_iters
                term1 = (test_constants.DIFFICULTY_DELAY * prev_difficulty *
                         (timestamp3 - timestamp2) *
                         test_constants.BLOCK_TIME_TARGET)

                term2 = ((test_constants.DIFFICULTY_WARP_FACTOR - 1) *
                         (test_constants.DIFFICULTY_EPOCH -
                          test_constants.DIFFICULTY_DELAY) * curr_difficulty *
                         (timestamp2 - timestamp1) *
                         test_constants.BLOCK_TIME_TARGET)

                # Round down after the division
                new_difficulty_precise: uint64 = uint64(
                    (term1 + term2) // (test_constants.DIFFICULTY_WARP_FACTOR *
                                        (timestamp3 - timestamp2) *
                                        (timestamp2 - timestamp1)))
                new_difficulty = uint64(
                    truncate_to_significant_bits(
                        new_difficulty_precise,
                        test_constants.SIGNIFICANT_BITS))
                max_diff = uint64(
                    truncate_to_significant_bits(
                        test_constants.DIFFICULTY_FACTOR * curr_difficulty,
                        test_constants.SIGNIFICANT_BITS,
                    ))
                min_diff = uint64(
                    truncate_to_significant_bits(
                        curr_difficulty // test_constants.DIFFICULTY_FACTOR,
                        test_constants.SIGNIFICANT_BITS,
                    ))
                if new_difficulty >= curr_difficulty:
                    new_difficulty = min(
                        new_difficulty,
                        max_diff,
                    )
                else:
                    new_difficulty = max([uint64(1), new_difficulty, min_diff])

                min_iters_precise = uint64(
                    (iters3 - iters1) // (test_constants.DIFFICULTY_EPOCH *
                                          test_constants.MIN_ITERS_PROPORTION))
                curr_min_iters = uint64(
                    truncate_to_significant_bits(
                        min_iters_precise, test_constants.SIGNIFICANT_BITS))
                prev_difficulty = curr_difficulty
                curr_difficulty = new_difficulty
            time_taken = seconds_per_block
            timestamp += time_taken

            transactions: Optional[Program] = None
            aggsig: Optional[G2Element] = None
            if next_height in transaction_data_at_height:
                transactions, aggsig = transaction_data_at_height[next_height]

            update_difficulty = (next_height % test_constants.DIFFICULTY_EPOCH
                                 == test_constants.DIFFICULTY_DELAY)
            block_list.append(
                self.create_next_block(
                    test_constants,
                    block_list[-1],
                    timestamp,
                    update_difficulty,
                    curr_difficulty,
                    curr_min_iters,
                    seed,
                    reward_puzzlehash,
                    transactions,
                    aggsig,
                    fees,
                ))
        return block_list

    def create_genesis_block(
        self,
        test_constants: ConsensusConstants,
        challenge_hash=bytes([0] * 32),
        seed: bytes = b"",
        reward_puzzlehash: bytes32 = None,
    ) -> FullBlock:
        """
        Creates the genesis block with the specified details.
        """
        return self._create_block(
            test_constants,
            challenge_hash,
            uint32(0),
            bytes([0] * 32),
            uint64(0),
            uint128(0),
            uint64(int(time.time())),
            uint64(test_constants.DIFFICULTY_STARTING),
            uint64(test_constants.MIN_ITERS_STARTING),
            seed,
            True,
            reward_puzzlehash,
        )

    def create_next_block(
            self,
            test_constants: ConsensusConstants,
            prev_block: FullBlock,
            timestamp: uint64,
            update_difficulty: bool,
            difficulty: int,
            min_iters: int,
            seed: bytes = b"",
            reward_puzzlehash: bytes32 = None,
            transactions: Program = None,
            aggsig: G2Element = None,
            fees: uint64 = uint64(0),
    ) -> FullBlock:
        """
        Creates the next block with the specified details.
        """
        assert prev_block.proof_of_time is not None
        if update_difficulty:
            challenge = Challenge(
                prev_block.proof_of_space.challenge_hash,
                std_hash(prev_block.proof_of_space.get_hash() +
                         prev_block.proof_of_time.output.get_hash()),
                uint64(difficulty),
            )
        else:
            challenge = Challenge(
                prev_block.proof_of_space.challenge_hash,
                std_hash(prev_block.proof_of_space.get_hash() +
                         prev_block.proof_of_time.output.get_hash()),
                None,
            )

        return self._create_block(
            test_constants,
            challenge.get_hash(),
            uint32(prev_block.height + 1),
            prev_block.header_hash,
            prev_block.header.data.total_iters,
            prev_block.weight,
            timestamp,
            uint64(difficulty),
            min_iters,
            seed,
            False,
            reward_puzzlehash,
            transactions,
            aggsig,
            fees,
        )

    def _create_block(
            self,
            test_constants: ConsensusConstants,
            challenge_hash: bytes32,
            height: uint32,
            prev_header_hash: bytes32,
            prev_iters: uint64,
            prev_weight: uint128,
            timestamp: uint64,
            difficulty: int,
            min_iters: int,
            seed: bytes,
            genesis: bool = False,
            reward_puzzlehash: bytes32 = None,
            transactions: Program = None,
            aggsig: G2Element = None,
            fees: uint64 = uint64(0),
    ) -> FullBlock:
        """
        Creates a block with the specified details. Uses the stored plots to create a proof of space,
        and also evaluates the VDF for the proof of time.
        """
        selected_plot_info = None
        selected_proof_index = 0
        selected_quality: Optional[bytes] = None
        best_quality = 0
        plots = [
            pinfo for _, pinfo in sorted(list(self.plots.items()),
                                         key=lambda x: str(x[0]))
        ]
        if self.use_any_pos:
            random.seed(seed)
            for i in range(len(plots) * 3):
                # Allow passing in seed, to create reorgs and different chains
                seeded_pn = random.randint(0, len(plots) - 1)
                plot_info = plots[seeded_pn]
                plot_id = plot_info.prover.get_id()
                ccp = ProofOfSpace.can_create_proof(
                    plot_id,
                    challenge_hash,
                    test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
                )
                if not ccp:
                    continue
                qualities = plot_info.prover.get_qualities_for_challenge(
                    challenge_hash)
                if len(qualities) > 0:
                    selected_plot_info = plot_info
                    selected_quality = qualities[0]
                    break
        else:
            for i in range(len(plots)):
                plot_info = plots[i]
                j = 0
                plot_id = plot_info.prover.get_id()
                ccp = ProofOfSpace.can_create_proof(
                    plot_id,
                    challenge_hash,
                    test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
                )
                if not ccp:
                    continue
                qualities = plot_info.prover.get_qualities_for_challenge(
                    challenge_hash)
                for quality in qualities:
                    qual_int = int.from_bytes(quality, "big", signed=False)
                    if qual_int > best_quality:
                        best_quality = qual_int
                        selected_quality = quality
                        selected_plot_info = plot_info
                        selected_proof_index = j
                    j += 1

        assert selected_plot_info is not None
        if selected_quality is None:
            raise RuntimeError("No proofs for this challenge")

        proof_xs: bytes = selected_plot_info.prover.get_full_proof(
            challenge_hash, selected_proof_index)

        plot_pk = ProofOfSpace.generate_plot_public_key(
            selected_plot_info.local_sk.get_g1(),
            selected_plot_info.farmer_public_key,
        )
        proof_of_space: ProofOfSpace = ProofOfSpace(
            challenge_hash,
            selected_plot_info.pool_public_key,
            plot_pk,
            selected_plot_info.prover.get_size(),
            proof_xs,
        )

        number_iters: uint64 = pot_iterations.calculate_iterations(
            proof_of_space,
            difficulty,
            min_iters,
            test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
        )
        if self.real_plots:
            print(f"Performing {number_iters} VDF iterations")

        int_size = (test_constants.DISCRIMINANT_SIZE_BITS + 16) >> 4

        result = prove(challenge_hash, test_constants.DISCRIMINANT_SIZE_BITS,
                       number_iters)

        output = ClassgroupElement(
            int512(int.from_bytes(
                result[0:int_size],
                "big",
                signed=True,
            )),
            int512(
                int.from_bytes(
                    result[int_size:2 * int_size],
                    "big",
                    signed=True,
                )),
        )
        proof_bytes = result[2 * int_size:4 * int_size]

        proof_of_time = ProofOfTime(
            challenge_hash,
            number_iters,
            output,
            uint8(0),
            proof_bytes,
        )

        # Use the extension data to create different blocks based on header hash
        extension_data: bytes32 = bytes32(
            [random.randint(0, 255) for _ in range(32)])
        cost = uint64(0)

        fee_reward = uint64(block_rewards.calculate_base_fee(height) + fees)

        std_hash(std_hash(height))

        # Create filter
        byte_array_tx: List[bytes32] = []
        tx_additions: List[Coin] = []
        tx_removals: List[bytes32] = []
        if transactions:
            error, npc_list, _ = get_name_puzzle_conditions(transactions)
            additions: List[Coin] = additions_for_npc(npc_list)
            for coin in additions:
                tx_additions.append(coin)
                byte_array_tx.append(bytearray(coin.puzzle_hash))
            for npc in npc_list:
                tx_removals.append(npc.coin_name)
                byte_array_tx.append(bytearray(npc.coin_name))
        farmer_ph = self.farmer_ph
        pool_ph = self.pool_ph
        if reward_puzzlehash is not None:
            farmer_ph = reward_puzzlehash
            pool_ph = reward_puzzlehash

        byte_array_tx.append(bytearray(farmer_ph))
        byte_array_tx.append(bytearray(pool_ph))
        bip158: PyBIP158 = PyBIP158(byte_array_tx)
        encoded = bytes(bip158.GetEncoded())

        removal_merkle_set = MerkleSet()
        addition_merkle_set = MerkleSet()

        # Create removal Merkle set
        for coin_name in tx_removals:
            removal_merkle_set.add_already_hashed(coin_name)

        # Create addition Merkle set
        puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
        cb_reward = calculate_block_reward(height)
        cb_coin = create_coinbase_coin(height, pool_ph, cb_reward)
        fees_coin = create_fees_coin(height, farmer_ph, fee_reward)
        for coin in tx_additions + [cb_coin, fees_coin]:
            if coin.puzzle_hash in puzzlehash_coin_map:
                puzzlehash_coin_map[coin.puzzle_hash].append(coin)
            else:
                puzzlehash_coin_map[coin.puzzle_hash] = [coin]

        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coin_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))

        additions_root = addition_merkle_set.get_root()
        removal_root = removal_merkle_set.get_root()

        generator_hash = (transactions.get_tree_hash()
                          if transactions is not None else bytes32([0] * 32))
        filter_hash = std_hash(encoded)

        pool_target = PoolTarget(pool_ph, uint32(height))
        pool_target_signature = self.get_pool_key_signature(
            pool_target, proof_of_space.pool_public_key)
        assert pool_target_signature is not None
        final_aggsig: G2Element = pool_target_signature
        if aggsig is not None:
            final_aggsig = AugSchemeMPL.aggregate([final_aggsig, aggsig])

        header_data: HeaderData = HeaderData(
            height,
            prev_header_hash,
            timestamp,
            filter_hash,
            proof_of_space.get_hash(),
            uint128(prev_weight + difficulty),
            uint64(prev_iters + number_iters),
            additions_root,
            removal_root,
            farmer_ph,
            fee_reward,
            pool_target,
            final_aggsig,
            cost,
            extension_data,
            generator_hash,
        )

        header_hash_sig: G2Element = self.get_plot_signature(
            header_data, plot_pk)

        header: Header = Header(header_data, header_hash_sig)

        full_block: FullBlock = FullBlock(proof_of_space, proof_of_time,
                                          header, transactions, encoded)

        return full_block
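A hedged usage sketch for the BlockTools variant above; it is only a sketch, since constructing BlockTools with no real plots triggers test-plot creation, and test_constants is assumed to be importable.

# Hypothetical test setup using the BlockTools class defined above.
bt = BlockTools()  # creates a temporary root path and small test plots
blocks = bt.get_consecutive_blocks(test_constants, 10)
# When block_list is empty a genesis block is prepended, so this yields 11 blocks.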
class BlockTools:
    """
    Tools to generate blocks for testing.
    """

    def __init__(
        self,
        constants: ConsensusConstants = test_constants,
        root_path: Optional[Path] = None,
    ):
        self._tempdir = None
        if root_path is None:
            self._tempdir = tempfile.TemporaryDirectory()
            root_path = Path(self._tempdir.name)

        self.root_path = root_path
        self.constants = constants
        create_default_chia_config(root_path)
        self.keychain = Keychain("testing-1.8.0", True)
        self.keychain.delete_all_keys()
        self.farmer_master_sk = self.keychain.add_private_key(
            bytes_to_mnemonic(std_hash(b"block_tools farmer key")), ""
        )
        self.pool_master_sk = self.keychain.add_private_key(bytes_to_mnemonic(std_hash(b"block_tools pool key")), "")
        self.farmer_pk = master_sk_to_farmer_sk(self.farmer_master_sk).get_g1()
        self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()
        self.init_plots(root_path)

        initialize_ssl(root_path)
        self.farmer_ph: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1()
        )
        self.pool_ph: bytes32 = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1()
        )

        self.all_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
        self.pool_pubkeys: List[G1Element] = [master_sk_to_pool_sk(sk).get_g1() for sk in self.all_sks]

        farmer_pubkeys: List[G1Element] = [master_sk_to_farmer_sk(sk).get_g1() for sk in self.all_sks]
        if len(self.pool_pubkeys) == 0 or len(farmer_pubkeys) == 0:
            raise RuntimeError("Keys not generated. Run `chia generate keys`")

        _, loaded_plots, _, _ = load_plots({}, {}, farmer_pubkeys, self.pool_pubkeys, None, root_path)
        self.plots: Dict[Path, PlotInfo] = loaded_plots
        self._config = load_config(self.root_path, "config.yaml")

    def init_plots(self, root_path):
        plot_dir = get_plot_dir()
        mkdir(plot_dir)
        temp_dir = plot_dir / "tmp"
        mkdir(temp_dir)
        args = Namespace()
        # Can't go much lower than 20, since plots start having no solutions and get buggier
        args.size = 22
        # Uses many plots for testing, in order to guarantee proofs of space at every height
        args.num = 20
        args.buffer = 100
        args.farmer_public_key = bytes(self.farmer_pk).hex()
        args.pool_public_key = bytes(self.pool_pk).hex()
        args.tmp_dir = temp_dir
        args.tmp2_dir = plot_dir
        args.final_dir = plot_dir
        args.plotid = None
        args.memo = None
        args.buckets = 0
        args.stripe_size = 2000
        args.num_threads = 0
        args.nobitfield = False
        args.exclude_final_dir = False
        test_private_keys = [AugSchemeMPL.key_gen(std_hash(i.to_bytes(2, "big"))) for i in range(args.num)]
        try:
            # No datetime in the filename, to get deterministic filenames and not re-plot
            create_plots(
                args,
                root_path,
                use_datetime=False,
                test_private_keys=test_private_keys,
            )
        except KeyboardInterrupt:
            shutil.rmtree(plot_dir, ignore_errors=True)
            sys.exit(1)

    @property
    def config(self) -> Dict:
        return copy.deepcopy(self._config)

    def get_plot_signature(self, m: bytes32, plot_pk: G1Element) -> G2Element:
        """
        Returns the plot signature of the header data.
        """
        farmer_sk = master_sk_to_farmer_sk(self.all_sks[0])
        for _, plot_info in self.plots.items():
            agg_pk = ProofOfSpace.generate_plot_public_key(plot_info.local_sk.get_g1(), plot_info.farmer_public_key)
            if agg_pk == plot_pk:
                harv_share = AugSchemeMPL.sign(plot_info.local_sk, m, agg_pk)
                farm_share = AugSchemeMPL.sign(farmer_sk, m, agg_pk)
                return AugSchemeMPL.aggregate([harv_share, farm_share])

        raise ValueError(f"Do not have key {plot_pk}")

    def get_pool_key_signature(self, pool_target: PoolTarget, pool_pk: G1Element) -> G2Element:
        for sk in self.all_sks:
            sk_child = master_sk_to_pool_sk(sk)
            if sk_child.get_g1() == pool_pk:
                return AugSchemeMPL.sign(sk_child, bytes(pool_target))
        raise ValueError(f"Do not have key {pool_pk}")

    def get_farmer_wallet_tool(self) -> WalletTool:
        return WalletTool(self.farmer_master_sk)

    def get_pool_wallet_tool(self) -> WalletTool:
        return WalletTool(self.pool_master_sk)

    def get_consecutive_blocks(
        self,
        num_blocks: int,
        block_list_input: List[FullBlock] = None,
        farmer_reward_puzzle_hash: Optional[bytes32] = None,
        pool_reward_puzzle_hash: Optional[bytes32] = None,
        transaction_data: Optional[SpendBundle] = None,
        seed: bytes = b"",
        time_per_sub_block: Optional[float] = None,
        force_overflow: bool = False,
        skip_slots: int = 0,  # Force at least this number of empty slots before the first SB
        guarantee_block: bool = False,  # Force this sub-block to be a block
    ) -> List[FullBlock]:
        assert num_blocks > 0
        if block_list_input is not None:
            block_list = block_list_input.copy()
        else:
            block_list = []
        constants = self.constants
        transaction_data_included = False
        if time_per_sub_block is None:
            time_per_sub_block = float(constants.SUB_SLOT_TIME_TARGET) / float(constants.SLOT_SUB_BLOCKS_TARGET)

        if farmer_reward_puzzle_hash is None:
            farmer_reward_puzzle_hash = self.farmer_ph
        if pool_reward_puzzle_hash is None:
            pool_reward_puzzle_hash = self.pool_ph
        pool_target = PoolTarget(pool_reward_puzzle_hash, uint32(0))

        if len(block_list) == 0:
            initial_block_list_len = 0
            genesis = self.create_genesis_block(
                constants,
                seed,
                force_overflow=force_overflow,
                skip_slots=skip_slots,
                timestamp=uint64(int(time.time())),
                farmer_reward_puzzle_hash=farmer_reward_puzzle_hash,
            )
            log.info(f"Created block 0 iters: {genesis.total_iters}")
            num_empty_slots_added = skip_slots
            block_list = [genesis]
            num_blocks -= 1
        else:
            initial_block_list_len = len(block_list)
            num_empty_slots_added = uint32(0)  # Allows forcing empty slots in the beginning, for testing purposes

        if num_blocks == 0:
            return block_list

        height_to_hash, difficulty, sub_blocks = load_block_list(block_list, constants)

        latest_sub_block: SubBlockRecord = sub_blocks[block_list[-1].header_hash]
        curr = latest_sub_block
        while not curr.is_block:
            curr = sub_blocks[curr.prev_hash]
        start_timestamp = curr.timestamp
        start_height = curr.sub_block_height

        curr = latest_sub_block
        sub_blocks_added_this_sub_slot = 1

        while not curr.first_in_sub_slot:
            curr = sub_blocks[curr.prev_hash]
            sub_blocks_added_this_sub_slot += 1

        finished_sub_slots_at_sp: List[EndOfSubSlotBundle] = []  # Sub-slots since last sub block, up to signage point
        finished_sub_slots_at_ip: List[EndOfSubSlotBundle] = []  # Sub-slots since last sub block, up to infusion point
        sub_slot_iters: uint64 = latest_sub_block.sub_slot_iters  # The number of iterations in one sub-slot
        same_slot_as_last = True  # Only applies to first slot, to prevent old blocks from being added
        sub_slot_start_total_iters: uint128 = latest_sub_block.ip_sub_slot_total_iters(constants)
        sub_slots_finished = 0
        pending_ses: bool = False

        # Start at the last block in block list
        # Get the challenge for that slot
        while True:
            slot_cc_challenge, slot_rc_challenge = get_challenges(
                constants,
                sub_blocks,
                finished_sub_slots_at_sp,
                latest_sub_block.header_hash,
            )
            prev_num_of_blocks = num_blocks
            if num_empty_slots_added < skip_slots:
                # Haven't reached the target number of skipped slots yet, so make no proofs for this sub-slot
                num_empty_slots_added += 1
            else:
                # Loop over every signage point (Except for the last ones, which are used for overflows)
                for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA):
                    curr = latest_sub_block
                    while curr.total_iters > sub_slot_start_total_iters + calculate_sp_iters(
                        constants, sub_slot_iters, uint8(signage_point_index)
                    ):
                        if curr.sub_block_height == 0:
                            break
                        curr = sub_blocks[curr.prev_hash]
                    if curr.total_iters > sub_slot_start_total_iters:
                        finished_sub_slots_at_sp = []

                    if same_slot_as_last:
                        if signage_point_index < latest_sub_block.signage_point_index:
                            # Ignore this signage_point because it's in the past
                            continue

                    signage_point: SignagePoint = get_signage_point(
                        constants,
                        sub_blocks,
                        latest_sub_block,
                        sub_slot_start_total_iters,
                        uint8(signage_point_index),
                        finished_sub_slots_at_sp,
                        sub_slot_iters,
                    )
                    if signage_point_index == 0:
                        cc_sp_output_hash: bytes32 = slot_cc_challenge
                    else:
                        assert signage_point.cc_vdf is not None
                        cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()

                    qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
                        constants,
                        slot_cc_challenge,
                        cc_sp_output_hash,
                        seed,
                        difficulty,
                        sub_slot_iters,
                    )

                    for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
                        if sub_blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_SUB_BLOCKS or force_overflow:
                            break
                        if same_slot_as_last:
                            if signage_point_index == latest_sub_block.signage_point_index:
                                # Ignore this sub-block because it's in the past
                                if required_iters <= latest_sub_block.required_iters:
                                    continue
                        assert latest_sub_block.header_hash in sub_blocks
                        if transaction_data_included:
                            transaction_data = None
                        assert start_timestamp is not None
                        full_block, sub_block_record = get_full_block_and_sub_record(
                            constants,
                            sub_blocks,
                            sub_slot_start_total_iters,
                            uint8(signage_point_index),
                            proof_of_space,
                            slot_cc_challenge,
                            slot_rc_challenge,
                            farmer_reward_puzzle_hash,
                            pool_target,
                            start_timestamp,
                            start_height,
                            time_per_sub_block,
                            transaction_data,
                            height_to_hash,
                            difficulty,
                            required_iters,
                            sub_slot_iters,
                            self.get_plot_signature,
                            self.get_pool_key_signature,
                            finished_sub_slots_at_ip,
                            signage_point,
                            latest_sub_block,
                            seed,
                        )
                        if sub_block_record.is_block:
                            transaction_data_included = True
                        else:
                            if guarantee_block:
                                continue
                        if pending_ses:
                            pending_ses = False
                        block_list.append(full_block)
                        sub_blocks_added_this_sub_slot += 1

                        sub_blocks[full_block.header_hash] = sub_block_record
                        log.info(
                            f"Created block {sub_block_record.sub_block_height} ove=False, iters "
                            f"{sub_block_record.total_iters}"
                        )
                        height_to_hash[uint32(full_block.sub_block_height)] = full_block.header_hash
                        latest_sub_block = sub_blocks[full_block.header_hash]
                        finished_sub_slots_at_ip = []
                        num_blocks -= 1
                        if num_blocks == 0:
                            return block_list

            # Finish the end of sub-slot and try again next sub-slot
            # End of sub-slot logic
            if len(finished_sub_slots_at_ip) == 0:
                # At least one sub-block has been created within this sub-slot
                eos_iters: uint64 = uint64(sub_slot_iters - (latest_sub_block.total_iters - sub_slot_start_total_iters))
                cc_input: ClassgroupElement = latest_sub_block.challenge_vdf_output
                rc_challenge: bytes32 = latest_sub_block.reward_infusion_new_challenge
            else:
                # No sub-blocks were successfully created within this sub-slot
                eos_iters = sub_slot_iters
                cc_input = ClassgroupElement.get_default_element()
                rc_challenge = slot_rc_challenge
            cc_vdf, cc_proof = get_vdf_info_and_proof(
                constants,
                cc_input,
                slot_cc_challenge,
                eos_iters,
            )
            rc_vdf, rc_proof = get_vdf_info_and_proof(
                constants,
                ClassgroupElement.get_default_element(),
                rc_challenge,
                eos_iters,
            )

            eos_deficit: uint8 = (
                latest_sub_block.deficit
                if latest_sub_block.deficit > 0
                else constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK
            )
            icc_ip_vdf, icc_ip_proof = get_icc(
                constants,
                uint128(sub_slot_start_total_iters + sub_slot_iters),
                finished_sub_slots_at_ip,
                latest_sub_block,
                sub_blocks,
                sub_slot_start_total_iters,
                eos_deficit,
            )
            # End of slot vdf info for icc and cc have to be from challenge block or start of slot, respectively,
            # in order for light clients to validate.
            cc_vdf = VDFInfo(cc_vdf.challenge, sub_slot_iters, cc_vdf.output)

            if pending_ses:
                sub_epoch_summary: Optional[SubEpochSummary] = None
            else:
                sub_epoch_summary = next_sub_epoch_summary(
                    constants,
                    sub_blocks,
                    height_to_hash,
                    latest_sub_block.required_iters,
                    block_list[-1],
                    False,
                )
                pending_ses = True

            if sub_epoch_summary is not None:
                ses_hash = sub_epoch_summary.get_hash()
                new_sub_slot_iters: Optional[uint64] = sub_epoch_summary.new_sub_slot_iters
                new_difficulty: Optional[uint64] = sub_epoch_summary.new_difficulty

                log.info(f"Sub epoch summary: {sub_epoch_summary}")
            else:
                ses_hash = None
                new_sub_slot_iters = None
                new_difficulty = None

            if icc_ip_vdf is not None:
                # Icc vdf (Deficit of latest sub-block is <= 4)
                if len(finished_sub_slots_at_ip) == 0:
                    # This means there are sub-blocks in this sub-slot
                    curr = latest_sub_block
                    while not curr.is_challenge_sub_block(constants) and not curr.first_in_sub_slot:
                        curr = sub_blocks[curr.prev_hash]
                    if curr.is_challenge_sub_block(constants):
                        icc_eos_iters = uint64(sub_slot_start_total_iters + sub_slot_iters - curr.total_iters)
                    else:
                        icc_eos_iters = sub_slot_iters
                else:
                    # This means there are no sub-blocks in this sub-slot
                    icc_eos_iters = sub_slot_iters
                icc_ip_vdf = VDFInfo(
                    icc_ip_vdf.challenge,
                    icc_eos_iters,
                    icc_ip_vdf.output,
                )
                icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = InfusedChallengeChainSubSlot(icc_ip_vdf)
                assert icc_sub_slot is not None
                icc_sub_slot_hash = icc_sub_slot.get_hash() if latest_sub_block.deficit == 0 else None
                cc_sub_slot = ChallengeChainSubSlot(
                    cc_vdf,
                    icc_sub_slot_hash,
                    ses_hash,
                    new_sub_slot_iters,
                    new_difficulty,
                )
            else:
                # No icc
                icc_sub_slot = None
                cc_sub_slot = ChallengeChainSubSlot(cc_vdf, None, ses_hash, new_sub_slot_iters, new_difficulty)

            finished_sub_slots_at_ip.append(
                EndOfSubSlotBundle(
                    cc_sub_slot,
                    icc_sub_slot,
                    RewardChainSubSlot(
                        rc_vdf,
                        cc_sub_slot.get_hash(),
                        icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
                        eos_deficit,
                    ),
                    SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
                )
            )

            finished_sub_slots_eos = finished_sub_slots_at_ip.copy()
            latest_sub_block_eos = latest_sub_block
            overflow_cc_challenge = finished_sub_slots_at_ip[-1].challenge_chain.get_hash()
            overflow_rc_challenge = finished_sub_slots_at_ip[-1].reward_chain.get_hash()

            if transaction_data_included:
                transaction_data = None
            sub_slots_finished += 1
            log.info(
                f"Sub slot finished. Sub-blocks included: {sub_blocks_added_this_sub_slot} sub_blocks_per_slot: "
                f"{(len(block_list) - initial_block_list_len)/sub_slots_finished}"
            )
            sub_blocks_added_this_sub_slot = 0  # Sub slot ended, overflows are in next sub slot

            # Handle overflows: No overflows on new epoch
            if new_sub_slot_iters is None and num_empty_slots_added >= skip_slots and new_difficulty is None:
                for signage_point_index in range(
                    constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA,
                    constants.NUM_SPS_SUB_SLOT,
                ):
                    # note that we are passing in the finished slots which include the last slot
                    signage_point = get_signage_point(
                        constants,
                        sub_blocks,
                        latest_sub_block_eos,
                        sub_slot_start_total_iters,
                        uint8(signage_point_index),
                        finished_sub_slots_eos,
                        sub_slot_iters,
                    )
                    if signage_point_index == 0:
                        cc_sp_output_hash = slot_cc_challenge
                    else:
                        assert signage_point is not None
                        assert signage_point.cc_vdf is not None
                        cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()

                    # Collect the proofs of space that qualify for this overflow signage point
                    qualified_proofs = self.get_pospaces_for_challenge(
                        constants,
                        slot_cc_challenge,
                        cc_sp_output_hash,
                        seed,
                        difficulty,
                        sub_slot_iters,
                    )
                    for required_iters, proof_of_space in sorted(qualified_proofs, key=lambda t: t[0]):
                        if sub_blocks_added_this_sub_slot == constants.MAX_SUB_SLOT_SUB_BLOCKS:
                            break
                        assert start_timestamp is not None
                        full_block, sub_block_record = get_full_block_and_sub_record(
                            constants,
                            sub_blocks,
                            sub_slot_start_total_iters,
                            uint8(signage_point_index),
                            proof_of_space,
                            slot_cc_challenge,
                            slot_rc_challenge,
                            farmer_reward_puzzle_hash,
                            pool_target,
                            start_timestamp,
                            start_height,
                            time_per_sub_block,
                            transaction_data,
                            height_to_hash,
                            difficulty,
                            required_iters,
                            sub_slot_iters,
                            self.get_plot_signature,
                            self.get_pool_key_signature,
                            finished_sub_slots_at_ip,
                            signage_point,
                            latest_sub_block,
                            seed,
                            overflow_cc_challenge=overflow_cc_challenge,
                            overflow_rc_challenge=overflow_rc_challenge,
                        )

                        if sub_block_record.is_block:
                            transaction_data_included = True
                        elif guarantee_block:
                            continue
                        if pending_ses:
                            pending_ses = False

                        block_list.append(full_block)
                        sub_blocks_added_this_sub_slot += 1
                        log.info(
                            f"Created block {sub_block_record.sub_block_height } ov=True, iters "
                            f"{sub_block_record.total_iters}"
                        )
                        num_blocks -= 1
                        if num_blocks == 0:
                            return block_list

                        sub_blocks[full_block.header_hash] = sub_block_record
                        height_to_hash[uint32(full_block.sub_block_height)] = full_block.header_hash
                        latest_sub_block = sub_blocks[full_block.header_hash]
                        finished_sub_slots_at_ip = []

            finished_sub_slots_at_sp = finished_sub_slots_eos.copy()
            same_slot_as_last = False
            sub_slot_start_total_iters = uint128(sub_slot_start_total_iters + sub_slot_iters)
            if num_blocks < prev_num_of_blocks:
                num_empty_slots_added += 1

            if new_sub_slot_iters is not None:
                assert new_difficulty is not None
                sub_slot_iters = new_sub_slot_iters
                difficulty = new_difficulty

    def create_genesis_block(
        self,
        constants: ConsensusConstants,
        seed: bytes32 = b"",
        timestamp: Optional[uint64] = None,
        farmer_reward_puzzle_hash: Optional[bytes32] = None,
        force_overflow: bool = False,
        skip_slots: int = 0,
    ) -> FullBlock:
        if timestamp is None:
            timestamp = uint64(int(time.time()))

        if farmer_reward_puzzle_hash is None:
            farmer_reward_puzzle_hash = self.farmer_ph
        finished_sub_slots: List[EndOfSubSlotBundle] = []
        unfinished_block: Optional[UnfinishedBlock] = None
        ip_iters: uint64 = uint64(0)
        sub_slot_total_iters: uint128 = uint128(0)

        # Keep trying until we get a good proof of space that also passes sp filter
        while True:
            cc_challenge, rc_challenge = get_challenges(constants, {}, finished_sub_slots, None)
            for signage_point_index in range(0, constants.NUM_SPS_SUB_SLOT):
                signage_point: SignagePoint = get_signage_point(
                    constants,
                    {},
                    None,
                    sub_slot_total_iters,
                    uint8(signage_point_index),
                    finished_sub_slots,
                    constants.SUB_SLOT_ITERS_STARTING,
                )
                if signage_point_index == 0:
                    cc_sp_output_hash: bytes32 = cc_challenge
                else:
                    assert signage_point is not None
                    assert signage_point.cc_vdf is not None
                    cc_sp_output_hash = signage_point.cc_vdf.output.get_hash()
                qualified_proofs: List[Tuple[uint64, ProofOfSpace]] = self.get_pospaces_for_challenge(
                    constants,
                    cc_challenge,
                    cc_sp_output_hash,
                    seed,
                    constants.DIFFICULTY_STARTING,
                    constants.SUB_SLOT_ITERS_STARTING,
                )

                # Try each of the proofs of space
                for required_iters, proof_of_space in qualified_proofs:
                    sp_iters: uint64 = calculate_sp_iters(
                        constants,
                        uint64(constants.SUB_SLOT_ITERS_STARTING),
                        uint8(signage_point_index),
                    )
                    ip_iters = calculate_ip_iters(
                        constants,
                        uint64(constants.SUB_SLOT_ITERS_STARTING),
                        uint8(signage_point_index),
                        required_iters,
                    )
                    is_overflow_block = is_overflow_sub_block(constants, uint8(signage_point_index))
                    if force_overflow and not is_overflow_block:
                        continue
                    if len(finished_sub_slots) < skip_slots:
                        continue

                    unfinished_block = create_unfinished_block(
                        constants,
                        sub_slot_total_iters,
                        constants.SUB_SLOT_ITERS_STARTING,
                        uint8(signage_point_index),
                        sp_iters,
                        ip_iters,
                        proof_of_space,
                        cc_challenge,
                        farmer_reward_puzzle_hash,
                        PoolTarget(constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0)),
                        self.get_plot_signature,
                        self.get_pool_key_signature,
                        signage_point,
                        timestamp,
                        seed,
                        finished_sub_slots_input=finished_sub_slots,
                    )
                    assert unfinished_block is not None
                    if not is_overflow_block:
                        cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
                            constants,
                            ClassgroupElement.get_default_element(),
                            cc_challenge,
                            ip_iters,
                        )
                        cc_ip_vdf = replace(cc_ip_vdf, number_of_iterations=ip_iters)
                        rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
                            constants,
                            ClassgroupElement.get_default_element(),
                            rc_challenge,
                            ip_iters,
                        )
                        assert unfinished_block is not None
                        total_iters_sp = uint128(sub_slot_total_iters + sp_iters)
                        return unfinished_block_to_full_block(
                            unfinished_block,
                            cc_ip_vdf,
                            cc_ip_proof,
                            rc_ip_vdf,
                            rc_ip_proof,
                            None,
                            None,
                            finished_sub_slots,
                            None,
                            {},
                            total_iters_sp,
                            constants.DIFFICULTY_STARTING,
                        )

                if signage_point_index == constants.NUM_SPS_SUB_SLOT - constants.NUM_SP_INTERVALS_EXTRA - 1:
                    # Finish the end of sub-slot and try again next sub-slot
                    cc_vdf, cc_proof = get_vdf_info_and_proof(
                        constants,
                        ClassgroupElement.get_default_element(),
                        cc_challenge,
                        constants.SUB_SLOT_ITERS_STARTING,
                    )
                    rc_vdf, rc_proof = get_vdf_info_and_proof(
                        constants,
                        ClassgroupElement.get_default_element(),
                        rc_challenge,
                        constants.SUB_SLOT_ITERS_STARTING,
                    )
                    cc_slot = ChallengeChainSubSlot(cc_vdf, None, None, None, None)
                    finished_sub_slots.append(
                        EndOfSubSlotBundle(
                            cc_slot,
                            None,
                            RewardChainSubSlot(
                                rc_vdf,
                                cc_slot.get_hash(),
                                None,
                                uint8(constants.MIN_SUB_BLOCKS_PER_CHALLENGE_BLOCK),
                            ),
                            SubSlotProofs(cc_proof, None, rc_proof),
                        )
                    )

                if unfinished_block is not None:
                    cc_ip_vdf, cc_ip_proof = get_vdf_info_and_proof(
                        constants,
                        ClassgroupElement.get_default_element(),
                        finished_sub_slots[-1].challenge_chain.get_hash(),
                        ip_iters,
                    )
                    rc_ip_vdf, rc_ip_proof = get_vdf_info_and_proof(
                        constants,
                        ClassgroupElement.get_default_element(),
                        finished_sub_slots[-1].reward_chain.get_hash(),
                        ip_iters,
                    )
                    total_iters_sp = uint128(
                        sub_slot_total_iters
                        + calculate_sp_iters(
                            self.constants,
                            self.constants.SUB_SLOT_ITERS_STARTING,
                            unfinished_block.reward_chain_sub_block.signage_point_index,
                        )
                    )
                    return unfinished_block_to_full_block(
                        unfinished_block,
                        cc_ip_vdf,
                        cc_ip_proof,
                        rc_ip_vdf,
                        rc_ip_proof,
                        None,
                        None,
                        finished_sub_slots,
                        None,
                        {},
                        total_iters_sp,
                        constants.DIFFICULTY_STARTING,
                    )
            sub_slot_total_iters = uint128(sub_slot_total_iters + constants.SUB_SLOT_ITERS_STARTING)

    def get_pospaces_for_challenge(
        self,
        constants: ConsensusConstants,
        challenge_hash: bytes32,
        signage_point: bytes32,
        seed: bytes,
        difficulty: uint64,
        sub_slot_iters: uint64,
    ) -> List[Tuple[uint64, ProofOfSpace]]:
        found_proofs: List[Tuple[uint64, ProofOfSpace]] = []
        plots: List[PlotInfo] = [
            plot_info for _, plot_info in sorted(list(self.plots.items()), key=lambda x: str(x[0]))
        ]
        random.seed(seed)
        for plot_info in plots:
            plot_id = plot_info.prover.get_id()
            if ProofOfSpace.passes_plot_filter(constants, plot_id, challenge_hash, signage_point):
                new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(plot_id, challenge_hash, signage_point)
                qualities = plot_info.prover.get_qualities_for_challenge(new_challenge)

                for proof_index, quality_str in enumerate(qualities):

                    required_iters = calculate_iterations_quality(
                        quality_str,
                        plot_info.prover.get_size(),
                        difficulty,
                        signage_point,
                    )
                    if required_iters < calculate_sp_interval_iters(constants, sub_slot_iters):
                        proof_xs: bytes = plot_info.prover.get_full_proof(new_challenge, proof_index)
                        plot_pk = ProofOfSpace.generate_plot_public_key(
                            plot_info.local_sk.get_g1(),
                            plot_info.farmer_public_key,
                        )
                        proof_of_space: ProofOfSpace = ProofOfSpace(
                            new_challenge,
                            plot_info.pool_public_key,
                            None,
                            plot_pk,
                            plot_info.prover.get_size(),
                            proof_xs,
                        )
                        found_proofs.append((required_iters, proof_of_space))
        random_sample = found_proofs
        if len(found_proofs) >= 1:
            if random.random() < 0.1:
                # Removes some proofs of space to create "random" chains, based on the seed
                random_sample = random.sample(found_proofs, len(found_proofs) - 1)
        return random_sample
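# A minimal usage sketch, appended for illustration only: it assumes the
# BlockTools class defined above is in scope, and the helper name
# demo_build_test_chain, the chain lengths, and the optional SpendBundle
# argument are hypothetical, not part of the original example.
def demo_build_test_chain(
    bt: "BlockTools", spend_bundle: Optional[SpendBundle] = None
) -> List[FullBlock]:
    # guarantee_block=True makes every appended sub-block a transaction block.
    blocks = bt.get_consecutive_blocks(num_blocks=3, guarantee_block=True)
    # Extend the same chain; the SpendBundle (if any) is included in the first
    # transaction block that gets created.
    return bt.get_consecutive_blocks(
        num_blocks=2,
        block_list_input=blocks,
        transaction_data=spend_bundle,
        guarantee_block=spend_bundle is not None,
    )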
Example #24
async def setup_wallet_node(
    port,
    consensus_constants: ConsensusConstants,
    full_node_port=None,
    introducer_port=None,
    key_seed=None,
    starting_height=None,
):
    config = load_config(bt.root_path, "config.yaml", "wallet")
    if starting_height is not None:
        config["starting_height"] = starting_height
    config["initial_num_public_keys"] = 5

    entropy = token_bytes(32)
    keychain = Keychain(entropy.hex(), True)
    keychain.add_private_key(bytes_to_mnemonic(entropy), "")
    first_pk = keychain.get_first_public_key()
    assert first_pk is not None
    db_path_key_suffix = str(first_pk.get_fingerprint())
    db_name = f"test-wallet-db-{port}"
    db_path = bt.root_path / f"test-wallet-db-{port}-{db_path_key_suffix}"
    if db_path.exists():
        db_path.unlink()
    config["database_path"] = str(db_name)
    config["testing"] = True

    api = WalletNode(
        config,
        keychain,
        bt.root_path,
        consensus_constants=consensus_constants,
        name="wallet1",
    )
    periodic_introducer_poll = None
    if introducer_port is not None:
        periodic_introducer_poll = (
            PeerInfo(self_hostname, introducer_port),
            30,
            config["target_peer_count"],
        )
    connect_peers: List[PeerInfo] = []
    if full_node_port is not None:
        connect_peers = [PeerInfo(self_hostname, full_node_port)]

    started = asyncio.Event()

    async def start_callback():
        await api._start(new_wallet=True)
        nonlocal started
        started.set()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    service = Service(
        root_path=bt.root_path,
        api=api,
        node_type=NodeType.WALLET,
        advertised_port=port,
        service_name="wallet",
        server_listen_ports=[port],
        connect_peers=connect_peers,
        auth_connect_peers=False,
        on_connect_callback=api._on_connect,
        start_callback=start_callback,
        stop_callback=stop_callback,
        await_closed_callback=await_closed_callback,
        periodic_introducer_poll=periodic_introducer_poll,
        parse_cli_args=False,
    )

    run_task = asyncio.create_task(service.run())
    await started.wait()

    yield api, api.server

    service.stop()
    await run_task
    if db_path.exists():
        db_path.unlink()
    keychain.delete_all_keys()
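# A minimal sketch of how an async-generator helper like setup_wallet_node is
# typically driven from a test: advance it once to get the running (api, server)
# pair, then exhaust it so the teardown above runs. The port number and the
# helper name run_with_wallet_node are assumptions, not part of the original.
async def run_with_wallet_node(test_constants: ConsensusConstants):
    node_iter = setup_wallet_node(21234, test_constants)
    api, server = await node_iter.__anext__()
    try:
        # ... exercise the wallet node here ...
        pass
    finally:
        # Resuming the generator executes the cleanup after its `yield`:
        # stopping the service, unlinking the DB file, deleting the test keys.
        async for _ in node_iter:
            pass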
Example #25
class BlockTools:
    """
    Tools to generate blocks for testing.
    """
    def __init__(
        self,
        root_path: Path = TEST_ROOT_PATH,
        real_plots: bool = False,
    ):
        create_default_chia_config(root_path)
        initialize_ssl(root_path)
        self.root_path = root_path
        self.n_wesolowski = uint8(0)
        self.real_plots = real_plots

        if not real_plots:
            # No real plots supplied, so we will use the small test plots
            self.use_any_pos = True
            self.plot_config: Dict = {"plots": {}}
            # Can't go much lower than 19, since plots start having no solutions
            k: uint8 = uint8(19)
            # Uses many plots for testing, in order to guarantee proofs of space at every height
            num_plots = 40
            # Use a fixed seed for the testing keychain so the generated keys are deterministic

            self.keychain = Keychain("testing", True)
            self.keychain.delete_all_keys()
            self.keychain.add_private_key_seed(b"block_tools")
            pool_sk: PrivateKey = self.keychain.get_all_private_keys(
            )[0][0].get_private_key()
            pool_pk: PublicKey = pool_sk.get_public_key()

            plot_sks: List[PrivateKey] = [
                PrivateKey.from_seed(pn.to_bytes(4, "big"))
                for pn in range(num_plots)
            ]
            plot_pks: List[PublicKey] = [
                sk.get_public_key() for sk in plot_sks
            ]

            plot_seeds: List[bytes32] = [
                ProofOfSpace.calculate_plot_seed(pool_pk, plot_pk)
                for plot_pk in plot_pks
            ]
            plot_dir = get_plot_dir(root_path)
            mkdir(plot_dir)
            filenames: List[str] = [
                f"genesis-plots-{k}{std_hash(int.to_bytes(i, 4, 'big')).hex()}.dat"
                for i in range(num_plots)
            ]
            done_filenames = set()
            temp_dir = plot_dir / "plot.tmp"
            mkdir(temp_dir)
            try:
                for pn, filename in enumerate(filenames):
                    if not (plot_dir / filename).exists():
                        plotter = DiskPlotter()
                        plotter.create_plot_disk(
                            str(plot_dir),
                            str(plot_dir),
                            str(plot_dir),
                            filename,
                            k,
                            b"genesis",
                            plot_seeds[pn],
                        )
                        done_filenames.add(filename)
                    self.plot_config["plots"][str(plot_dir / filename)] = {
                        "pool_pk": bytes(pool_pk).hex(),
                        "sk": bytes(plot_sks[pn]).hex(),
                        "pool_sk": bytes(pool_sk).hex(),
                    }
                save_config(self.root_path, "plots.yaml", self.plot_config)

            except KeyboardInterrupt:
                for filename in filenames:
                    if (filename not in done_filenames
                            and (plot_dir / filename).exists()):
                        (plot_dir / filename).unlink()
                sys.exit(1)
        else:
            try:
                plot_config = load_config(DEFAULT_ROOT_PATH, "plots.yaml")
                normal_config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
            except FileNotFoundError:
                raise RuntimeError(
                    "Plots not generated. Run chia-create-plots")
            self.keychain = Keychain(testing=False)
            private_keys: List[PrivateKey] = [
                k.get_private_key()
                for (k, _) in self.keychain.get_all_private_keys()
            ]
            pool_pubkeys: List[PublicKey] = [
                sk.get_public_key() for sk in private_keys
            ]
            if len(private_keys) == 0:
                raise RuntimeError(
                    "Keys not generated. Run `chia generate keys`")

            self.prover_dict, _, _ = load_plots(normal_config["harvester"],
                                                plot_config, pool_pubkeys,
                                                DEFAULT_ROOT_PATH)

            new_plot_config: Dict = {"plots": {}}
            for key, value in plot_config["plots"].items():
                for sk in private_keys:
                    if (bytes(sk.get_public_key()).hex() == value["pool_pk"]
                            and key in self.prover_dict):
                        new_plot_config["plots"][key] = value
                        new_plot_config["plots"][key]["pool_sk"] = bytes(
                            sk).hex()

            self.plot_config = new_plot_config
            self.use_any_pos = False
            a = self.plot_config["plots"]
            print(f"Using {len(a)} reals plots to initialize block_tools")

        private_key = self.keychain.get_all_private_keys()[0][0]
        self.fee_target = create_puzzlehash_for_pk(
            BLSPublicKey(bytes(private_key.public_child(1).get_public_key())))

    def get_harvester_signature(self, header_data: HeaderData,
                                plot_pk: PublicKey):
        for value_dict in self.plot_config["plots"].values():
            if (PrivateKey.from_bytes(bytes.fromhex(
                    value_dict["sk"])).get_public_key() == plot_pk):
                return PrivateKey.from_bytes(bytes.fromhex(
                    value_dict["sk"])).sign_prepend(header_data.get_hash())

    def get_consecutive_blocks(
            self,
            input_constants: Dict,
            num_blocks: int,
            block_list: Optional[List[FullBlock]] = None,
            seconds_per_block=None,
            seed: bytes = b"",
            reward_puzzlehash: Optional[bytes32] = None,
            transaction_data_at_height: Optional[Dict[int, Tuple[
                Program, BLSSignature]]] = None,
            fees: uint64 = uint64(0),
    ) -> List[FullBlock]:
        if block_list is None:
            block_list = []
        if transaction_data_at_height is None:
            transaction_data_at_height = {}
        test_constants: Dict[str, Any] = constants.copy()
        for key, value in input_constants.items():
            test_constants[key] = value
        if seconds_per_block is None:
            seconds_per_block = test_constants["BLOCK_TIME_TARGET"]

        if len(block_list) == 0:
            if "GENESIS_BLOCK" in test_constants:
                block_list.append(
                    FullBlock.from_bytes(test_constants["GENESIS_BLOCK"]))
            else:
                block_list.append(
                    self.create_genesis_block(test_constants, std_hash(seed),
                                              seed))
            prev_difficulty = test_constants["DIFFICULTY_STARTING"]
            curr_difficulty = prev_difficulty
            curr_min_iters = test_constants["MIN_ITERS_STARTING"]
        elif len(block_list) < (test_constants["DIFFICULTY_EPOCH"] +
                                test_constants["DIFFICULTY_DELAY"]):
            # First epoch (+delay), so just get first difficulty
            prev_difficulty = block_list[0].weight
            curr_difficulty = block_list[0].weight
            assert test_constants["DIFFICULTY_STARTING"] == prev_difficulty
            curr_min_iters = test_constants["MIN_ITERS_STARTING"]
        else:
            curr_difficulty = block_list[-1].weight - block_list[-2].weight
            prev_difficulty = (
                block_list[-1 - test_constants["DIFFICULTY_EPOCH"]].weight -
                block_list[-2 - test_constants["DIFFICULTY_EPOCH"]].weight)
            assert block_list[-1].proof_of_time is not None
            curr_min_iters = calculate_min_iters_from_iterations(
                block_list[-1].proof_of_space,
                curr_difficulty,
                block_list[-1].proof_of_time.number_of_iterations,
            )

        starting_height = block_list[-1].height + 1
        timestamp = block_list[-1].header.data.timestamp
        for next_height in range(starting_height,
                                 starting_height + num_blocks):
            if (next_height > test_constants["DIFFICULTY_EPOCH"]
                    and next_height % test_constants["DIFFICULTY_EPOCH"]
                    == test_constants["DIFFICULTY_DELAY"]):
                # Calculates new difficulty
                height1 = uint64(next_height -
                                 (test_constants["DIFFICULTY_EPOCH"] +
                                  test_constants["DIFFICULTY_DELAY"]) - 1)
                height2 = uint64(next_height -
                                 (test_constants["DIFFICULTY_EPOCH"]) - 1)
                height3 = uint64(next_height -
                                 (test_constants["DIFFICULTY_DELAY"]) - 1)
                if height1 >= 0:
                    block1 = block_list[height1]
                    iters1 = block1.header.data.total_iters
                    timestamp1 = block1.header.data.timestamp
                else:
                    block1 = block_list[0]
                    timestamp1 = (block1.header.data.timestamp -
                                  test_constants["BLOCK_TIME_TARGET"])
                    iters1 = uint64(0)
                timestamp2 = block_list[height2].header.data.timestamp
                timestamp3 = block_list[height3].header.data.timestamp

                block3 = block_list[height3]
                iters3 = block3.header.data.total_iters
                term1 = (test_constants["DIFFICULTY_DELAY"] * prev_difficulty *
                         (timestamp3 - timestamp2) *
                         test_constants["BLOCK_TIME_TARGET"])

                term2 = ((test_constants["DIFFICULTY_WARP_FACTOR"] - 1) *
                         (test_constants["DIFFICULTY_EPOCH"] -
                          test_constants["DIFFICULTY_DELAY"]) *
                         curr_difficulty * (timestamp2 - timestamp1) *
                         test_constants["BLOCK_TIME_TARGET"])

                # Round down after the division
                new_difficulty_precise: uint64 = uint64(
                    (term1 + term2) //
                    (test_constants["DIFFICULTY_WARP_FACTOR"] *
                     (timestamp3 - timestamp2) * (timestamp2 - timestamp1)))
                new_difficulty = uint64(
                    truncate_to_significant_bits(
                        new_difficulty_precise,
                        test_constants["SIGNIFICANT_BITS"]))
                max_diff = uint64(
                    truncate_to_significant_bits(
                        test_constants["DIFFICULTY_FACTOR"] * curr_difficulty,
                        test_constants["SIGNIFICANT_BITS"],
                    ))
                min_diff = uint64(
                    truncate_to_significant_bits(
                        curr_difficulty // test_constants["DIFFICULTY_FACTOR"],
                        test_constants["SIGNIFICANT_BITS"],
                    ))
                if new_difficulty >= curr_difficulty:
                    new_difficulty = min(
                        new_difficulty,
                        max_diff,
                    )
                else:
                    new_difficulty = max([uint64(1), new_difficulty, min_diff])

                min_iters_precise = uint64(
                    (iters3 - iters1) //
                    (test_constants["DIFFICULTY_EPOCH"] *
                     test_constants["MIN_ITERS_PROPORTION"]))
                curr_min_iters = uint64(
                    truncate_to_significant_bits(
                        min_iters_precise, test_constants["SIGNIFICANT_BITS"]))
                prev_difficulty = curr_difficulty
                curr_difficulty = new_difficulty
            time_taken = seconds_per_block
            timestamp += time_taken

            transactions: Optional[Program] = None
            aggsig: Optional[BLSSignature] = None
            if next_height in transaction_data_at_height:
                transactions, aggsig = transaction_data_at_height[next_height]

            update_difficulty = (next_height %
                                 test_constants["DIFFICULTY_EPOCH"] ==
                                 test_constants["DIFFICULTY_DELAY"])
            block_list.append(
                self.create_next_block(
                    test_constants,
                    block_list[-1],
                    timestamp,
                    update_difficulty,
                    curr_difficulty,
                    curr_min_iters,
                    seed,
                    reward_puzzlehash,
                    transactions,
                    aggsig,
                    fees,
                ))
        return block_list

    def create_genesis_block(
        self,
        input_constants: Dict,
        challenge_hash: bytes32 = bytes32([0] * 32),
        seed: bytes = b"",
        reward_puzzlehash: Optional[bytes32] = None,
    ) -> FullBlock:
        """
        Creates the genesis block with the specified details.
        """
        test_constants: Dict[str, Any] = constants.copy()
        for key, value in input_constants.items():
            test_constants[key] = value

        return self._create_block(
            test_constants,
            challenge_hash,
            uint32(0),
            bytes([0] * 32),
            uint64(0),
            uint128(0),
            uint64(int(time.time())),
            uint64(test_constants["DIFFICULTY_STARTING"]),
            uint64(test_constants["MIN_ITERS_STARTING"]),
            seed,
            True,
            reward_puzzlehash,
        )

    def create_next_block(
            self,
            input_constants: Dict,
            prev_block: FullBlock,
            timestamp: uint64,
            update_difficulty: bool,
            difficulty: uint64,
            min_iters: uint64,
            seed: bytes = b"",
            reward_puzzlehash: Optional[bytes32] = None,
            transactions: Optional[Program] = None,
            aggsig: Optional[BLSSignature] = None,
            fees: uint64 = uint64(0),
    ) -> FullBlock:
        """
        Creates the next block with the specified details.
        """
        test_constants: Dict[str, Any] = constants.copy()
        for key, value in input_constants.items():
            test_constants[key] = value
        assert prev_block.proof_of_time is not None
        if update_difficulty:
            challenge = Challenge(
                prev_block.proof_of_space.challenge_hash,
                std_hash(prev_block.proof_of_space.get_hash() +
                         prev_block.proof_of_time.output.get_hash()),
                difficulty,
            )
        else:
            challenge = Challenge(
                prev_block.proof_of_space.challenge_hash,
                std_hash(prev_block.proof_of_space.get_hash() +
                         prev_block.proof_of_time.output.get_hash()),
                None,
            )

        return self._create_block(
            test_constants,
            challenge.get_hash(),
            uint32(prev_block.height + 1),
            prev_block.header_hash,
            prev_block.header.data.total_iters,
            prev_block.weight,
            timestamp,
            uint64(difficulty),
            min_iters,
            seed,
            False,
            reward_puzzlehash,
            transactions,
            aggsig,
            fees,
        )

    def _create_block(
            self,
            test_constants: Dict,
            challenge_hash: bytes32,
            height: uint32,
            prev_header_hash: bytes32,
            prev_iters: uint64,
            prev_weight: uint128,
            timestamp: uint64,
            difficulty: uint64,
            min_iters: uint64,
            seed: bytes,
            genesis: bool = False,
            reward_puzzlehash: Optional[bytes32] = None,
            transactions: Optional[Program] = None,
            aggsig: Optional[BLSSignature] = None,
            fees: uint64 = uint64(0),
    ) -> FullBlock:
        """
        Creates a block with the specified details. Uses the stored plots to create a proof of space,
        and also evaluates the VDF for the proof of time.
        """
        selected_prover = None
        selected_plot_sk = None
        selected_pool_sk = None
        selected_proof_index = 0
        plots = list(self.plot_config["plots"].items())
        selected_quality: Optional[bytes] = None
        best_quality = 0
        if self.use_any_pos:
            for i in range(len(plots) * 3):
                # Allow passing in seed, to create reorgs and different chains
                random.seed(seed + i.to_bytes(4, "big"))
                seeded_pn = random.randint(0, len(plots) - 1)
                pool_sk = PrivateKey.from_bytes(
                    bytes.fromhex(plots[seeded_pn][1]["pool_sk"]))
                plot_sk = PrivateKey.from_bytes(
                    bytes.fromhex(plots[seeded_pn][1]["sk"]))
                prover = DiskProver(plots[seeded_pn][0])
                qualities = prover.get_qualities_for_challenge(challenge_hash)
                if len(qualities) > 0:
                    if self.use_any_pos:
                        selected_quality = qualities[0]
                        selected_prover = prover
                        selected_pool_sk = pool_sk
                        selected_plot_sk = plot_sk
                        break
        else:
            for i in range(len(plots)):
                pool_sk = PrivateKey.from_bytes(
                    bytes.fromhex(plots[i][1]["pool_sk"]))
                plot_sk = PrivateKey.from_bytes(
                    bytes.fromhex(plots[i][1]["sk"]))
                try:
                    if self.real_plots:
                        prover = self.prover_dict[plots[i][0]]
                    else:
                        prover = DiskProver(plots[i][0])
                except (ValueError, KeyError):
                    continue
                qualities = prover.get_qualities_for_challenge(challenge_hash)
                for j, quality in enumerate(qualities):
                    qual_int = int.from_bytes(quality, "big", signed=False)
                    if qual_int > best_quality:
                        best_quality = qual_int
                        selected_quality = quality
                        selected_prover = prover
                        selected_pool_sk = pool_sk
                        selected_plot_sk = plot_sk
                        selected_proof_index = j

        assert selected_prover
        assert selected_pool_sk
        assert selected_plot_sk
        pool_pk = selected_pool_sk.get_public_key()
        plot_pk = selected_plot_sk.get_public_key()
        if selected_quality is None:
            raise RuntimeError("No proofs for this challenge")

        proof_xs: bytes = selected_prover.get_full_proof(
            challenge_hash, selected_proof_index)
        proof_of_space: ProofOfSpace = ProofOfSpace(challenge_hash, pool_pk,
                                                    plot_pk,
                                                    selected_prover.get_size(),
                                                    proof_xs)
        number_iters: uint64 = pot_iterations.calculate_iterations(
            proof_of_space, difficulty, min_iters)
        # print("Doing iters", number_iters)
        int_size = (test_constants["DISCRIMINANT_SIZE_BITS"] + 16) >> 4

        result = prove(challenge_hash,
                       test_constants["DISCRIMINANT_SIZE_BITS"], number_iters)

        output = ClassgroupElement(
            int512(int.from_bytes(
                result[0:int_size],
                "big",
                signed=True,
            )),
            int512(
                int.from_bytes(
                    result[int_size:2 * int_size],
                    "big",
                    signed=True,
                )),
        )
        proof_bytes = result[2 * int_size:4 * int_size]

        proof_of_time = ProofOfTime(
            challenge_hash,
            number_iters,
            output,
            self.n_wesolowski,
            proof_bytes,
        )

        if not reward_puzzlehash:
            reward_puzzlehash = self.fee_target

        # Use the extension data to create different blocks based on header hash
        extension_data: bytes32 = bytes32(
            [random.randint(0, 255) for _ in range(32)])
        cost = uint64(0)

        coinbase_reward = block_rewards.calculate_block_reward(height)
        fee_reward = uint64(block_rewards.calculate_base_fee(height) + fees)

        coinbase_coin, coinbase_signature = create_coinbase_coin_and_signature(
            height, reward_puzzlehash, coinbase_reward, selected_pool_sk)

        parent_coin_name = std_hash(std_hash(height))
        fees_coin = Coin(parent_coin_name, reward_puzzlehash,
                         uint64(fee_reward))

        # Create filter
        byte_array_tx: List[bytearray] = []
        tx_additions: List[Coin] = []
        tx_removals: List[bytes32] = []
        encoded = None
        if transactions:
            error, npc_list, _ = get_name_puzzle_conditions(transactions)
            additions: List[Coin] = additions_for_npc(npc_list)
            for coin in additions:
                tx_additions.append(coin)
                byte_array_tx.append(bytearray(coin.puzzle_hash))
            for npc in npc_list:
                tx_removals.append(npc.coin_name)
                byte_array_tx.append(bytearray(npc.coin_name))

            bip158: PyBIP158 = PyBIP158(byte_array_tx)
            encoded = bytes(bip158.GetEncoded())

        removal_merkle_set = MerkleSet()
        addition_merkle_set = MerkleSet()

        # Create removal Merkle set
        for coin_name in tx_removals:
            removal_merkle_set.add_already_hashed(coin_name)

        # Create addition Merkle set
        puzzlehash_coin_map: Dict[bytes32, List[Coin]] = {}
        for coin in tx_additions:
            if coin.puzzle_hash in puzzlehash_coin_map:
                puzzlehash_coin_map[coin.puzzle_hash].append(coin)
            else:
                puzzlehash_coin_map[coin.puzzle_hash] = [coin]

        # Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
        for puzzle, coins in puzzlehash_coin_map.items():
            addition_merkle_set.add_already_hashed(puzzle)
            addition_merkle_set.add_already_hashed(hash_coin_list(coins))

        additions_root = addition_merkle_set.get_root()
        removal_root = removal_merkle_set.get_root()

        generator_hash = (transactions.get_tree_hash()
                          if transactions is not None else bytes32([0] * 32))
        filter_hash = (std_hash(encoded)
                       if encoded is not None else bytes32([0] * 32))

        header_data: HeaderData = HeaderData(
            height,
            prev_header_hash,
            timestamp,
            filter_hash,
            proof_of_space.get_hash(),
            uint128(prev_weight + difficulty),
            uint64(prev_iters + number_iters),
            additions_root,
            removal_root,
            coinbase_coin,
            coinbase_signature,
            fees_coin,
            aggsig,
            cost,
            extension_data,
            generator_hash,
        )

        header_hash_sig: PrependSignature = selected_plot_sk.sign_prepend(
            header_data.get_hash())

        header: Header = Header(header_data, header_hash_sig)

        full_block: FullBlock = FullBlock(proof_of_space, proof_of_time,
                                          header, transactions, encoded)

        return full_block
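# A minimal usage sketch for the older BlockTools API above, for illustration
# only: the constant overrides, chain length, and seed value are assumptions,
# not part of the original example.
if __name__ == "__main__":
    bt = BlockTools()
    # Override a couple of consensus constants so test blocks are cheap to generate.
    overrides = {"DIFFICULTY_STARTING": 500, "MIN_ITERS_STARTING": 500}
    blocks = bt.get_consecutive_blocks(overrides, 5, [], seed=b"demo-chain")
    print(f"Generated {len(blocks)} blocks, tip height {blocks[-1].height}")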
Example #26
bram_message = None

status = None
while True:
    status_input = input(
        "What is the status of this alert? (ready/not ready)").lower()
    if status_input == "ready":
        status = True
        break
    elif status_input == "not ready":
        status = False
        break
    else:
        print("Unknown input")

keychain: Keychain = Keychain()
print("\n___________ SELECT KEY ____________")

private_keys = keychain.get_all_private_keys()
if len(private_keys) == 0:
    print("There are no saved private keys.")
    quit()
print("Showing all private keys:")
for sk, seed in private_keys:
    print("\nFingerprint:", sk.get_g1().get_fingerprint())

selected_key = None
while True:
    user_input = input(
        "\nEnter fingerprint of the key you want to use, or enter Q to quit: "
    ).lower()
Example #27
def main():
    """
    Script for creating plots and adding them to the plot config file.
    """
    root_path = DEFAULT_ROOT_PATH
    plot_config_filename = config_path_for_filename(root_path, "plots.yaml")

    parser = argparse.ArgumentParser(description="Chia plotting script.")
    parser.add_argument("-k", "--size", help="Plot size", type=int, default=26)
    parser.add_argument(
        "-n", "--num_plots", help="Number of plots", type=int, default=1
    )
    parser.add_argument(
        "-i", "--index", help="First plot index", type=int, default=None
    )
    parser.add_argument(
        "-p", "--pool_pub_key", help="Hex public key of pool", type=str, default=""
    )
    parser.add_argument(
        "-s", "--sk_seed", help="Secret key seed in hex", type=str, default=None
    )
    parser.add_argument(
        "-t",
        "--tmp_dir",
        help="Temporary directory for plotting files",
        type=Path,
        default=Path("."),
    )
    parser.add_argument(
        "-2",
        "--tmp2_dir",
        help="Second temporary directory for plotting files",
        type=Path,
        default=Path("."),
    )
    new_plots_root = path_from_root(
        root_path,
        load_config(root_path, "config.yaml")
        .get("harvester", {})
        .get("new_plot_root", "plots"),
    )
    parser.add_argument(
        "-d",
        "--final_dir",
        help="Final directory for plots (relative or absolute)",
        type=Path,
        default=new_plots_root,
    )

    args = parser.parse_args()

    if args.sk_seed is None and args.index is not None:
        log(
            f"You have specified the -i (index) argument without the -s (sk_seed) argument."
            f" The program has changes, so that the sk_seed is now generated randomly, so -i is no longer necessary."
            f" Please run the program without -i."
        )
        quit()

    if args.index is None:
        args.index = 0

    # The seed is what will be used to generate a private key for each plot
    if args.sk_seed is not None:
        sk_seed: bytes = bytes.fromhex(args.sk_seed)
        log(f"Using the provided sk_seed {sk_seed.hex()}.")
    else:
        sk_seed = token_bytes(32)
        log(
            f"Using sk_seed {sk_seed.hex()}. Note that sk seed is now generated randomly, as opposed "
            f"to from keys.yaml. If you want to use a specific seed, use the -s argument."
        )

    pool_pk: PublicKey
    if len(args.pool_pub_key) > 0:
        # Use the provided pool public key, useful for using an external pool
        pool_pk = PublicKey.from_bytes(bytes.fromhex(args.pool_pub_key))
    else:
        # Use the pool public key from the config, useful for solo farming
        keychain = Keychain()
        all_public_keys = keychain.get_all_public_keys()
        if len(all_public_keys) == 0:
            raise RuntimeError(
                "There are no private keys in the keychain, so we cannot create a plot. "
                "Please generate keys using 'chia keys generate' or pass in a pool pk with -p"
            )
        pool_pk = all_public_keys[0].get_public_key()

    log(
        f"Creating {args.num_plots} plots, from index {args.index} to "
        f"{args.index + args.num_plots - 1}, of size {args.size}, sk_seed {sk_seed.hex()} ppk {pool_pk}"
    )

    mkdir(args.tmp_dir)
    mkdir(args.tmp2_dir)
    mkdir(args.final_dir)
    finished_filenames = []
    for i in range(args.index, args.index + args.num_plots):
        # Generate a sk based on the seed, plot size (k), and index
        sk: PrivateKey = PrivateKey.from_seed(
            sk_seed + args.size.to_bytes(1, "big") + i.to_bytes(4, "big")
        )

        # The plot seed is based on the pool and plot pks
        plot_seed: bytes32 = ProofOfSpace.calculate_plot_seed(
            pool_pk, sk.get_public_key()
        )
        dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")

        filename: str = f"plot-k{args.size}-{dt_string}-{plot_seed}.dat"
        full_path: Path = args.final_dir / filename

        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", {}))
        filenames = [Path(k).name for k in plot_config_plots_new.keys()]
        already_in_config = any(plot_seed.hex() in fname for fname in filenames)
        if already_in_config:
            log(f"Plot {filename} already exists (in config)")
            continue

        if not full_path.exists():
            # Creates the plot. This will take a long time for larger plots.
            plotter: DiskPlotter = DiskPlotter()
            plotter.create_plot_disk(
                str(args.tmp_dir),
                str(args.tmp2_dir),
                str(args.final_dir),
                filename,
                args.size,
                bytes([]),
                plot_seed,
            )
            finished_filenames.append(filename)
        else:
            log(f"Plot {filename} already exists")

        # Updates the config if necessary.
        plot_config = load_config(root_path, plot_config_filename)
        plot_config_plots_new = deepcopy(plot_config.get("plots", {}))
        plot_config_plots_new[str(full_path)] = {
            "sk": bytes(sk).hex(),
            "pool_pk": bytes(pool_pk).hex(),
        }
        plot_config["plots"].update(plot_config_plots_new)

        # Dumps the new config to disk.
        save_config(root_path, plot_config_filename, plot_config)
    log("")
    log("Summary:")
    try:
        args.tmp_dir.rmdir()
    except Exception:
        log(
            f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty."
        )
    try:
        args.tmp2_dir.rmdir()
    except Exception:
        log(
            f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty."
        )
    log(f"Created a total of {len(finished_filenames)} new plots")
    for filename in finished_filenames:
        log(filename)
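
Because each plot secret key is derived from sk_seed, the plot size, and the plot index, re-running the script with the same -s, -k, and -i values reproduces the same keys and therefore the same plot seeds. A minimal sketch of that property, assuming PrivateKey.from_seed is the same API the script imports:

# Sketch: the derivation in the loop above is deterministic in (seed, size, index).
from secrets import token_bytes

sk_seed = token_bytes(32)
size, index = 26, 0
derivation_input = sk_seed + size.to_bytes(1, "big") + index.to_bytes(4, "big")
sk_a = PrivateKey.from_seed(derivation_input)  # PrivateKey assumed from the script's imports
sk_b = PrivateKey.from_seed(derivation_input)
assert bytes(sk_a) == bytes(sk_b)  # same inputs always yield the same plot key
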
Example #28
    def test_basic_add_delete(self):
        kc: Keychain = Keychain(testing=True)
        kc.delete_all_keys()

        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 0
        assert kc.get_first_private_key() is None
        assert kc.get_first_public_key() is None

        mnemonic = generate_mnemonic()
        entropy = bytes_from_mnemonic(mnemonic)
        assert bytes_to_mnemonic(entropy) == mnemonic
        mnemonic_2 = generate_mnemonic()

        kc.add_private_key(mnemonic, "")
        assert kc._get_free_private_key_index() == 1
        assert len(kc.get_all_private_keys()) == 1

        kc.add_private_key(mnemonic_2, "")
        kc.add_private_key(mnemonic_2, "")  # checks to not add duplicates
        assert kc._get_free_private_key_index() == 2
        assert len(kc.get_all_private_keys()) == 2

        assert kc._get_free_private_key_index() == 2
        assert len(kc.get_all_private_keys()) == 2
        assert len(kc.get_all_public_keys()) == 2
        assert kc.get_all_private_keys()[0] == kc.get_first_private_key()
        assert kc.get_all_public_keys()[0] == kc.get_first_public_key()

        assert len(kc.get_all_private_keys()) == 2

        seed_2 = mnemonic_to_seed(mnemonic, "")
        seed_key_2 = AugSchemeMPL.key_gen(seed_2)
        kc.delete_key_by_fingerprint(seed_key_2.get_g1().get_fingerprint())
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 1

        kc.delete_all_keys()
        assert kc._get_free_private_key_index() == 0
        assert len(kc.get_all_private_keys()) == 0

        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "my passphrase")
        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "")
        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)),
                           "third passphrase")

        assert len(kc.get_all_public_keys()) == 3
        assert len(kc.get_all_private_keys()) == 1
        assert len(kc.get_all_private_keys(["my passphrase", ""])) == 2
        assert len(
            kc.get_all_private_keys(
                ["my passphrase", "", "third passphrase", "another"])) == 3
        assert len(kc.get_all_private_keys(["my passphrase wrong"])) == 0

        assert kc.get_first_private_key() is not None
        assert kc.get_first_private_key(["bad passphrase"]) is None
        assert kc.get_first_public_key() is not None

        kc.delete_all_keys()
        kc.add_private_key(bytes_to_mnemonic(token_bytes(32)), "my passphrase")
        assert kc.get_first_public_key() is not None
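
The passphrase assertions above come down to one rule: get_all_private_keys only returns a key if one of the supplied passphrases (the empty string by default) decrypts it. A short usage sketch, assuming the same testing Keychain and generate_mnemonic helpers used in the test:

# Sketch of the passphrase-filtering behaviour exercised in the test above.
kc = Keychain(testing=True)
kc.delete_all_keys()
kc.add_private_key(generate_mnemonic(), "hunter2")

assert len(kc.get_all_private_keys()) == 0              # default only tries ""
assert len(kc.get_all_private_keys(["hunter2"])) == 1   # correct passphrase supplied
kc.delete_all_keys()
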
Example #29
    def __init__(
        self,
        root_path: Optional[Path] = None,
        real_plots: bool = False,
    ):
        self._tempdir = None
        if root_path is None:
            self._tempdir = tempfile.TemporaryDirectory()
            root_path = Path(self._tempdir.name)
        self.root_path = root_path
        self.real_plots = real_plots

        if not real_plots:
            create_default_chia_config(root_path)
            initialize_ssl(root_path)
            # No real plots supplied, so we will use the small test plots
            self.use_any_pos = True
            self.keychain = Keychain("testing-1.8.0", True)
            self.keychain.delete_all_keys()
            self.farmer_master_sk = self.keychain.add_private_key(
                bytes_to_mnemonic(std_hash(b"block_tools farmer key")), "")
            self.pool_master_sk = self.keychain.add_private_key(
                bytes_to_mnemonic(std_hash(b"block_tools pool key")), "")
            self.farmer_pk = master_sk_to_farmer_sk(
                self.farmer_master_sk).get_g1()
            self.pool_pk = master_sk_to_pool_sk(self.pool_master_sk).get_g1()

            plot_dir = get_plot_dir()
            mkdir(plot_dir)
            temp_dir = plot_dir / "tmp"
            mkdir(temp_dir)
            args = Namespace()
            # Can't go much lower than 18, since plots start having no solutions
            args.size = 18
            # Uses many plots for testing, in order to guarantee proofs of space at every height
            args.num = 40
            args.buffer = 32
            args.farmer_public_key = bytes(self.farmer_pk).hex()
            args.pool_public_key = bytes(self.pool_pk).hex()
            args.tmp_dir = temp_dir
            args.tmp2_dir = plot_dir
            args.final_dir = plot_dir
            args.plotid = None
            args.memo = None
            test_private_keys = [
                AugSchemeMPL.key_gen(std_hash(bytes([i])))
                for i in range(args.num)
            ]
            try:
                # No datetime in the filename, to get deterministic filenames and not replot
                create_plots(
                    args,
                    root_path,
                    use_datetime=False,
                    test_private_keys=test_private_keys,
                )
            except KeyboardInterrupt:
                shutil.rmtree(plot_dir, ignore_errors=True)
                sys.exit(1)
        else:
            initialize_ssl(root_path)
            self.keychain = Keychain()
            self.use_any_pos = False
            sk_and_ent = self.keychain.get_first_private_key()
            assert sk_and_ent is not None
            self.farmer_master_sk = sk_and_ent[0]
            self.pool_master_sk = sk_and_ent[0]

        self.farmer_ph = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.farmer_master_sk, uint32(0)).get_g1())
        self.pool_ph = create_puzzlehash_for_pk(
            master_sk_to_wallet_sk(self.pool_master_sk, uint32(0)).get_g1())

        self.all_sks = self.keychain.get_all_private_keys()
        self.pool_pubkeys: List[G1Element] = [
            master_sk_to_pool_sk(sk).get_g1() for sk, _ in self.all_sks
        ]
        farmer_pubkeys: List[G1Element] = [
            master_sk_to_farmer_sk(sk).get_g1() for sk, _ in self.all_sks
        ]
        if len(self.pool_pubkeys) == 0 or len(farmer_pubkeys) == 0:
            raise RuntimeError("Keys not generated. Run `chia generate keys`")
        _, self.plots, _, _ = load_plots({}, {}, farmer_pubkeys,
                                         self.pool_pubkeys, root_path)
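
Because the test plot keys above are derived only from the plot index (via std_hash), every BlockTools instance regenerates the same keys, which is what lets the deterministic filenames avoid replotting. A small sketch of that determinism, assuming the same std_hash and AugSchemeMPL imports used above:

# Sketch: per-index test keys are deterministic, so repeated runs agree.
first_run = [AugSchemeMPL.key_gen(std_hash(bytes([i]))) for i in range(3)]
second_run = [AugSchemeMPL.key_gen(std_hash(bytes([i]))) for i in range(3)]
assert [bytes(sk) for sk in first_run] == [bytes(sk) for sk in second_run]
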
Example #30
    def __init__(
        self,
        root_path: Path = TEST_ROOT_PATH,
        real_plots: bool = False,
    ):
        create_default_chia_config(root_path)
        initialize_ssl(root_path)
        self.root_path = root_path
        self.n_wesolowski = uint8(0)
        self.real_plots = real_plots

        if not real_plots:
            # No real plots supplied, so we will use the small test plots
            self.use_any_pos = True
            self.plot_config: Dict = {"plots": {}}
            # Can't go much lower than 19, since plots start having no solutions
            k: uint8 = uint8(19)
            # Uses many plots for testing, in order to guarantee proofs of space at every height
            num_plots = 40
            # Use the empty string as the seed for the private key

            self.keychain = Keychain("testing", True)
            self.keychain.delete_all_keys()
            self.keychain.add_private_key_seed(b"block_tools")
            all_private_keys = self.keychain.get_all_private_keys()
            pool_sk: PrivateKey = all_private_keys[0][0].get_private_key()
            pool_pk: PublicKey = pool_sk.get_public_key()

            plot_sks: List[PrivateKey] = [
                PrivateKey.from_seed(pn.to_bytes(4, "big"))
                for pn in range(num_plots)
            ]
            plot_pks: List[PublicKey] = [
                sk.get_public_key() for sk in plot_sks
            ]

            plot_seeds: List[bytes32] = [
                ProofOfSpace.calculate_plot_seed(pool_pk, plot_pk)
                for plot_pk in plot_pks
            ]
            plot_dir = get_plot_dir(root_path)
            mkdir(plot_dir)
            filenames: List[str] = [
                f"genesis-plots-{k}{std_hash(int.to_bytes(i, 4, 'big')).hex()}.dat"
                for i in range(num_plots)
            ]
            done_filenames = set()
            temp_dir = plot_dir / "plot.tmp"
            mkdir(temp_dir)
            try:
                for pn, filename in enumerate(filenames):
                    if not (plot_dir / filename).exists():
                        plotter = DiskPlotter()
                        plotter.create_plot_disk(
                            str(plot_dir),
                            str(plot_dir),
                            str(plot_dir),
                            filename,
                            k,
                            b"genesis",
                            plot_seeds[pn],
                        )
                        done_filenames.add(filename)
                    self.plot_config["plots"][str(plot_dir / filename)] = {
                        "pool_pk": bytes(pool_pk).hex(),
                        "sk": bytes(plot_sks[pn]).hex(),
                        "pool_sk": bytes(pool_sk).hex(),
                    }
                save_config(self.root_path, "plots.yaml", self.plot_config)

            except KeyboardInterrupt:
                for filename in filenames:
                    if (filename not in done_filenames
                            and (plot_dir / filename).exists()):
                        (plot_dir / filename).unlink()
                sys.exit(1)
        else:
            try:
                plot_config = load_config(DEFAULT_ROOT_PATH, "plots.yaml")
                normal_config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
            except FileNotFoundError:
                raise RuntimeError(
                    "Plots not generated. Run chia-create-plots")
            self.keychain = Keychain(testing=False)
            private_keys: List[PrivateKey] = [
                k.get_private_key()
                for (k, _) in self.keychain.get_all_private_keys()
            ]
            pool_pubkeys: List[PublicKey] = [
                sk.get_public_key() for sk in private_keys
            ]
            if len(private_keys) == 0:
                raise RuntimeError(
                    "Keys not generated. Run `chia generate keys`")

            self.prover_dict, _, _ = load_plots(normal_config["harvester"],
                                                plot_config, pool_pubkeys,
                                                DEFAULT_ROOT_PATH)

            new_plot_config: Dict = {"plots": {}}
            for key, value in plot_config["plots"].items():
                for sk in private_keys:
                    if (bytes(sk.get_public_key()).hex() == value["pool_pk"]
                            and key in self.prover_dict):
                        new_plot_config["plots"][key] = value
                        new_plot_config["plots"][key]["pool_sk"] = bytes(
                            sk).hex()

            self.plot_config = new_plot_config
            self.use_any_pos = False
            a = self.plot_config["plots"]
            print(f"Using {len(a)} reals plots to initialize block_tools")

        private_key = self.keychain.get_all_private_keys()[0][0]
        self.fee_target = create_puzzlehash_for_pk(
            BLSPublicKey(bytes(private_key.public_child(1).get_public_key())))