Example #1
    async def create(cls, db_wrapper: DBWrapper):
        self = cls()

        # All full blocks which have been added to the blockchain. Header_hash -> block
        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db
        await self.db.execute("pragma journal_mode=wal")
        await self.db.execute("pragma synchronous=2")
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
            "  is_block tinyint, is_fully_compactified tinyint, block blob)")

        # Block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, height bigint,"
            "block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)"
        )

        # todo remove in v1.2
        await self.db.execute("DROP TABLE IF EXISTS sub_epoch_segments_v2")

        # Sub epoch segments for weight proofs
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY, challenge_segments blob)"
        )

        # Height index so we can look up in order of height for sync purposes
        await self.db.execute(
            "CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)"
        )
        await self.db.execute(
            "CREATE INDEX IF NOT EXISTS is_block on full_blocks(is_block)")
        await self.db.execute(
            "CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)"
        )

        await self.db.execute(
            "CREATE INDEX IF NOT EXISTS height on block_records(height)")

        await self.db.execute(
            "CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
        await self.db.execute(
            "CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
        await self.db.execute(
            "CREATE INDEX IF NOT EXISTS is_block on block_records(is_block)")

        await self.db.commit()
        self.block_cache = LRUCache(1000)
        self.ses_challenge_cache = LRUCache(50)
        return self
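
As a usage note (not part of the original snippet), the sketch below shows one way this factory classmethod might be wired to an aiosqlite connection. The class name BlockStore, the DBWrapper constructor arguments, and the database file name are assumptions for illustration only.

import aiosqlite

async def open_block_store(db_path: str = "blockchain.sqlite"):
    # Hypothetical wiring; assumes DBWrapper wraps an aiosqlite connection and
    # that the create() classmethod above belongs to a class named BlockStore.
    connection = await aiosqlite.connect(db_path)
    db_wrapper = DBWrapper(connection)
    return await BlockStore.create(db_wrapper)
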
Example #2
    async def sync_job(self) -> None:
        while True:
            self.log.info("Loop start in sync job")
            if self._shut_down is True:
                break
            asyncio.create_task(self.check_new_peak())
            await self.sync_event.wait()
            self.last_new_peak_messages = LRUCache(5)
            self.sync_event.clear()

            if self._shut_down is True:
                break
            try:
                assert self.wallet_state_manager is not None
                self.wallet_state_manager.set_sync_mode(True)
                await self._sync()
            except Exception as e:
                tb = traceback.format_exc()
                self.log.error(f"Loop exception in sync {e}. {tb}")
            finally:
                if self.wallet_state_manager is not None:
                    self.wallet_state_manager.set_sync_mode(False)
                for peer, peak in self.last_new_peak_messages.cache.items():
                    asyncio.create_task(self.new_peak_wallet(peak, peer))
            self.log.info("Loop end in sync job")
Example #3
    async def create(cls):
        self = cls()
        self.db_path = Path("pooldb.sqlite")
        self.connection = await aiosqlite.connect(self.db_path)
        self.lock = asyncio.Lock()
        await self.connection.execute("pragma journal_mode=wal")
        await self.connection.execute("pragma synchronous=2")
        await self.connection.execute(("CREATE TABLE IF NOT EXISTS farmer("
                                       "singleton_genesis text PRIMARY KEY,"
                                       " owner_public_key text,"
                                       " pool_puzzle_hash text,"
                                       " relative_lock_height bigint,"
                                       " p2_singleton_puzzle_hash text,"
                                       " blockchain_height bigint,"
                                       " singleton_coin_id text,"
                                       " points bigint,"
                                       " difficulty bigint,"
                                       " rewards_target text,"
                                       " is_pool_member tinyint)"))

        # Useful for reorg lookups
        await self.connection.execute(
            "CREATE INDEX IF NOT EXISTS scan_ph on farmer(p2_singleton_puzzle_hash)"
        )

        await self.connection.commit()
        self.coin_record_cache = LRUCache(1000)

        return self
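
A hedged sketch of how the LRUCache created above might sit in front of the farmer table; the method name, the raw-row return value, and the use of the lock are assumptions, not the pool store's actual API.

    async def get_farmer_row(self, singleton_genesis: str):
        # Return a cached row if we have seen this singleton before.
        cached = self.coin_record_cache.get(singleton_genesis)
        if cached is not None:
            return cached
        async with self.lock:
            cursor = await self.connection.execute(
                "SELECT * FROM farmer WHERE singleton_genesis=?", (singleton_genesis,)
            )
            row = await cursor.fetchone()
            await cursor.close()
        if row is not None:
            self.coin_record_cache.put(singleton_genesis, row)
        return row
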
Example #4
    async def create(cls,
                     db_wrapper: DBWrapper,
                     cache_size: uint32 = uint32(60000)):
        self = cls()

        self.cache_size = cache_size
        self.db_wrapper = db_wrapper
        self.coin_record_db = db_wrapper.db

        if self.db_wrapper.db_version == 2:

            # the coin_name is unique in this table because the CoinStore always
            # represents only a single peak
            await self.coin_record_db.execute(
                "CREATE TABLE IF NOT EXISTS coin_record("
                "coin_name blob PRIMARY KEY,"
                " confirmed_index bigint,"
                " spent_index bigint,"  # if this is zero, it means the coin has not been spent
                " coinbase int,"
                " puzzle_hash blob,"
                " coin_parent blob,"
                " amount blob,"  # we use a blob of 8 bytes to store uint64
                " timestamp bigint)")

        else:

            # the coin_name is unique in this table because the CoinStore always
            # represents only a single peak
            await self.coin_record_db.execute(
                ("CREATE TABLE IF NOT EXISTS coin_record("
                 "coin_name text PRIMARY KEY,"
                 " confirmed_index bigint,"
                 " spent_index bigint,"
                 " spent int,"
                 " coinbase int,"
                 " puzzle_hash text,"
                 " coin_parent text,"
                 " amount blob,"
                 " timestamp bigint)"))

        # Useful for reorg lookups
        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
        )

        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)"
        )

        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)"
        )

        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_parent_index on coin_record(coin_parent)"
        )

        await self.coin_record_db.commit()
        self.coin_record_cache = LRUCache(cache_size)
        return self
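
As an illustration of what the coin_puzzle_hash index above is for, here is a hedged query sketch; the method name is hypothetical, and the puzzle_hash argument would be bytes under the v2 schema or a hex string under the v1 schema.

    async def count_coins_with_puzzle_hash(self, puzzle_hash) -> int:
        # Hedged sketch: a lookup served by the coin_puzzle_hash index created above.
        cursor = await self.coin_record_db.execute(
            "SELECT COUNT(*) FROM coin_record WHERE puzzle_hash=?", (puzzle_hash,)
        )
        row = await cursor.fetchone()
        await cursor.close()
        return row[0]
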
Example #5
    async def create(cls, db_wrapper: DBWrapper):
        self = cls()

        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
            " timestamp int, block blob)"
        )

        await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")

        # Block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
            "block blob, sub_epoch_summary blob, is_peak tinyint)"
        )

        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
        )

        # Height index so we can look up in order of height for sync purposes
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")

        await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
        await self.db.commit()
        self.block_cache = LRUCache(1000)
        return self
Example #6
    def test_cached_bls(self):
        n_keys = 10
        seed = b"a" * 31
        sks = [AugSchemeMPL.key_gen(seed + bytes([i])) for i in range(n_keys)]
        pks = [bytes(sk.get_g1()) for sk in sks]

        msgs = [("msg-%d" % (i,)).encode() for i in range(n_keys)]
        sigs = [AugSchemeMPL.sign(sk, msg) for sk, msg in zip(sks, msgs)]
        agg_sig = AugSchemeMPL.aggregate(sigs)

        pks_half = pks[: n_keys // 2]
        msgs_half = msgs[: n_keys // 2]
        sigs_half = sigs[: n_keys // 2]
        agg_sig_half = AugSchemeMPL.aggregate(sigs_half)

        assert AugSchemeMPL.aggregate_verify([G1Element.from_bytes(pk) for pk in pks], msgs, agg_sig)

        # Verify with empty cache and populate it
        assert cached_bls.aggregate_verify(pks_half, msgs_half, agg_sig_half, True)
        # Verify with partial cache hit
        assert cached_bls.aggregate_verify(pks, msgs, agg_sig, True)
        # Verify with full cache hit
        assert cached_bls.aggregate_verify(pks, msgs, agg_sig)

        # Use a small cache which can not accommodate all pairings
        local_cache = LRUCache(n_keys // 2)
        # Verify signatures and cache pairings one at a time
        for pk, msg, sig in zip(pks_half, msgs_half, sigs_half):
            assert cached_bls.aggregate_verify([pk], [msg], sig, True, local_cache)
        # Verify the same messages with aggregated signature (full cache hit)
        assert cached_bls.aggregate_verify(pks_half, msgs_half, agg_sig_half, False, local_cache)
        # Verify more messages (partial cache hit)
        assert cached_bls.aggregate_verify(pks, msgs, agg_sig, False, local_cache)
Example #7
    def __init__(self, private_key: PrivateKey, config: Dict,
                 constants: ConsensusConstants):
        self.log = logging.getLogger(__name__)
        self.private_key = private_key
        self.public_key: G1Element = private_key.get_g1()
        self.config = config
        self.constants = constants
        self.node_rpc_client = None

        self.store: Optional[PoolStore] = None

        self.pool_fee = 0.01

        # This number should be held constant and be consistent for every pool in the network
        self.iters_limit = 734000000

        # This number should not be changed, since users will put this into their singletons
        self.relative_lock_height = uint32(100)

        # TODO: potentially tweak these numbers for security and performance
        self.pool_url = "https://myreferencepool.com"
        self.min_difficulty = uint64(100)  # 100 difficulty is about 1 proof a day per plot
        self.default_difficulty: uint64 = uint64(100)
        self.max_difficulty = uint64(1000)

        # TODO: store this information in a persistent DB
        self.account_points: Dict[bytes, int] = {}  # Points are added by submitting partials
        self.account_rewards_targets: Dict[bytes, bytes] = {}

        self.pending_point_partials: Optional[asyncio.Queue] = None
        self.recent_points_added: LRUCache = LRUCache(20000)

        # This is where the block rewards will get paid out to. The pool needs to support this address forever,
        # since the farmers will encode it into their singleton on the blockchain.
        self.default_pool_puzzle_hash: bytes32 = decode_puzzle_hash(
            "xch12ma5m7sezasgh95wkyr8470ngryec27jxcvxcmsmc4ghy7c4njssnn623q")

        # We need to check for slow farmers. If farmers cannot submit proofs in time, they won't be able to win
        # any rewards either. This number can be tweaked to be more or less strict. More strict ensures everyone
        # gets high rewards, but it might cause some of the slower farmers to not be able to participate in the pool.
        self.partial_time_limit: int = 25

        # There is always a risk of a reorg, in which case we cannot reward farmers that submitted partials in that
        # reorg. That is why we have a time delay before changing any account points.
        self.partial_confirmation_delay: int = 300

        self.full_node_client: Optional[FullNodeRpcClient] = None
        self.confirm_partials_loop_task: Optional[asyncio.Task] = None
        self.difficulty_change_time: Dict[bytes32, uint64] = {}

        self.scan_p2_singleton_puzzle_hashes: Set[bytes32] = set()
        self.blockchain_state = {"peak": None}
Example #8
    def __init__(self, constants: ConsensusConstants):
        self.candidate_blocks = {}
        self.candidate_backup_blocks = {}
        self.seen_unfinished_blocks = set()
        self.unfinished_blocks = {}
        self.finished_sub_slots = []
        self.future_eos_cache = {}
        self.future_sp_cache = {}
        self.future_ip_cache = {}
        self.recent_signage_points = LRUCache(500)
        self.recent_eos = LRUCache(50)
        self.requesting_unfinished_blocks = set()
        self.previous_generator = None
        self.future_cache_key_times = {}
        self.constants = constants
        self.clear_slots()
        self.initialize_genesis_sub_slot()
        self.pending_tx_request = {}
        self.peers_with_tx = {}
        self.tx_fetch_tasks = {}
        self.serialized_wp_message = None
        self.serialized_wp_message_tip = None
Example #9
    def test_lru_cache(self):
        cache = LRUCache(5)

        assert cache.get(b"0") is None

        assert len(cache.cache) == 0
        cache.put(b"0", 1)
        assert len(cache.cache) == 1
        assert cache.get(b"0") == 1
        cache.put(b"0", 2)
        cache.put(b"0", 3)
        cache.put(b"0", 4)
        cache.put(b"0", 6)
        assert cache.get(b"0") == 6
        assert len(cache.cache) == 1

        cache.put(b"1", 1)
        assert len(cache.cache) == 2
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        cache.put(b"2", 2)
        assert len(cache.cache) == 3
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2
        cache.put(b"3", 3)
        assert len(cache.cache) == 4
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2
        assert cache.get(b"3") == 3
        cache.put(b"4", 4)
        assert len(cache.cache) == 5
        assert cache.get(b"0") == 6
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2
        assert cache.get(b"4") == 4
        cache.put(b"5", 5)
        assert cache.get(b"5") == 5
        assert len(cache.cache) == 5
        print(cache.cache)
        assert cache.get(b"3") is None  # 3 is least recently used
        assert cache.get(b"1") == 1
        assert cache.get(b"2") == 2
        cache.put(b"7", 7)
        assert len(cache.cache) == 5
        assert cache.get(b"0") is None
        assert cache.get(b"1") == 1
Example #10
def validate_clvm_and_signature(
        spend_bundle_bytes: bytes, max_cost: int, cost_per_byte: int,
        additional_data: bytes
) -> Tuple[Optional[Err], bytes, Dict[bytes, bytes]]:
    """
    Validates CLVM and aggregate signature for a spend bundle. This is meant to be called under a ProcessPoolExecutor
    in order to validate the heavy parts of a transaction in a separate process. Returns an optional error,
    the NPCResult, and a cache of the newly validated pairings (if there was no error).
    """
    try:
        bundle: SpendBundle = SpendBundle.from_bytes(spend_bundle_bytes)
        program = simple_solution_generator(bundle)
        # npc contains names of the coins removed, puzzle_hashes and their spend conditions
        result: NPCResult = get_name_puzzle_conditions(
            program, max_cost, cost_per_byte=cost_per_byte, mempool_mode=True)

        if result.error is not None:
            return Err(result.error), b"", {}

        pks: List[G1Element] = []
        msgs: List[bytes32] = []
        # TODO: address hint error and remove ignore
        #       error: Incompatible types in assignment (expression has type "List[bytes]", variable has type
        #       "List[bytes32]")  [assignment]
        pks, msgs = pkm_pairs(result.npc_list,
                              additional_data)  # type: ignore[assignment]

        # Verify aggregated signature
        cache: LRUCache = LRUCache(10000)
        if not cached_bls.aggregate_verify(
                pks, msgs, bundle.aggregated_signature, True, cache):
            return Err.BAD_AGGREGATE_SIGNATURE, b"", {}
        new_cache_entries: Dict[bytes, bytes] = {}
        for k, v in cache.cache.items():
            new_cache_entries[k] = bytes(v)
    except ValidationError as e:
        return e.code, b"", {}
    except Exception:
        return Err.UNKNOWN, b"", {}

    return None, bytes(result), new_cache_entries
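
The docstring says this function is meant to run under a ProcessPoolExecutor; a hedged sketch of driving it that way follows (the wrapper name and wiring are illustrative, not the node's actual mempool code).

import asyncio
from concurrent.futures import ProcessPoolExecutor


async def pre_validate_spend_bundle(executor: ProcessPoolExecutor,
                                    spend_bundle_bytes: bytes,
                                    max_cost: int,
                                    cost_per_byte: int,
                                    additional_data: bytes):
    # Hedged sketch: off-load the CPU-heavy CLVM and signature checks to a
    # worker process and await the (error, NPCResult bytes, cache entries) tuple.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        executor,
        validate_clvm_and_signature,
        spend_bundle_bytes,
        max_cost,
        cost_per_byte,
        additional_data,
    )
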
Example #11
    async def create(cls,
                     db_wrapper: DBWrapper,
                     cache_size: uint32 = uint32(60000)):
        self = cls()

        self.cache_size = cache_size
        self.db_wrapper = db_wrapper
        self.coin_record_db = db_wrapper.db
        await self.coin_record_db.execute("pragma journal_mode=wal")
        await self.coin_record_db.execute("pragma synchronous=2")
        await self.coin_record_db.execute(
            ("CREATE TABLE IF NOT EXISTS coin_record("
             "coin_name text PRIMARY KEY,"
             " confirmed_index bigint,"
             " spent_index bigint,"
             " spent int,"
             " coinbase int,"
             " puzzle_hash text,"
             " coin_parent text,"
             " amount blob,"
             " timestamp bigint)"))

        # Useful for reorg lookups
        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
        )

        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)"
        )

        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_spent on coin_record(spent)")

        await self.coin_record_db.execute(
            "CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)"
        )

        await self.coin_record_db.commit()
        self.coin_record_cache = LRUCache(cache_size)
        return self
Example #12
    def __init__(
        self,
        config: Dict,
        keychain: Keychain,
        root_path: Path,
        consensus_constants: ConsensusConstants,
        name: str = None,
    ):
        self.config = config
        self.constants = consensus_constants
        self.root_path = root_path
        if name:
            self.log = logging.getLogger(name)
        else:
            self.log = logging.getLogger(__name__)
        # Normal operation data
        self.cached_blocks: Dict = {}
        self.future_block_hashes: Dict = {}
        self.keychain = keychain

        # Sync data
        self._shut_down = False
        self.proof_hashes: List = []
        self.header_hashes: List = []
        self.header_hashes_error = False
        self.short_sync_threshold = 15  # Change the test when changing this
        self.potential_blocks_received: Dict = {}
        self.potential_header_hashes: Dict = {}
        self.state_changed_callback = None
        self.wallet_state_manager = None
        self.backup_initialized = False  # Delay first launch sync after user imports backup info or decides to skip
        self.server = None
        self.wsm_close_task = None
        self.sync_task: Optional[asyncio.Task] = None
        self.new_peak_lock: Optional[asyncio.Lock] = None
        self.logged_in_fingerprint: Optional[int] = None
        self.peer_task = None
        self.logged_in = False
        self.last_new_peak_messages = LRUCache(5)
Example #13
    def __init__(self, private_key: PrivateKey, config: Dict,
                 constants: ConsensusConstants):
        self.log = logging
        # If you want to log to a file: use filename='example.log', encoding='utf-8'
        self.log.basicConfig(level=logging.INFO)

        self.private_key = private_key
        self.public_key: G1Element = private_key.get_g1()
        self.config = config
        self.constants = constants
        self.node_rpc_client = None
        self.wallet_rpc_client = None

        self.store: Optional[PoolStore] = None

        self.pool_fee = 0.01

        # This number should be held constant and be consistent for every pool in the network. DO NOT CHANGE
        self.iters_limit = self.constants.POOL_SUB_SLOT_ITERS // 64

        # This number should not be changed, since users will put this into their singletons
        self.relative_lock_height = uint32(100)

        # TODO(pool): potentially tweak these numbers for security and performance
        self.pool_url = "https://myreferencepool.com"
        self.min_difficulty = uint64(10)  # 10 difficulty is about 1 proof a day per plot
        self.default_difficulty: uint64 = uint64(10)

        self.pending_point_partials: Optional[asyncio.Queue] = None
        self.recent_points_added: LRUCache = LRUCache(20000)

        # This is where the block rewards will get paid out to. The pool needs to support this address forever,
        # since the farmers will encode it into their singleton on the blockchain.

        self.default_pool_puzzle_hash: bytes32 = bytes32(
            decode_puzzle_hash(
                "xch12ma5m7sezasgh95wkyr8470ngryec27jxcvxcmsmc4ghy7c4njssnn623q"
            ))

        # The pool fees will be sent to this address
        self.pool_fee_puzzle_hash: bytes32 = bytes32(
            decode_puzzle_hash(
                "txch1h8ggpvqzhrquuchquk7s970cy0m0e0yxd4hxqwzqkpzxk9jx9nzqmd67ux"
            ))

        # This is the wallet fingerprint and ID for the wallet spending the funds from `self.default_pool_puzzle_hash`
        self.wallet_fingerprint = 2938470744
        self.wallet_id = "1"

        # We need to check for slow farmers. If farmers cannot submit proofs in time, they won't be able to win
        # any rewards either. This number can be tweaked to be more or less strict. More strict ensures everyone
        # gets high rewards, but it might cause some of the slower farmers to not be able to participate in the pool.
        self.partial_time_limit: int = 25

        # There is always a risk of a reorg, in which case we cannot reward farmers that submitted partials in that
        # reorg. That is why we have a time delay before changing any account points.
        self.partial_confirmation_delay: int = 30

        # Keeps track of when each farmer last changed their difficulty, to rate limit how often they can change it
        # This helps when the farmer is farming from two machines at the same time (with conflicting difficulties)
        self.difficulty_change_time: Dict[bytes32, uint64] = {}

        # These are the puzzle hashes that we want to look for on chain, that we can claim to our pool
        self.scan_p2_singleton_puzzle_hashes: Set[bytes32] = set()

        # Don't scan anything before this height, for efficiency (for example pool start date)
        self.scan_start_height: uint32 = uint32(1000)

        # Interval for scanning and collecting the pool rewards
        self.collect_pool_rewards_interval = 600

        # After this many confirmations, a transaction is considered final and irreversible
        self.confirmation_security_threshold = 6

        # Interval for making payout transactions to farmers
        self.payment_interval = 600

        # We will not make transactions with more targets than this, to ensure our transaction gets into the blockchain
        # faster.
        self.max_additions_per_transaction = 400

        # This is the list of payments that we have not sent yet, to farmers
        self.pending_payments: Optional[asyncio.Queue] = None

        # Keeps track of the latest state of our node
        self.blockchain_state = {"peak": None}

        # Whether or not the wallet is synced (required to make payments)
        self.wallet_synced = False

        # Tasks (infinite While loops) for different purposes
        self.confirm_partials_loop_task: Optional[asyncio.Task] = None
        self.collect_pool_rewards_loop_task: Optional[asyncio.Task] = None
        self.create_payment_loop_task: Optional[asyncio.Task] = None
        self.submit_payment_loop_task: Optional[asyncio.Task] = None
        self.get_peak_loop_task: Optional[asyncio.Task] = None

        self.node_rpc_client: Optional[FullNodeRpcClient] = None
        self.wallet_rpc_client: Optional[WalletRpcClient] = None
Example #14
    def __init__(self, private_key: PrivateKey, config: Dict,
                 constants: ConsensusConstants):
        self.follow_singleton_tasks: Dict[bytes32, asyncio.Task] = {}
        self.log = logging
        # If you want to log to a file: use filename='example.log', encoding='utf-8'
        self.log.basicConfig(level=logging.INFO)

        # We load our configurations from here
        with open(os.getcwd() + "/config.yaml") as f:
            pool_config: Dict = yaml.safe_load(f)

        # Set our pool info here
        self.info_default_res = pool_config["pool_info"]["default_res"]
        self.info_name = pool_config["pool_info"]["name"]
        self.info_logo_url = pool_config["pool_info"]["logo_url"]
        self.info_description = pool_config["pool_info"]["description"]
        self.welcome_message = pool_config["welcome_message"]

        self.private_key = private_key
        self.public_key: G1Element = private_key.get_g1()
        self.config = config
        self.constants = constants
        self.node_rpc_client = None
        self.wallet_rpc_client = None

        self.store: Optional[PoolStore] = None

        self.pool_fee = pool_config["pool_fee"]

        # This number should be held constant and be consistent for every pool in the network. DO NOT CHANGE
        self.iters_limit = self.constants.POOL_SUB_SLOT_ITERS // 64

        # This number should not be changed, since users will put this into their singletons
        self.relative_lock_height = uint32(100)

        # TODO(pool): potentially tweak these numbers for security and performance
        # This is what the user enters into the input field. This exact value will be stored on the blockchain
        self.pool_url = pool_config["pool_url"]
        self.min_difficulty = uint64(
            pool_config["min_difficulty"]
        )  # 10 difficulty is about 1 proof a day per plot
        self.default_difficulty: uint64 = uint64(
            pool_config["default_difficulty"])

        self.pending_point_partials: Optional[asyncio.Queue] = None
        self.recent_points_added: LRUCache = LRUCache(20000)

        # The time in minutes for an authentication token to be valid. See "Farmer authentication" in SPECIFICATION.md
        self.authentication_token_timeout: uint8 = pool_config[
            "authentication_token_timeout"]

        # This is where the block rewards will get paid out to. The pool needs to support this address forever,
        # since the farmers will encode it into their singleton on the blockchain. WARNING: the default pool code
        # completely spends this wallet and distributes it to users, so don't put any additional funds in here
        # that you do not want to distribute. Even if the funds are in a different address than this one, they WILL
        # be spent by this code! So only put funds that you want to distribute to pool members here.

        # Using 2164248527
        self.default_target_puzzle_hash: bytes32 = bytes32(
            decode_puzzle_hash(pool_config["default_target_address"]))

        # The pool fees will be sent to this address. This MUST be on a different key than the target_puzzle_hash,
        # otherwise, the fees will be sent to the users. Using 690783650
        self.pool_fee_puzzle_hash: bytes32 = bytes32(
            decode_puzzle_hash(pool_config["pool_fee_address"]))

        # This is the wallet fingerprint and ID for the wallet spending the funds from `self.default_target_puzzle_hash`
        self.wallet_fingerprint = pool_config["wallet_fingerprint"]
        self.wallet_id = pool_config["wallet_id"]

        # We need to check for slow farmers. If farmers cannot submit proofs in time, they won't be able to win
        # any rewards either. This number can be tweaked to be more or less strict. More strict ensures everyone
        # gets high rewards, but it might cause some of the slower farmers to not be able to participate in the pool.
        self.partial_time_limit: int = pool_config["partial_time_limit"]

        # There is always a risk of a reorg, in which case we cannot reward farmers that submitted partials in that
        # reorg. That is why we have a time delay before changing any account points.
        self.partial_confirmation_delay: int = pool_config[
            "partial_confirmation_delay"]

        # These are the puzzle hashes that we want to look for on chain, that we can claim to our pool
        self.scan_p2_singleton_puzzle_hashes: Set[bytes32] = set()

        # Don't scan anything before this height, for efficiency (for example pool start date)
        self.scan_start_height: uint32 = uint32(
            pool_config["scan_start_height"])

        # Interval for scanning and collecting the pool rewards
        self.collect_pool_rewards_interval = pool_config[
            "collect_pool_rewards_interval"]

        # After this many confirmations, a transaction is considered final and irreversible
        self.confirmation_security_threshold = pool_config[
            "confirmation_security_threshold"]

        # Interval for making payout transactions to farmers
        self.payment_interval = pool_config["payment_interval"]

        # We will not make transactions with more targets than this, to ensure our transaction gets into the blockchain
        # faster.
        self.max_additions_per_transaction = pool_config[
            "max_additions_per_transaction"]

        # This is the list of payments that we have not sent yet, to farmers
        self.pending_payments: Optional[asyncio.Queue] = None

        # Keeps track of the latest state of our node
        self.blockchain_state = {"peak": None}

        # Whether or not the wallet is synced (required to make payments)
        self.wallet_synced = False

        # We target this many partials over this number of seconds. We adjust after receiving this many partials.
        self.number_of_partials_target: int = pool_config[
            "number_of_partials_target"]
        self.time_target: int = pool_config["time_target"]

        # Tasks (infinite While loops) for different purposes
        self.confirm_partials_loop_task: Optional[asyncio.Task] = None
        self.collect_pool_rewards_loop_task: Optional[asyncio.Task] = None
        self.create_payment_loop_task: Optional[asyncio.Task] = None
        self.submit_payment_loop_task: Optional[asyncio.Task] = None
        self.get_peak_loop_task: Optional[asyncio.Task] = None

        self.node_rpc_client: Optional[FullNodeRpcClient] = None
        self.wallet_rpc_client: Optional[WalletRpcClient] = None
Example #15
    async def create(cls, db_wrapper: DBWrapper):
        self = cls()

        # All full blocks which have been added to the blockchain. Header_hash -> block
        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db

        if self.db_wrapper.db_version == 2:

            # TODO: most data in block is duplicated in block_record. The only
            # reason for this is that our parsing of a FullBlock is so slow,
            # it's faster to store duplicate data to parse less when we just
            # need the BlockRecord. Once we fix the parsing (and data structure)
            # of FullBlock, this can use less space
            await self.db.execute(
                "CREATE TABLE IF NOT EXISTS full_blocks("
                "header_hash blob PRIMARY KEY,"
                "prev_hash blob,"
                "height bigint,"
                "sub_epoch_summary blob,"
                "is_fully_compactified tinyint,"
                "in_main_chain tinyint,"
                "block blob,"
                "block_record blob)"
            )

            # This is a single-row table containing the hash of the current
            # peak. The "key" field is there to make update statements simple
            await self.db.execute("CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)")

            await self.db.execute("CREATE INDEX IF NOT EXISTS height on full_blocks(height)")

            # Sub epoch segments for weight proofs
            await self.db.execute(
                "CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3("
                "ses_block_hash blob PRIMARY KEY,"
                "challenge_segments blob)"
            )

            await self.db.execute(
                "CREATE INDEX IF NOT EXISTS is_fully_compactified ON"
                " full_blocks(is_fully_compactified, in_main_chain) WHERE in_main_chain=1"
            )
            await self.db.execute(
                "CREATE INDEX IF NOT EXISTS main_chain ON full_blocks(height, in_main_chain) WHERE in_main_chain=1"
            )

        else:

            await self.db.execute(
                "CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
                "  is_block tinyint, is_fully_compactified tinyint, block blob)"
            )

            # Block records
            await self.db.execute(
                "CREATE TABLE IF NOT EXISTS block_records(header_hash "
                "text PRIMARY KEY, prev_hash text, height bigint,"
                "block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)"
            )

            # Sub epoch segments for weight proofs
            await self.db.execute(
                "CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY,"
                "challenge_segments blob)"
            )

            # Height index so we can look up in order of height for sync purposes
            await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
            await self.db.execute(
                "CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)"
            )

            await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")

            await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak) where is_peak = 1")

        await self.db.commit()
        self.block_cache = LRUCache(1000)
        self.ses_challenge_cache = LRUCache(50)
        return self
Example #16
                return []
        pairings.append(pairing)

    for i, pairing in enumerate(pairings):
        if pairing is None:
            aug_msg = bytes(pks[i]) + msgs[i]
            aug_hash: G2Element = AugSchemeMPL.g2_from_message(aug_msg)
            pairing = pks[i].pair(aug_hash)

            h = bytes(std_hash(aug_msg))
            cache.put(h, pairing)
            pairings[i] = pairing

    return pairings


LOCAL_CACHE: LRUCache = LRUCache(10000)


def aggregate_verify(pks: List[G1Element],
                     msgs: List[bytes],
                     sig: G2Element,
                     force_cache: bool = False,
                     cache: LRUCache = LOCAL_CACHE):
    pairings: List[GTElement] = get_pairings(cache, pks, msgs, force_cache)
    if len(pairings) == 0:
        return AugSchemeMPL.aggregate_verify(pks, msgs, sig)

    pairings_prod: GTElement = functools.reduce(GTElement.__mul__, pairings)
    return pairings_prod == sig.pair(G1Element.generator())
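
A short hedged usage sketch of aggregate_verify and the module-level LOCAL_CACHE above; the seed and message are arbitrary illustration values, and the sketch assumes the same AugSchemeMPL import the snippet already relies on.

sk = AugSchemeMPL.key_gen(b"\x01" * 32)
pk: G1Element = sk.get_g1()
msg = b"example message"
sig: G2Element = AugSchemeMPL.sign(sk, msg)

# First call computes the pairing and stores it in LOCAL_CACHE (force_cache=True).
assert aggregate_verify([pk], [msg], sig, force_cache=True)
# Second call is served from the cached pairing instead of a full aggregate verify.
assert aggregate_verify([pk], [msg], sig)
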