Example 1
def do_poet0_genesis(args):

    # Get ledger config:
    # ...set the default value of config because argparse 'default' in
    # ...combination with action='append' does the wrong thing.
    if args.config is None:
        args.config = ['txnvalidator.js']
    # ...convert any comma-delimited argument strings to list elements
    for arglist in [args.config]:
        if arglist is not None:
            for arg in arglist:
                if ',' in arg:
                    loc = arglist.index(arg)
                    arglist.pop(loc)
                    for element in reversed(arg.split(',')):
                        arglist.insert(loc, element)
    options_config = ArgparseOptionsConfig([('conf_dir', 'ConfigDirectory'),
                                            ('data_dir', 'DataDirectory'),
                                            ('type', 'LedgerType'),
                                            ('log_config', 'LogConfigFile'),
                                            ('keyfile', 'KeyFile'),
                                            ('node', 'NodeName'),
                                            ('verbose', 'Verbose'),
                                            ('family', 'TransactionFamilies')],
                                           args)
    cfg = get_validator_configuration(args.config, options_config)

    # Obtain Journal object:
    # ...set WaitTimer globals
    target_wait_time = cfg.get("TargetWaitTime")
    initial_wait_time = cfg.get("InitialWaitTime")
    certificate_sample_length = cfg.get('CertificateSampleLength')
    fixed_duration_blocks = cfg.get("FixedDurationBlocks")
    from journal.consensus.poet0.wait_timer import set_wait_timer_globals
    set_wait_timer_globals(
        target_wait_time,
        initial_wait_time,
        certificate_sample_length,
        fixed_duration_blocks,
    )
    # ...build Gossip dependency
    (nd, _) = parse_networking_info(cfg)
    minimum_retries = cfg.get("MinimumRetries")
    retry_interval = cfg.get("RetryInterval")
    gossiper = Gossip(nd, minimum_retries, retry_interval)
    # ...build Journal
    min_txn_per_block = cfg.get("MinimumTransactionsPerBlock")
    max_txn_per_block = cfg.get("MaxTransactionsPerBlock")
    max_txn_age = cfg.get("MaxTxnAge")
    genesis_ledger = cfg.get("GenesisLedger")
    data_directory = cfg.get("DataDirectory")
    store_type = cfg.get("StoreType")
    stat_domains = {}
    from journal.consensus.poet0.poet_consensus import PoetConsensus
    consensus_obj = PoetConsensus(cfg)
    journal = Journal(
        gossiper.LocalNode,
        gossiper,
        gossiper.dispatcher,
        consensus_obj,
        stat_domains,
        minimum_transactions_per_block=min_txn_per_block,
        max_transactions_per_block=max_txn_per_block,
        max_txn_age=max_txn_age,
        genesis_ledger=genesis_ledger,
        data_directory=data_directory,
        store_type=store_type,
    )
    # ...add 'built in' txn families
    default_transaction_families = [endpoint_registry]
    for txn_family in default_transaction_families:
        txn_family.register_transaction_types(journal)
    # ...add auxiliary transaction families
    for txn_family_module_name in cfg.get("TransactionFamilies", []):
        txn_family = importlib.import_module(txn_family_module_name)
        txn_family.register_transaction_types(journal)

    # Make genesis block:
    # ...make sure there is no current chain here, or fail
    # ...create block g_block
    g_block = journal.build_block(genesis=True)
    journal.claim_block(g_block)
    # ...simulate receiving the genesis block msg from reactor to force commit
    g_block_msg = gossiper.IncomingMessageQueue.pop()
    journal.dispatcher.dispatch(g_block_msg)
    journal.initialization_complete()
    head = journal.most_recent_committed_block_id
    chain_len = len(journal.committed_block_ids())

    # Run shutdown:
    # ...persist new state
    journal.shutdown()
    # ...release gossip obj's UDP port
    gossiper.Listener.loseConnection()
    gossiper.Listener.connectionLost(reason=None)

    # Log genesis data, then write it out to ease dissemination
    genesis_data = {
        'GenesisId': head,
        'ChainLength': chain_len,
    }
    gblock_fname = genesis_info_file_name(cfg['DataDirectory'])
    LOGGER.info('genesis data: %s', genesis_data)
    LOGGER.info('writing genesis data to %s', gblock_fname)
    with open(gblock_fname, 'w') as f:
        f.write(json.dumps(genesis_data))
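
do_poet0_genesis expects an argparse-style Namespace whose attribute names match the ArgparseOptionsConfig mapping above. A minimal, hypothetical command-line wrapper is sketched below; the option and destination names mirror that mapping, but the real txnvalidator/admin CLI may differ.

import argparse

def make_poet0_genesis_parser():
    # Hypothetical parser; destinations follow the (attribute, config key)
    # pairs passed to ArgparseOptionsConfig in do_poet0_genesis.
    parser = argparse.ArgumentParser(prog='poet0-genesis')
    parser.add_argument('--config', action='append',
                        help='config file(s); may be repeated or comma-delimited')
    parser.add_argument('--conf-dir', dest='conf_dir')
    parser.add_argument('--data-dir', dest='data_dir')
    parser.add_argument('--type', dest='type')
    parser.add_argument('--log-config', dest='log_config')
    parser.add_argument('--keyfile', dest='keyfile')
    parser.add_argument('--node', dest='node')
    parser.add_argument('--verbose', '-v', dest='verbose', action='count', default=0)
    parser.add_argument('--family', dest='family', action='append')
    return parser

# Illustrative invocation (node name and path are placeholders):
# args = make_poet0_genesis_parser().parse_args(
#     ['--node', 'base000', '--data-dir', '/tmp/poet0-genesis'])
# do_poet0_genesis(args)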
Example 2
def local_main(config, windows_service=False, daemonized=False):
    """
    Implement the actual application logic for starting the
    txnvalidator
    """

    # If this process has been daemonized, then we want to make
    # sure to print out an information message as quickly as possible
    # to the logger for debugging purposes.
    if daemonized:
        logger.info('validator has been daemonized')

    # These imports are delayed because of poor interactions between
    # epoll and fork.  Unfortunately, these import statements set up
    # epoll and we need that to happen after the forking done with
    # Daemonize().  This is a side-effect of importing twisted.
    from twisted.internet import reactor
    from txnserver.validator import parse_networking_info
    from txnserver.validator import Validator
    from txnserver import web_api
    from gossip.gossip_core import GossipException
    from gossip.gossip_core import Gossip

    logger.warn('validator pid is %s', os.getpid())

    consensus_type = config.get('LedgerType', 'poet0')
    stat_domains = {}

    try:
        (node, http_port) = parse_networking_info(config)
        # to construct a validator, we pass it a consensus specific journal
        validator = None
        journal = None
        # Gossip parameters
        minimum_retries = config.get("MinimumRetries")
        retry_interval = config.get("RetryInterval")
        gossip = Gossip(node, minimum_retries, retry_interval, stat_domains)
        # WaitTimer globals
        target_wait_time = config.get("TargetWaitTime")
        initial_wait_time = config.get("InitialWaitTime")
        certificate_sample_length = config.get('CertificateSampleLength')
        fixed_duration_blocks = config.get("FixedDurationBlocks")
        minimum_wait_time = config.get("MinimumWaitTime")
        # Journal parameters
        min_txn_per_block = config.get("MinimumTransactionsPerBlock")
        max_txn_per_block = config.get("MaxTransactionsPerBlock")
        max_txn_age = config.get("MaxTxnAge")
        data_directory = config.get("DataDirectory")
        store_type = config.get("StoreType")

        if consensus_type == 'poet0':
            from sawtooth_validator.consensus.poet0 import poet_consensus
            from sawtooth_validator.consensus.poet0.wait_timer \
                import set_wait_timer_globals
            set_wait_timer_globals(target_wait_time, initial_wait_time,
                                   certificate_sample_length,
                                   fixed_duration_blocks)
            # Continue to pass config to PoetConsensus for possible other
            # enclave implementations - poet_enclave.initialize
            consensus = poet_consensus.PoetConsensus(config)
        elif consensus_type == 'poet1':
            from sawtooth_validator.consensus.poet1 import poet_consensus
            from sawtooth_validator.consensus.poet1.wait_timer \
                import set_wait_timer_globals
            set_wait_timer_globals(target_wait_time, initial_wait_time,
                                   certificate_sample_length,
                                   fixed_duration_blocks, minimum_wait_time)
            # Continue to pass config to PoetConsensus for possible other
            # enclave implementations - poet_enclave.initialize
            consensus = poet_consensus.PoetConsensus(config)
        elif consensus_type == 'quorum':
            quorum = config.get("Quorum")
            nodes = config.get("Nodes")
            vote_time_interval = config.get("VoteTimeInterval")
            ballot_time_interval = config.get("BallotTimeInterval")
            voting_quorum_target_size = config.get("VotingQuorumTargetSize")
            from sawtooth_validator.consensus.quorum import quorum_consensus
            consensus = quorum_consensus.QuorumConsensus(
                vote_time_interval, ballot_time_interval,
                voting_quorum_target_size, quorum, nodes)
        elif consensus_type == 'dev_mode':
            block_publisher = config.get("DevModePublisher", False)
            block_wait_time = config.get("BlockWaitTime")
            from sawtooth_validator.consensus.dev_mode \
                import dev_mode_consensus
            consensus = dev_mode_consensus.DevModeConsensus(
                block_publisher, block_wait_time)
        else:
            warnings.warn('Unknown consensus type %s' % consensus_type)
            sys.exit(1)

        permissioned_validators =\
            config.get("WhitelistOfPermissionedValidators")

        journal = Journal(gossip.LocalNode, gossip, gossip.dispatcher,
                          consensus, permissioned_validators, stat_domains,
                          min_txn_per_block, max_txn_per_block, max_txn_age,
                          data_directory, store_type)

        validator = Validator(
            gossip,
            journal,
            stat_domains,
            config,
            windows_service=windows_service,
            http_port=http_port,
        )
    except GossipException as e:
        print >> sys.stderr, str(e)
        sys.exit(1)

    listen_info = config.get("Listen", None)
    web_api.initialize_web_server(listen_info, validator)

    # go through the list of transaction families that should be initialized in
    # this validator. the endpoint registry is always included
    if consensus_type == 'poet1':
        from sawtooth_validator.consensus.poet1 import validator_registry
        validator_registry.register_transaction_types(journal)
    for txnfamily in config.get('TransactionFamilies'):
        logger.info("adding transaction family: %s", txnfamily)
        try:
            validator.add_transaction_family(
                importlib.import_module(txnfamily))
        except ImportError:
            warnings.warn("transaction family not found: {}".format(txnfamily))
            sys.exit(1)

    # attempt to restore journal state from persistence
    try:
        validator.journal.restore()
    except KeyError as e:
        logger.error(
            "Config is not compatible with data files"
            " found on restore. Keyerror on %s", e)
        sys.exit(1)

    try:
        validator.pre_start()

        reactor.run(installSignalHandlers=False)
    except KeyboardInterrupt:
        pass
    except SystemExit as e:
        raise e
    except:
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
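
local_main consumes a plain dict of configuration values. A minimal, hypothetical dev_mode configuration covering the keys this function reads might look like the following; every value (paths, store type, module names, and the Listen string format) is illustrative rather than a documented default.

config = {
    'LedgerType': 'dev_mode',
    'DevModePublisher': True,
    'BlockWaitTime': 1,
    'NodeName': 'validator-000',
    'KeyFile': '/tmp/validator-data/validator-000.wif',
    'MinimumRetries': 3,
    'RetryInterval': 0.1,
    'MinimumTransactionsPerBlock': 1,
    'MaxTransactionsPerBlock': 1000,
    'MaxTxnAge': 3,
    'DataDirectory': '/tmp/validator-data',
    'StoreType': 'shelf',
    # Assumed 'host:port/protocol purpose' format for Listen entries.
    'Listen': ['localhost:5500/UDP gossip', 'localhost:8800/TCP http'],
    'TransactionFamilies': ['ledger.transaction.integer_key'],
    'WhitelistOfPermissionedValidators': None,
}

# local_main(config) would then build the Gossip, Journal, and Validator
# objects and block inside reactor.run() until shutdown.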
Example 3
def do_poet1_genesis(args):
    # Get journal config:
    cfg = mirror_validator_parsing(args)

    # Check for existing block store
    node_name = cfg.get("NodeName")
    data_directory = cfg.get("DataDirectory")
    store_type = cfg.get("StoreType")
    check_for_chain(data_directory, node_name, store_type)

    # Obtain Journal object:
    # ...set WaitTimer globals
    target_wait_time = cfg.get("TargetWaitTime")
    initial_wait_time = cfg.get("InitialWaitTime")
    certificate_sample_length = cfg.get('CertificateSampleLength')
    fixed_duration_blocks = cfg.get("FixedDurationBlocks")
    set_wait_timer_globals(
        target_wait_time,
        initial_wait_time,
        certificate_sample_length,
        fixed_duration_blocks,
    )
    # ...build Gossip dependency
    (nd, _) = parse_networking_info(cfg)
    minimum_retries = cfg.get("MinimumRetries")
    retry_interval = cfg.get("RetryInterval")
    gossiper = Gossip(nd, minimum_retries, retry_interval)
    # ...build Journal
    min_txn_per_block = cfg.get("MinimumTransactionsPerBlock")
    max_txn_per_block = cfg.get("MaxTransactionsPerBlock")
    max_txn_age = cfg.get("MaxTxnAge")
    stat_domains = {}
    consensus_obj = PoetConsensus(cfg)
    journal = Journal(
        gossiper.LocalNode,
        gossiper,
        gossiper.dispatcher,
        consensus_obj,
        stat_domains,
        minimum_transactions_per_block=min_txn_per_block,
        max_transactions_per_block=max_txn_per_block,
        max_txn_age=max_txn_age,
        data_directory=data_directory,
        store_type=store_type,
    )
    # ...add 'built in' txn families
    default_transaction_families = [
        endpoint_registry,
        validator_registry,
    ]
    for txn_family in default_transaction_families:
        txn_family.register_transaction_types(journal)
    # ...add auxiliary transaction families
    for txn_family_module_name in cfg.get("TransactionFamilies", []):
        txn_family = importlib.import_module(txn_family_module_name)
        txn_family.register_transaction_types(journal)

    # Make genesis block:
    # ...make sure there is no current chain here, or fail
    # ...pop VR seed (we'll presently defer resolving VR seed issues)
    vr_seed = gossiper.IncomingMessageQueue.pop()
    journal.initial_transactions.append(vr_seed.Transaction)
    # ...create block g_block (including VR seed txn just popped)
    journal.on_genesis_block.fire(journal)
    journal.initializing = False
    for txn in journal.initial_transactions:
        journal.add_pending_transaction(txn, build_block=False)
    g_block = journal.build_block(genesis=True)  # seed later...
    journal.claim_block(g_block)
    # ...simulate receiving the genesis block msg from reactor to force commit
    g_block_msg = gossiper.IncomingMessageQueue.pop()
    poet_public_key = g_block.poet_public_key
    journal.dispatcher.dispatch(g_block_msg)
    journal.initialization_complete()
    head = journal.most_recent_committed_block_id
    chain_len = len(journal.committed_block_ids())

    # Run shutdown:
    # ...persist new state
    journal.shutdown()
    # ...release gossip obj's UDP port
    gossiper.Listener.loseConnection()
    gossiper.Listener.connectionLost(reason=None)

    # Log genesis data, then write it out to ease dissemination
    genesis_data = {
        'GenesisId': head,
        'ChainLength': chain_len,
        'PoetPublicKey': poet_public_key,
    }
    gblock_fname = genesis_info_file_name(cfg['DataDirectory'])
    LOGGER.info('genesis data: %s', genesis_data)
    LOGGER.info('writing genesis data to %s', gblock_fname)
    with open(gblock_fname, 'w') as f:
        f.write(json.dumps(genesis_data, indent=4))
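
The genesis data is written as plain JSON, so downstream tooling can recover the block id and PoET public key with a short helper like the sketch below (genesis_info_file_name is the same helper used above; the name read_genesis_data is ours).

import json

def read_genesis_data(data_directory):
    # Load the JSON blob written by do_poet1_genesis; keys match the
    # genesis_data dict above. PoetPublicKey is absent for non-PoET ledgers.
    fname = genesis_info_file_name(data_directory)
    with open(fname, 'r') as f:
        data = json.load(f)
    return data['GenesisId'], data['ChainLength'], data.get('PoetPublicKey')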
Example 4
def do_dev_mode_genesis(args):
    # Get journal config:
    cfg = mirror_validator_parsing(args)

    # Check for existing block store
    node_name = cfg.get("NodeName")
    data_directory = cfg.get("DataDirectory")
    store_type = cfg.get("StoreType")
    check_for_chain(data_directory, node_name, store_type)

    # Obtain Journal object:
    # ...build Gossip dependency
    (nd, _) = parse_networking_info(cfg)
    minimum_retries = cfg.get("MinimumRetries")
    retry_interval = cfg.get("RetryInterval")
    gossiper = Gossip(nd, minimum_retries, retry_interval)
    # ...build Journal
    min_txn_per_block = cfg.get("MinimumTransactionsPerBlock")
    max_txn_per_block = cfg.get("MaxTransactionsPerBlock")
    max_txn_age = cfg.get("MaxTxnAge")
    stat_domains = {}
    consensus_obj = DevModeConsensus(block_publisher=True,
                                     block_wait_time=cfg.get('BlockWaitTime'))
    journal = Journal(gossiper.LocalNode,
                      gossiper,
                      gossiper.dispatcher,
                      consensus_obj,
                      stat_domains,
                      minimum_transactions_per_block=min_txn_per_block,
                      max_transactions_per_block=max_txn_per_block,
                      max_txn_age=max_txn_age,
                      data_directory=data_directory,
                      store_type=store_type,
                      )
    # ...add 'built in' txn families
    default_transaction_families = [
        endpoint_registry
    ]
    for txn_family in default_transaction_families:
        txn_family.register_transaction_types(journal)
    # ...add auxiliary transaction families
    for txn_family_module_name in cfg.get("TransactionFamilies", []):
        txn_family = importlib.import_module(txn_family_module_name)
        txn_family.register_transaction_types(journal)

    # Make genesis block:
    # ...make sure there is no current chain here, or fail
    # ...create block g_block
    journal.on_genesis_block.fire(journal)
    journal.initializing = False
    for txn in journal.initial_transactions:
        journal.add_pending_transaction(txn, build_block=False)
    g_block = journal.build_block(genesis=True)
    journal.claim_block(g_block)
    # ...simulate receiving the genesis block msg from reactor to force commit
    g_block_msg = gossiper.IncomingMessageQueue.pop()
    journal.dispatcher.dispatch(g_block_msg)
    journal.initialization_complete()
    head = journal.most_recent_committed_block_id
    chain_len = len(journal.committed_block_ids())

    # Run shutdown:
    # ...persist new state
    journal.shutdown()
    # ...release gossip obj's UDP port
    gossiper.Listener.loseConnection()
    gossiper.Listener.connectionLost(reason=None)

    # Log genesis data, then write it out to ease dissemination
    genesis_data = {
        'GenesisId': head,
        'ChainLength': chain_len,
    }
    gblock_fname = genesis_info_file_name(cfg['DataDirectory'])
    LOGGER.info('genesis data: %s', genesis_data)
    LOGGER.info('writing genesis data to %s', gblock_fname)
    with open(gblock_fname, 'w') as f:
        f.write(json.dumps(genesis_data))
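
Both the built-in and auxiliary transaction families are loaded through the same convention: a module that exposes register_transaction_types(journal). A hypothetical auxiliary family module satisfying that contract is sketched below; the registration calls inside it are described only in comments, because the exact journal API varies across the versions shown in these examples.

# my_txn_family.py -- hypothetical module named in cfg["TransactionFamilies"]

def register_transaction_types(journal):
    # A real family would register its message handlers with
    # journal.dispatcher and attach its transaction store/types to the
    # journal here; endpoint_registry and validator_registry above expose
    # the same entry point.
    pass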
Example 5
def local_main(config, windows_service=False, daemonized=False):
    """
    Implement the actual application logic for starting the
    txnvalidator
    """

    # If this process has been daemonized, then we want to make
    # sure to print out an information message as quickly as possible
    # to the logger for debugging purposes.
    if daemonized:
        logger.info('validator has been daemonized')

    # These imports are delayed because of poor interactions between
    # epoll and fork.  Unfortunately, these import statements set up
    # epoll and we need that to happen after the forking done with
    # Daemonize().  This is a side-effect of importing twisted.
    from twisted.internet import reactor
    from txnserver.validator import parse_networking_info
    from txnserver.validator import Validator
    from txnserver import quorum_validator
    from txnserver import web_api
    from gossip.gossip_core import GossipException

    logger.warn('validator pid is %s', os.getpid())

    ledgertype = config.get('LedgerType', 'poet0')

    try:
        (node, http_port) = parse_networking_info(config)
        # to construct a validator, we pass it a consensus specific ledger
        validator = None
        ledger = None
        # Gossip parameters
        minimum_retries = config.get("MinimumRetries")
        retry_interval = config.get("RetryInterval")
        gossip = Gossip(node, minimum_retries, retry_interval)
        # WaitTimer globals
        target_wait_time = config.get("TargetWaitTime")
        initial_wait_time = config.get("InitialWaitTime")
        certificate_sample_length = config.get('CertificateSampleLength')
        fixed_duration_blocks = config.get("FixedDurationBlocks")
        minimum_wait_time = config.get("MinimumWaitTime")
        # Journal parameters
        min_txn_per_block = config.get("MinimumTransactionsPerBlock")
        max_txn_per_block = config.get("MaxTransactionsPerBlock")
        max_txn_age = config.get("MaxTxnAge")
        genesis_ledger = config.get("GenesisLedger")
        restore = config.get("Restore")
        data_directory = config.get("DataDirectory")
        store_type = config.get("StoreType")

        if ledgertype == 'poet0':
            from journal.consensus.poet0 import poet_journal
            from journal.consensus.poet0.wait_timer \
                import set_wait_timer_globals
            set_wait_timer_globals(target_wait_time, initial_wait_time,
                                   certificate_sample_length,
                                   fixed_duration_blocks)
            # Continue to pass config to PoetJournal for possible other enclave
            # implementations - poet_enclave.initialize
            ledger = poet_journal.PoetJournal(gossip, config,
                                              min_txn_per_block,
                                              max_txn_per_block, max_txn_age,
                                              genesis_ledger, restore,
                                              data_directory, store_type)
        elif ledgertype == 'poet1':
            from journal.consensus.poet1 import poet_journal
            from journal.consensus.poet1.wait_timer \
                import set_wait_timer_globals
            set_wait_timer_globals(target_wait_time, initial_wait_time,
                                   certificate_sample_length,
                                   fixed_duration_blocks, minimum_wait_time)
            # Continue to pass config to PoetJournal for possible other enclave
            # implementations - poet_enclave.initialize
            ledger = poet_journal.PoetJournal(gossip, config,
                                              min_txn_per_block,
                                              max_txn_per_block, max_txn_age,
                                              genesis_ledger, restore,
                                              data_directory, store_type)
        elif ledgertype == 'quorum':
            quorum = config.get("Quorum")
            nodes = config.get("Nodes")
            vote_time_interval = config.get("VoteTimeInterval")
            ballot_time_interval = config.get("BallotTimeInterval")
            voting_quorum_target_size = config.get("VotingQuorumTargetSize")
            from journal.consensus.quorum import quorum_journal
            ledger = quorum_journal.QuorumJournal(
                gossip, min_txn_per_block, max_txn_per_block, max_txn_age,
                genesis_ledger, restore, data_directory, store_type,
                vote_time_interval, ballot_time_interval,
                voting_quorum_target_size)
            ledger.initialize_quorum_map(quorum, nodes)
            # quorum validator is still sub-classed for now...
            validator = quorum_validator.QuorumValidator(
                gossip,
                ledger,
                config,
                windows_service=windows_service,
                http_port=http_port)
        elif ledgertype == 'dev_mode':
            block_wait_time = config.get("BlockWaitTime")
            from journal.consensus.dev_mode import dev_mode_journal
            ledger = dev_mode_journal.DevModeJournal(
                gossip, min_txn_per_block, max_txn_per_block, max_txn_age,
                genesis_ledger, restore, data_directory, store_type,
                block_wait_time)
        else:
            warnings.warn('Unknown ledger type %s' % ledgertype)
            sys.exit(1)
        if validator is None:
            # null-check until we get rid of QuorumValidator subclass
            validator = Validator(
                gossip,
                ledger,
                config,
                windows_service=windows_service,
                http_port=http_port,
            )
    except GossipException as e:
        print >> sys.stderr, str(e)
        sys.exit(1)

    listen_info = config.get("Listen", None)
    web_api.initialize_web_server(listen_info, validator)

    # go through the list of transaction families that should be initialized in
    # this validator. the endpoint registry is always included
    for txnfamily in config.get('TransactionFamilies'):
        logger.info("adding transaction family: %s", txnfamily)
        try:
            validator.add_transaction_family(
                importlib.import_module(txnfamily))
        except ImportError:
            warnings.warn("transaction family not found: {}".format(txnfamily))
            sys.exit(1)

    try:
        validator.pre_start()

        reactor.run()
    except KeyboardInterrupt:
        pass
    except SystemExit as e:
        raise e
    except:
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
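
The delayed-import comment above refers to the process being forked with Daemonize() before local_main runs. A hypothetical launcher, assuming the third-party daemonize package, could look like this; the app name and pid file path are illustrative.

from daemonize import Daemonize

def launch_daemonized(config, pidfile='/tmp/txnvalidator.pid'):
    # Fork first; local_main then performs the twisted imports (and epoll
    # setup) inside the child process, as the comment above requires.
    daemon = Daemonize(app='txnvalidator',
                       pid=pidfile,
                       action=lambda: local_main(config, daemonized=True))
    daemon.start()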
Example 6
def do_poet1_genesis(args):
    # Get journal config:
    cfg = mirror_validator_parsing(args)

    # Check for existing block store
    node_name = cfg.get("NodeName")
    data_directory = cfg.get("DataDirectory")
    store_type = cfg.get("StoreType")
    check_for_chain(data_directory, node_name, store_type)

    # Obtain Journal object:
    # ...set WaitTimer globals
    target_wait_time = cfg.get("TargetWaitTime")
    initial_wait_time = cfg.get("InitialWaitTime")
    certificate_sample_length = cfg.get('CertificateSampleLength')
    fixed_duration_blocks = cfg.get("FixedDurationBlocks")
    set_wait_timer_globals(target_wait_time,
                           initial_wait_time,
                           certificate_sample_length,
                           fixed_duration_blocks,
                           )
    # ...build Gossip dependency
    (nd, _) = parse_networking_info(cfg)
    minimum_retries = cfg.get("MinimumRetries")
    retry_interval = cfg.get("RetryInterval")
    gossiper = Gossip(nd, minimum_retries, retry_interval)
    # ...build Journal
    min_txn_per_block = cfg.get("MinimumTransactionsPerBlock")
    max_txn_per_block = cfg.get("MaxTransactionsPerBlock")
    max_txn_age = cfg.get("MaxTxnAge")
    stat_domains = {}
    consensus_obj = PoetConsensus(cfg)

    journal = Journal(gossiper.LocalNode,
                      gossiper,
                      gossiper.dispatcher,
                      consensus_obj,
                      stat_domains,
                      minimum_transactions_per_block=min_txn_per_block,
                      max_transactions_per_block=max_txn_per_block,
                      max_txn_age=max_txn_age,
                      data_directory=data_directory,
                      store_type=store_type,
                      )
    # ...add 'built in' txn families
    default_transaction_families = [
        endpoint_registry,
        validator_registry,
    ]
    for txn_family in default_transaction_families:
        txn_family.register_transaction_types(journal)
    # ...add auxiliary transaction families
    for txn_family_module_name in cfg.get("TransactionFamilies", []):
        txn_family = importlib.import_module(txn_family_module_name)
        txn_family.register_transaction_types(journal)

    # Make genesis block:
    consensus_obj.register_signup_information(journal=journal)

    # ...make sure there is no current chain here, or fail
    # ...pop VR seed (we'll presently defer resolving VR seed issues)
    vr_seed = gossiper.IncomingMessageQueue.pop()
    journal.initial_transactions.append(vr_seed.Transaction)
    # ...create block g_block (including VR seed txn just popped)
    journal.on_genesis_block.fire(journal)
    journal.initializing = False
    for txn in journal.initial_transactions:
        journal.add_pending_transaction(txn, build_block=False)
    g_block = journal.build_block(genesis=True)  # seed later...
    journal.claim_block(g_block)
    # ...simulate receiving the genesis block msg from reactor to force commit
    g_block_msg = gossiper.IncomingMessageQueue.pop()
    poet_public_key = g_block.poet_public_key
    journal.dispatcher.dispatch(g_block_msg)
    journal.initialization_complete()
    head = journal.most_recent_committed_block_id
    chain_len = len(journal.committed_block_ids())

    # Run shutdown:
    # ...persist new state
    journal.shutdown()
    # ...release gossip obj's UDP port
    gossiper.Listener.loseConnection()
    gossiper.Listener.connectionLost(reason=None)

    # Log genesis data, then write it out to ease dissemination
    genesis_data = {
        'GenesisId': head,
        'ChainLength': chain_len,
        'PoetPublicKey': poet_public_key,
    }
    gblock_fname = genesis_info_file_name(cfg['DataDirectory'])
    LOGGER.info('genesis data: %s', genesis_data)
    LOGGER.info('writing genesis data to %s', gblock_fname)
    with open(gblock_fname, 'w') as f:
        f.write(json.dumps(genesis_data, indent=4))
Example 7
def do_poet0_genesis(args):

    # Get ledger config:
    # ...set the default value of config because argparse 'default' in
    # ...combination with action='append' does the wrong thing.
    if args.config is None:
        args.config = ['txnvalidator.js']
    # ...convert any comma-delimited argument strings to list elements
    for arglist in [args.config]:
        if arglist is not None:
            for arg in arglist:
                if ',' in arg:
                    loc = arglist.index(arg)
                    arglist.pop(loc)
                    for element in reversed(arg.split(',')):
                        arglist.insert(loc, element)
    options_config = ArgparseOptionsConfig(
        [
            ('conf_dir', 'ConfigDirectory'),
            ('data_dir', 'DataDirectory'),
            ('type', 'LedgerType'),
            ('log_config', 'LogConfigFile'),
            ('keyfile', 'KeyFile'),
            ('node', 'NodeName'),
            ('verbose', 'Verbose'),
            ('family', 'TransactionFamilies')
        ], args)
    cfg = get_validator_configuration(args.config, options_config)

    # Obtain Journal object:
    # ...set WaitTimer globals
    target_wait_time = cfg.get("TargetWaitTime")
    initial_wait_time = cfg.get("InitialWaitTime")
    certificate_sample_length = cfg.get('CertificateSampleLength')
    fixed_duration_blocks = cfg.get("FixedDurationBlocks")
    from journal.consensus.poet0.wait_timer import set_wait_timer_globals
    set_wait_timer_globals(target_wait_time,
                           initial_wait_time,
                           certificate_sample_length,
                           fixed_duration_blocks,
                           )
    # ...build Gossip dependency
    (nd, _) = parse_networking_info(cfg)
    minimum_retries = cfg.get("MinimumRetries")
    retry_interval = cfg.get("RetryInterval")
    gossiper = Gossip(nd, minimum_retries, retry_interval)
    # ...build Journal
    min_txn_per_block = cfg.get("MinimumTransactionsPerBlock")
    max_txn_per_block = cfg.get("MaxTransactionsPerBlock")
    max_txn_age = cfg.get("MaxTxnAge")
    genesis_ledger = cfg.get("GenesisLedger")
    data_directory = cfg.get("DataDirectory")
    store_type = cfg.get("StoreType")
    stat_domains = {}
    from journal.consensus.poet0.poet_consensus import PoetConsensus
    consensus_obj = PoetConsensus(cfg)
    journal = Journal(gossiper.LocalNode,
                      gossiper,
                      gossiper.dispatcher,
                      consensus_obj,
                      stat_domains,
                      minimum_transactions_per_block=min_txn_per_block,
                      max_transactions_per_block=max_txn_per_block,
                      max_txn_age=max_txn_age,
                      genesis_ledger=genesis_ledger,
                      data_directory=data_directory,
                      store_type=store_type,
                      )
    # ...add 'built in' txn families
    default_transaction_families = [
        endpoint_registry
    ]
    for txn_family in default_transaction_families:
        txn_family.register_transaction_types(journal)
    # ...add auxiliary transaction families
    for txn_family_module_name in cfg.get("TransactionFamilies", []):
        txn_family = importlib.import_module(txn_family_module_name)
        txn_family.register_transaction_types(journal)

    # Make genesis block:
    # ...make sure there is no current chain here, or fail
    # ...create block g_block
    g_block = journal.build_block(genesis=True)
    journal.claim_block(g_block)
    # ...simulate receiving the genesis block msg from reactor to force commit
    g_block_msg = gossiper.IncomingMessageQueue.pop()
    journal.dispatcher.dispatch(g_block_msg)
    journal.initialization_complete()
    head = journal.most_recent_committed_block_id
    chain_len = len(journal.committed_block_ids())

    # Run shutdown:
    # ...persist new state
    journal.shutdown()
    # ...release gossip obj's UDP port
    gossiper.Listener.loseConnection()
    gossiper.Listener.connectionLost(reason=None)

    # Log genesis data, then write it out to ease dissemination
    genesis_data = {
        'GenesisId': head,
        'ChainLength': chain_len,
    }
    gblock_fname = genesis_info_file_name(cfg['DataDirectory'])
    LOGGER.info('genesis data: %s', genesis_data)
    LOGGER.info('writing genesis data to %s', gblock_fname)
    with open(gblock_fname, 'w') as f:
        f.write(json.dumps(genesis_data))
Example 8
def local_main(config, windows_service=False, daemonized=False):
    """
    Implement the actual application logic for starting the
    txnvalidator
    """

    # If this process has been daemonized, then we want to make
    # sure to print out an information message as quickly as possible
    # to the logger for debugging purposes.
    if daemonized:
        logger.info('validator has been daemonized')

    # These imports are delayed because of poor interactions between
    # epoll and fork.  Unfortunately, these import statements set up
    # epoll and we need that to happen after the forking done with
    # Daemonize().  This is a side-effect of importing twisted.
    from twisted.internet import reactor
    from txnserver.validator import parse_networking_info
    from txnserver.validator import Validator
    from txnserver import quorum_validator
    from txnserver import web_api
    from gossip.gossip_core import GossipException

    logger.warn('validator pid is %s', os.getpid())

    ledgertype = config.get('LedgerType', 'poet0')

    validator = None

    try:
        (nd, http_port) = parse_networking_info(config)
        # to construct a validator, we pass it a consensus specific ledger
        validator = None
        ledger = None
        ep_domain = None
        if ledgertype == 'poet0':
            from journal.consensus.poet0 import poet_journal
            set_wait_timer_globals(config)
            ledger = poet_journal.PoetJournal(nd, **config)
            ep_domain = '/PoetValidator'
        elif ledgertype == 'quorum':
            from journal.consensus.quorum import quorum_journal
            ledger = quorum_journal.QuorumJournal(nd, **config)
            ledger.initialize_quorum_map(config)
            # quorum validator is still sub-classed for now...
            validator = quorum_validator.QuorumValidator(
                nd,
                ledger,
                config,
                windows_service=windows_service,
                http_port=http_port)
            validator.EndpointDomain = '/QuorumValidator'
        elif ledgertype == 'dev_mode':
            from journal.consensus.dev_mode import dev_mode_journal
            set_wait_timer_globals(config)
            ledger = dev_mode_journal.DevModeJournal(nd, **config)
            ep_domain = '/DevModeValidator'
        else:
            warnings.warn('Unknown ledger type %s' % ledgertype)
            sys.exit(1)
        if validator is None:
            # null-check until we get rid of QuorumValidator subclass
            validator = Validator(
                nd,
                ledger,
                config,
                windows_service=windows_service,
                http_port=http_port,
            )
            validator.EndpointDomain = ep_domain
    except GossipException as e:
        print >> sys.stderr, str(e)
        sys.exit(1)

    web_api.initialize_web_server(config, validator)

    # go through the list of transaction families that should be initialized in
    # this validator. the endpoint registry is always included
    for txnfamily in config.get('TransactionFamilies'):
        logger.info("adding transaction family: %s", txnfamily)
        try:
            validator.add_transaction_family(
                importlib.import_module(txnfamily))
        except ImportError:
            warnings.warn("transaction family not found: {}".format(txnfamily))
            sys.exit(1)

    try:
        validator.pre_start()

        reactor.run()
    except KeyboardInterrupt:
        pass
    except SystemExit as e:
        raise e
    except:
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
Example 9
def local_main(config, windows_service=False, daemonized=False):
    """
    Implement the actual application logic for starting the
    txnvalidator
    """

    # If this process has been daemonized, then we want to make
    # sure to print out an information message as quickly as possible
    # to the logger for debugging purposes.
    if daemonized:
        logger.info('validator has been daemonized')

    # These imports are delayed because of poor interactions between
    # epoll and fork.  Unfortunately, these import statements set up
    # epoll and we need that to happen after the forking done with
    # Daemonize().  This is a side-effect of importing twisted.
    from twisted.internet import reactor
    from txnserver.validator import parse_networking_info
    from txnserver.validator import Validator
    from txnserver import web_api
    from gossip.gossip_core import GossipException
    from gossip.gossip_core import Gossip
    from journal.journal_core import Journal

    logger.warn('validator pid is %s', os.getpid())

    consensus_type = config.get('LedgerType', 'poet1')
    stat_domains = {}

    try:
        (node, http_port) = parse_networking_info(config)
        # to construct a validator, we pass it a consensus specific journal
        validator = None
        journal = None
        # Gossip parameters
        minimum_retries = config.get("MinimumRetries")
        retry_interval = config.get("RetryInterval")
        gossip = Gossip(node, minimum_retries, retry_interval, stat_domains)
        # WaitTimer globals
        target_wait_time = config.get("TargetWaitTime")
        initial_wait_time = config.get("InitialWaitTime")
        certificate_sample_length = config.get('CertificateSampleLength')
        fixed_duration_blocks = config.get("FixedDurationBlocks")
        minimum_wait_time = config.get("MinimumWaitTime")
        # Journal parameters
        min_txn_per_block = config.get("MinimumTransactionsPerBlock")
        max_txn_per_block = config.get("MaxTransactionsPerBlock")
        max_txn_age = config.get("MaxTxnAge")
        data_directory = config.get("DataDirectory")
        store_type = config.get("StoreType")

        if consensus_type == 'poet1':
            from sawtooth_validator.consensus.poet1 import poet_consensus
            from sawtooth_validator.consensus.poet1.wait_timer \
                import set_wait_timer_globals
            set_wait_timer_globals(target_wait_time,
                                   initial_wait_time,
                                   certificate_sample_length,
                                   fixed_duration_blocks,
                                   minimum_wait_time)
            # Continue to pass config to PoetConsensus for possible other
            # enclave implementations - poet_enclave.initialize
            consensus = poet_consensus.PoetConsensus(config)
        elif consensus_type == 'dev_mode':
            block_publisher = config.get("DevModePublisher", False)
            block_wait_time = config.get("BlockWaitTime")
            from sawtooth_validator.consensus.dev_mode \
                import dev_mode_consensus
            consensus = dev_mode_consensus.DevModeConsensus(
                block_publisher,
                block_wait_time)
        else:
            warnings.warn('Unknown consensus type %s' % consensus_type)
            sys.exit(1)

        permissioned_validators =\
            config.get("WhitelistOfPermissionedValidators")

        journal = Journal(
            gossip.LocalNode,
            gossip,
            gossip.dispatcher,
            consensus,
            permissioned_validators,
            stat_domains,
            min_txn_per_block,
            max_txn_per_block,
            max_txn_age,
            data_directory,
            store_type)

        validator = Validator(
            gossip,
            journal,
            stat_domains,
            config,
            windows_service=windows_service,
            http_port=http_port,
        )
    except GossipException as e:
        print(str(e), file=sys.stderr)
        sys.exit(1)

    listen_info = config.get("Listen", None)
    web_api.initialize_web_server(listen_info, validator)

    # go through the list of transaction families that should be initialized in
    # this validator. the endpoint registry is always included
    if consensus_type == 'poet1':
        from sawtooth_validator.consensus.poet1 import validator_registry
        validator_registry.register_transaction_types(journal)
    for txnfamily in config.get('TransactionFamilies'):
        logger.info("adding transaction family: %s", txnfamily)
        try:
            validator.add_transaction_family(
                importlib.import_module(txnfamily))
        except ImportError:
            warnings.warn("transaction family not found: {}".format(txnfamily))
            sys.exit(1)

    # attempt to restore journal state from persistence
    try:
        validator.journal.restore()
    except KeyError as e:
        logger.error("Config is not compatible with data files"
                     " found on restore. Keyerror on %s", e)
        sys.exit(1)

    try:
        validator.pre_start()

        reactor.run(installSignalHandlers=False)
    except KeyboardInterrupt:
        pass
    except SystemExit as e:
        raise e
    except:
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
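
Because this variant calls reactor.run(installSignalHandlers=False), twisted does not install its own signal handling; the hosting process must stop the reactor itself. One hedged way to wire that up on a POSIX system is sketched below.

import signal

def install_shutdown_handler(reactor):
    # Forward SIGTERM to a clean reactor shutdown; callFromThread makes the
    # stop request safe to issue from inside a signal handler.
    def _on_sigterm(signum, frame):
        reactor.callFromThread(reactor.stop)
    signal.signal(signal.SIGTERM, _on_sigterm)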
Example 10
def do_poet0_genesis(args):

    # Get ledger config:
    # set the default value of config because argparse 'default' in
    # combination with action='append' does the wrong thing.
    if args.config is None:
        args.config = ['txnvalidator.js']
    # convert any comma-delimited argument strings to list elements
    for arglist in [args.config]:
        if arglist is not None:
            for arg in arglist:
                if ',' in arg:
                    loc = arglist.index(arg)
                    arglist.pop(loc)
                    for element in reversed(arg.split(',')):
                        arglist.insert(loc, element)
    options_config = ArgparseOptionsConfig(
        [
            ('conf_dir', 'ConfigDirectory'),
            ('data_dir', 'DataDirectory'),
            ('type', 'LedgerType'),
            ('log_config', 'LogConfigFile'),
            ('keyfile', 'KeyFile'),
            ('node', 'NodeName'),
            ('verbose', 'Verbose'),
            ('family', 'TransactionFamilies')
        ], args)
    cfg = get_validator_configuration(args.config, options_config)

    # Perform requisite overrides and validation:
    cfg['GenesisLedger'] = True
    # should check that signing key exists...
    # debug report
    for key, value in cfg.iteritems():
        LOGGER.debug("CONFIG: %s = %s", key, value)

    # set WaitTimer globals
    target_wait_time = cfg.get("TargetWaitTime")
    initial_wait_time = cfg.get("InitialWaitTime")
    certificate_sample_length = cfg.get('CertificateSampleLength')
    fixed_duration_blocks = cfg.get("FixedDurationBlocks")
    from journal.consensus.poet0.wait_timer \
        import set_wait_timer_globals
    set_wait_timer_globals(target_wait_time,
                           initial_wait_time,
                           certificate_sample_length,
                           fixed_duration_blocks,
                           )

    # build gossiper
    (nd, _) = parse_networking_info(cfg)
    minimum_retries = cfg.get("MinimumRetries")
    retry_interval = cfg.get("RetryInterval")
    gossiper = Gossip(nd, minimum_retries, retry_interval)

    # build journal
    min_txn_per_block = cfg.get("MinimumTransactionsPerBlock")
    max_txn_per_block = cfg.get("MaxTransactionsPerBlock")
    max_txn_age = cfg.get("MaxTxnAge")
    genesis_ledger = cfg.get("GenesisLedger")
    data_directory = cfg.get("DataDirectory")
    store_type = cfg.get("StoreType")

    stat_domains = {}

    # in future, dynamically select consensus obj based on ConsensusType
    journal = Journal(gossiper.LocalNode,
                      gossiper,
                      gossiper.dispatcher,
                      PoetConsensus(cfg),
                      stat_domains,
                      minimum_transactions_per_block=min_txn_per_block,
                      max_transactions_per_block=max_txn_per_block,
                      max_txn_age=max_txn_age,
                      genesis_ledger=genesis_ledger,
                      data_directory=data_directory,
                      store_type=store_type,
                      )
    # may need to add transaction family objects ad hoc from cfg
    dfl_txn_families = [endpoint_registry, integer_key]
    for txnfamily in dfl_txn_families:
        txnfamily.register_transaction_types(journal)
    # ...skipping onNodeDisconnect handler (using ledger, not validator...)

    # Create genesis block:
    # we should make sure there is no current chain here, or fail
    # calling initialization_complete will create the genesis block
    journal.initialization_complete()
    # simulate receiving the genesis block msg from reactor to force commit
    msg = journal.gossip.IncomingMessageQueue.pop()
    (_, msg_handler) = journal.dispatcher.message_handler_map[msg.MessageType]
    msg_handler(msg, journal)

    # Gather data, then shutdown to save state:
    head = journal.most_recent_committed_block_id
    # ...not sure why n_blocks is experimentally 0 and not 1
    # ...if we only make the genesis, it would be good to check n_blks = 1
    n_blks = journal.committed_block_count
    journal.shutdown()

    # log genesis data, then write it out to ease dissemination
    genesis_data = {
        'GenesisId': head,
        'ChainLength': n_blks,
    }
    gblock_fname = get_genesis_block_id_file_name(cfg['DataDirectory'])
    LOGGER.info('genesis data: %s', genesis_data)
    LOGGER.info('writing genesis data to %s', gblock_fname)
    with open(gblock_fname, 'w') as f:
        f.write(json.dumps(genesis_data))
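
Every PoET example above calls set_wait_timer_globals with the same positional arguments. A simplified sketch of the module-global pattern that call implies is shown below; it is illustrative only, not the real poet wait_timer module, and the default values are placeholders.

# simplified_wait_timer.py -- illustrative stand-in for the poet wait_timer module
target_wait_time = 30.0
initial_wait_time = 3000.0
certificate_sample_length = 50
fixed_duration_blocks = None
minimum_wait_time = 1.0

def set_wait_timer_globals(target, initial, sample_length, fixed_blocks,
                           minimum=None):
    # Stash the tuning knobs in module globals so WaitTimer instances created
    # afterwards pick them up; poet1 additionally passes a minimum wait time.
    global target_wait_time, initial_wait_time
    global certificate_sample_length, fixed_duration_blocks, minimum_wait_time
    target_wait_time = target
    initial_wait_time = initial
    certificate_sample_length = sample_length
    fixed_duration_blocks = fixed_blocks
    if minimum is not None:
        minimum_wait_time = minimum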