def _start_all_nodes(
    self,
    args,
    recovery=False,
    ledger_dir=None,
    read_only_ledger_dir=None,
    snapshot_dir=None,
):
    """Start every node in ``self.nodes`` and open the CCF service.

    The first node either starts a fresh service (``node.start``) or
    recovers an existing one from a ledger/snapshot (``node.recover``);
    all subsequent nodes are joined to it via ``self._add_node``.

    :param args: parsed command-line options; ``args.package`` (the
        enclave library name) is mandatory, and any attribute listed in
        ``infra.network.Network.node_args_to_forward`` is forwarded to
        each node.
    :param recovery: when True, the first node recovers the service from
        ``ledger_dir``/``read_only_ledger_dir``/``snapshot_dir`` instead
        of starting a new one.
    :param ledger_dir: ledger directory handed to the recovering node and
        to joining nodes.
    :param read_only_ledger_dir: read-only ledger directory for recovery.
    :param snapshot_dir: snapshot directory; joining nodes are told to
        join from a snapshot iff this is not None.
    :return: the primary node, as reported by ``self.find_primary``.
    :raises ValueError: if ``args.package`` is falsy.
    :raises Exception: re-raises (after logging) anything raised while
        starting an individual node.
    """
    hosts = self.hosts

    if not args.package:
        raise ValueError("A package name must be specified.")

    self.status = ServiceStatus.OPENING
    LOG.info("Opening CCF service on {}".format(hosts))

    # Subset of CLI options that every node must receive verbatim.
    forwarded_args = {
        arg: getattr(args, arg)
        for arg in infra.network.Network.node_args_to_forward
    }

    for i, node in enumerate(self.nodes):
        try:
            if i == 0:
                if not recovery:
                    node.start(
                        lib_name=args.package,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        members_info=self.consortium.get_members_info(),
                        **forwarded_args,
                    )
                else:
                    node.recover(
                        lib_name=args.package,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        ledger_dir=ledger_dir,
                        read_only_ledger_dir=read_only_ledger_dir,
                        snapshot_dir=snapshot_dir,
                        **forwarded_args,
                    )
                    # Recovery replays the public ledger; block until the
                    # node reports it is part of the public network before
                    # joining the remaining nodes to it.
                    self.wait_for_state(
                        node,
                        "partOfPublicNetwork",
                        timeout=args.ledger_recovery_timeout,
                    )
            else:
                # When a new service is started, initial nodes join without a snapshot
                self._add_node(
                    node,
                    args.package,
                    args,
                    recovery=recovery,
                    ledger_dir=ledger_dir,
                    from_snapshot=snapshot_dir is not None,
                    read_only_ledger_dir=read_only_ledger_dir,
                    snapshot_dir=snapshot_dir,
                )
        except Exception:
            LOG.exception("Failed to start node {}".format(node.node_id))
            raise

    # Cache the consensus election timeout (in seconds); the *2 adds a
    # safety margin for tests that wait out an election.
    self.election_duration = (
        args.bft_view_change_timeout_ms / 1000
        if args.consensus == "bft"
        else args.raft_election_timeout_ms / 1000
    ) * 2

    LOG.info("All nodes started")

    # Here, recovery nodes might still be catching up, and possibly swamp
    # the current primary which would not be able to serve user requests
    primary, _ = self.find_primary(
        timeout=args.ledger_recovery_timeout if recovery else 3
    )
    return primary
def _start_all_nodes(self, args, recovery=False, ledger_dir=None):
    """Start every node in ``self.nodes`` and open the CCF service.

    The first node either starts a fresh service (``node.start``) or
    recovers one from ``ledger_dir`` (``node.recover``); all subsequent
    nodes are joined to it via ``self._add_node``.

    :param args: parsed command-line options; ``args.package`` (the
        enclave library name) is mandatory, and any attribute listed in
        ``infra.network.Network.node_args_to_forward`` is forwarded to
        each node.
    :param recovery: when True, the first node recovers the service from
        ``ledger_dir`` instead of starting a new one.
    :param ledger_dir: ledger directory handed to the recovering node.
    :return: the primary node, as reported by ``self.find_primary``.
    :raises ValueError: if ``args.package`` is falsy.
    :raises Exception: re-raises (after logging) anything raised while
        starting an individual node.
    """
    hosts = self.hosts

    if not args.package:
        raise ValueError("A package name must be specified.")

    self.status = ServiceStatus.OPENING
    LOG.info("Opening CCF service on {}".format(hosts))

    # Subset of CLI options that every node must receive verbatim.
    forwarded_args = {
        arg: getattr(args, arg)
        for arg in infra.network.Network.node_args_to_forward
    }

    for i, node in enumerate(self.nodes):
        try:
            if i == 0:
                if not recovery:
                    node.start(
                        lib_name=args.package,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        members_info=self.consortium.get_members_info(),
                        **forwarded_args,
                    )
                else:
                    node.recover(
                        lib_name=args.package,
                        ledger_dir=ledger_dir,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        **forwarded_args,
                    )
                    # When a recovery network is started without an existing network,
                    # it is not possible to know the local node IDs before the first
                    # node is started and has recovered the ledger. The local node IDs
                    # are adjusted accordingly then.
                    if self.existing_network is None:
                        self.wait_for_state(
                            node,
                            "partOfPublicNetwork",
                            timeout=args.ledger_recovery_timeout,
                        )
                        self._adjust_local_node_ids(node)
            else:
                self._add_node(node, args.package, args, recovery=recovery)
        except Exception:
            LOG.exception("Failed to start node {}".format(node.node_id))
            raise

    # Cache the consensus election timeout (in seconds) for tests that
    # need to wait out an election.
    self.election_duration = (
        args.pbft_view_change_timeout / 1000
        if args.consensus == "pbft"
        else args.raft_election_timeout / 1000
    )

    LOG.info("All nodes started")

    # Here, recovery nodes might still be catching up, and possibly swamp
    # the current primary which would not be able to serve user requests
    primary, _ = self.find_primary(
        timeout=args.ledger_recovery_timeout if recovery else 3
    )
    return primary