def _start_all_nodes(self, args, recovery=False, ledger_dir=None):
    """Start every node in the network and return the elected primary.

    The first node either starts a brand new service or recovers one from
    ``ledger_dir``; every other node joins the running service.

    :param args: parsed test arguments (must carry a ``package`` name).
    :param recovery: when True, the first node recovers from an existing ledger.
    :param ledger_dir: ledger directory handed to the recovering node.
    :raises ValueError: if ``args.package`` is not set.
    :return: the primary node once the service is up.
    """
    hosts = self.hosts

    if not args.package:
        raise ValueError("A package name must be specified.")

    self.status = ServiceStatus.OPENING
    LOG.info("Opening CCF service on {}".format(hosts))

    # Subset of the test arguments that is forwarded verbatim to each node.
    forwarded_args = {
        arg: getattr(args, arg) for arg in infra.ccf.Network.node_args_to_forward
    }

    for node_index, node in enumerate(self.nodes):
        try:
            if node_index != 0:
                # Every node but the first simply joins the existing service.
                self._add_node(node, args.package, args, recovery=recovery)
                continue
            if recovery:
                node.recover(
                    lib_name=args.package,
                    ledger_dir=ledger_dir,
                    workspace=args.workspace,
                    label=args.label,
                    common_dir=self.common_dir,
                    **forwarded_args,
                )
                # When a recovery network is started without an existing
                # network, the local node IDs cannot be known until the first
                # node has recovered the ledger, so adjust them once that node
                # reaches the public-network state.
                if self.existing_network is None:
                    self.wait_for_state(
                        node,
                        "partOfPublicNetwork",
                        timeout=args.ledger_recovery_timeout,
                    )
                    self._adjust_local_node_ids(node)
            else:
                node.start(
                    lib_name=args.package,
                    workspace=args.workspace,
                    label=args.label,
                    common_dir=self.common_dir,
                    members_info=self.consortium.get_members_info(),
                    **forwarded_args,
                )
        except Exception:
            LOG.exception("Failed to start node {}".format(node.node_id))
            raise

    # Worst-case election time in seconds, per the active consensus protocol.
    if args.consensus == "pbft":
        self.election_duration = args.pbft_view_change_timeout * 2 / 1000
    else:
        self.election_duration = args.raft_election_timeout * 2 / 1000

    LOG.info("All nodes started")

    primary, _ = self.find_primary()
    return primary
def _start_all_nodes(
    self, args, recovery=False, ledger_file=None, sealed_secrets=None
):
    """Start every node in the network and return the elected primary.

    The first node either starts a fresh service or recovers one from
    ``ledger_file`` (+ ``sealed_secrets``); every other node joins it.

    :param args: parsed test arguments (must carry a ``package`` name).
    :param recovery: when True, the first node recovers an existing service.
    :param ledger_file: ledger file handed to the recovering node.
    :param sealed_secrets: sealed secrets handed to the recovering node.
    :raises ValueError: if ``args.package`` is not set.
    :return: the primary node once the service is open.
    """
    # Fall back to one local host per configured node when no hosts were
    # given. Fix: the original referenced the undefined name
    # `number_of_local.nodes()`, which raised NameError whenever
    # self.hosts was empty.
    hosts = self.hosts or ["localhost"] * len(self.nodes)

    if not args.package:
        raise ValueError("A package name must be specified.")

    self.status = ServiceStatus.OPENING
    LOG.info("Opening CCF service on {}".format(hosts))

    # Computed once, outside the loop. Fix: the original rebuilt this dict
    # on every iteration via a second, inconsistent class reference
    # (`Network` vs `infra.ccf.Network`).
    forwarded_args = {
        arg: getattr(args, arg) for arg in infra.ccf.Network.node_args_to_forward
    }

    for i, node in enumerate(self.nodes):
        try:
            if i == 0:
                if not recovery:
                    node.start(
                        lib_name=args.package,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        members_info=self.consortium.get_members_info(),
                        **forwarded_args,
                    )
                else:
                    node.recover(
                        lib_name=args.package,
                        ledger_file=ledger_file,
                        sealed_secrets=sealed_secrets,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        **forwarded_args,
                    )
            else:
                # Remaining nodes join the service started by node 0.
                self._add_node(node, args.package, args)
        except Exception:
            LOG.exception("Failed to start node {}".format(i))
            raise

    LOG.info("All remotes started")

    primary, term = self.find_primary()
    # Sanity-check that the service reports itself as opening.
    self.consortium.check_for_service(primary, status=ServiceStatus.OPENING)
    return primary
def _start_all_nodes(
    self,
    args,
    recovery=False,
    ledger_dir=None,
    read_only_ledger_dir=None,
    snapshot_dir=None,
):
    """Start every node in the network and return the current primary.

    The first node either starts a fresh service or recovers one from the
    given ledger/snapshot directories; every other node joins it
    (optionally from a snapshot).

    :param args: parsed test arguments (must carry a ``package`` name).
    :param recovery: when True, the first node recovers an existing service.
    :param ledger_dir: main ledger directory for recovery/joining.
    :param read_only_ledger_dir: read-only ledger directory, if any.
    :param snapshot_dir: snapshot directory; its presence implies joining
        nodes start from a snapshot.
    :raises ValueError: if ``args.package`` is not set.
    :return: the primary node.
    """
    hosts = self.hosts
    if not args.package:
        raise ValueError("A package name must be specified.")
    self.status = ServiceStatus.OPENING
    LOG.info("Opening CCF service on {}".format(hosts))
    # Subset of the test arguments forwarded verbatim to each node.
    forwarded_args = {
        arg: getattr(args, arg)
        for arg in infra.network.Network.node_args_to_forward
    }
    for i, node in enumerate(self.nodes):
        try:
            if i == 0:
                if not recovery:
                    node.start(
                        lib_name=args.package,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        members_info=self.consortium.get_members_info(),
                        **forwarded_args,
                    )
                else:
                    node.recover(
                        lib_name=args.package,
                        workspace=args.workspace,
                        label=args.label,
                        common_dir=self.common_dir,
                        ledger_dir=ledger_dir,
                        read_only_ledger_dir=read_only_ledger_dir,
                        snapshot_dir=snapshot_dir,
                        **forwarded_args,
                    )
                    # When a recovery network is started without an existing
                    # network, it is not possible to know the local node IDs
                    # before the first node is started and has recovered the
                    # ledger. The local node IDs are adjusted accordingly then.
                    if self.existing_network is None:
                        self.wait_for_state(
                            node,
                            "partOfPublicNetwork",
                            timeout=args.ledger_recovery_timeout,
                        )
                        self._adjust_local_node_ids(node)
            else:
                # When a new service is started, initial nodes join without a snapshot
                self._add_node(
                    node,
                    args.package,
                    args,
                    recovery=recovery,
                    ledger_dir=ledger_dir,
                    from_snapshot=snapshot_dir is not None,
                    read_only_ledger_dir=read_only_ledger_dir,
                    snapshot_dir=snapshot_dir,
                )
        except Exception:
            LOG.exception("Failed to start node {}".format(node.node_id))
            raise
    # Worst-case election time in seconds, per the active consensus protocol.
    self.election_duration = (
        args.bft_view_change_timeout_ms / 1000
        if args.consensus == "bft"
        else args.raft_election_timeout_ms / 1000
    ) * 2
    LOG.info("All nodes started")
    # Here, recovery nodes might still be catching up, and possibly swamp
    # the current primary which would not be able to serve user requests
    primary, _ = self.find_primary(
        timeout=args.ledger_recovery_timeout if recovery else 3
    )
    return primary
def _start_all_nodes(
    self,
    args,
    recovery=False,
    ledger_dir=None,
    read_only_ledger_dir=None,
    snapshot_dir=None,
):
    """Start (or recover) every node in the network and return the primary.

    The first node creates a fresh service, or recovers one from the given
    ledger/snapshot directories and is then awaited until it is part of the
    public network; every other node joins the service.

    :param args: parsed test arguments (must carry a ``package`` name).
    :param recovery: when True, the first node recovers an existing service.
    :param ledger_dir: main ledger directory for recovery/joining.
    :param read_only_ledger_dir: read-only ledger directory, if any.
    :param snapshot_dir: snapshot directory; its presence implies joining
        nodes start from a snapshot.
    :raises ValueError: if ``args.package`` is not set.
    :return: the primary node.
    """
    self.args = args
    hosts = self.hosts

    if not args.package:
        raise ValueError("A package name must be specified.")

    self.status = ServiceStatus.OPENING
    LOG.info("Opening CCF service on {}".format(hosts))

    # Subset of the test arguments forwarded verbatim to each node.
    forwarded_args = {
        arg: getattr(args, arg)
        for arg in infra.network.Network.node_args_to_forward
    }

    for node_index, node in enumerate(self.nodes):
        try:
            if node_index != 0:
                # When a new service is started, initial nodes join without a snapshot
                self._add_node(
                    node,
                    args.package,
                    args,
                    recovery=recovery,
                    ledger_dir=ledger_dir,
                    from_snapshot=snapshot_dir is not None,
                    read_only_ledger_dir=read_only_ledger_dir,
                    snapshot_dir=snapshot_dir,
                )
                continue
            if recovery:
                node.recover(
                    lib_name=args.package,
                    workspace=args.workspace,
                    label=args.label,
                    common_dir=self.common_dir,
                    ledger_dir=ledger_dir,
                    read_only_ledger_dir=read_only_ledger_dir,
                    snapshot_dir=snapshot_dir,
                    **forwarded_args,
                )
                # Block until the recovering node has replayed the public
                # ledger and joined the public network.
                self.wait_for_state(
                    node,
                    infra.node.State.PART_OF_PUBLIC_NETWORK.value,
                    timeout=args.ledger_recovery_timeout,
                )
            else:
                node.start(
                    lib_name=args.package,
                    workspace=args.workspace,
                    label=args.label,
                    common_dir=self.common_dir,
                    members_info=self.consortium.get_members_info(),
                    **forwarded_args,
                )
        except Exception:
            LOG.exception("Failed to start node {}".format(
                node.local_node_id))
            raise

    # Election timeout in seconds, per the active consensus protocol.
    if args.consensus == "bft":
        self.election_duration = args.bft_view_change_timeout_ms / 1000
    else:
        self.election_duration = args.raft_election_timeout_ms / 1000
    # After an election timeout, we need some additional roundtrips to complete before
    # the nodes _observe_ that an election has occurred
    self.observed_election_duration = self.election_duration + 1

    LOG.info("All nodes started")

    # Here, recovery nodes might still be catching up, and possibly swamp
    # the current primary which would not be able to serve user requests
    primary, _ = self.find_primary(
        timeout=args.ledger_recovery_timeout if recovery else 3
    )
    return primary