def get_worker_spec(
    self,
    node_config: Conf,
    min_nodes=1,
    max_nodes=1,
    max_restarts=0,
    monitor_interval=0.01,
):
    """Build a ``WorkerSpec`` for ``node_config`` backed by an etcd rendezvous.

    The rendezvous uses this fixture's etcd server endpoint and run id;
    role/entrypoint/args/redirects/tee are taken from ``node_config``.
    """
    rendezvous_handler = rdzv_registry.get_rendezvous_handler(
        RendezvousParameters(
            backend="etcd",
            endpoint=self._etcd_server.get_endpoint(),
            run_id=self._run_id,
            min_nodes=min_nodes,
            max_nodes=max_nodes,
        )
    )
    return WorkerSpec(
        role=node_config.role,
        local_world_size=node_config.local_world_size,
        entrypoint=node_config.entrypoint,
        args=node_config.args,
        rdzv_handler=rendezvous_handler,
        max_restarts=max_restarts,
        monitor_interval=monitor_interval,
        redirects=node_config.redirects,
        tee=node_config.tee,
    )
def _get_worker_spec(
    self,
    max_restarts=1,
    monitor_interval=1.0,
    role="test_trainer",
    local_world_size=8,
):
    """Create a single-node ``WorkerSpec`` running ``do_nothing`` workers.

    A fresh random run id is generated per call so concurrent tests never
    share a rendezvous on the fixture's etcd server.
    """
    unique_run_id = str(uuid.uuid4().int)
    params = RendezvousParameters(
        backend="etcd",
        endpoint=self._etcd_server.get_endpoint(),
        run_id=unique_run_id,
        min_nodes=1,
        max_nodes=1,
    )
    return WorkerSpec(
        role=role,
        local_world_size=local_world_size,
        fn=do_nothing,
        args=(),
        rdzv_handler=rdzv_registry.get_rendezvous_handler(params),
        max_restarts=max_restarts,
        monitor_interval=monitor_interval,
    )
def _get_worker_spec(
    self,
    fn=None,
    cmd=None,
    args=(),
    max_restarts=1,
    num_agents=1,
    monitor_interval=0.1,
    local_world_size=8,
):
    """Create a ``WorkerSpec`` for ``num_agents`` nodes running ``fn`` or ``cmd``.

    Uses a fresh random run id against the fixture's etcd server so each
    call gets an isolated rendezvous.
    """
    fresh_run_id = str(uuid.uuid4().int)
    rendezvous = rdzv_registry.get_rendezvous_handler(
        RendezvousParameters(
            backend="etcd",
            endpoint=f"{self._etcd_server.get_endpoint()}",
            run_id=fresh_run_id,
            min_nodes=num_agents,
            max_nodes=num_agents,
        )
    )
    return WorkerSpec(
        role="test_trainer",
        local_world_size=local_world_size,
        fn=fn,
        cmd=cmd,
        args=args,
        rdzv_handler=rendezvous,
        max_restarts=max_restarts,
        monitor_interval=monitor_interval,
    )
def run_agent(run_id, etcd_host, etcd_port, start_method, worker_fn, worker_args=()):
    """Run a two-node ``LocalElasticAgent`` that executes ``worker_fn``.

    The rendezvous is fixed at exactly two nodes on the given etcd
    host/port; each agent runs one local worker with up to 3 restarts.
    """
    handler = rdzv_registry.get_rendezvous_handler(
        RendezvousParameters(
            backend="etcd",
            endpoint=f"{etcd_host}:{etcd_port}",
            run_id=run_id,
            min_nodes=2,
            max_nodes=2,
        )
    )
    worker_spec = WorkerSpec(
        role="test_trainer",
        local_world_size=1,
        fn=worker_fn,
        args=worker_args,
        rdzv_handler=handler,
        max_restarts=3,
        monitor_interval=1,
    )
    LocalElasticAgent(worker_spec, start_method).run()
def test_get_etcd_rdzv_handler(self):
    """
    Check that we can create the handler with a minimum set of params
    """
    minimal_params = RendezvousParameters(
        backend="etcd",
        endpoint=f"{self._etcd_server.get_endpoint()}",
        run_id=f"{uuid.uuid4()}",
        min_nodes=1,
        max_nodes=1,
    )
    handler = rdzv_registry_oss.get_rendezvous_handler(minimal_params)
    self.assertIsNotNone(handler)
def _run_agent(
    run_id,
    etcd_host,
    etcd_port,
    min_size,
    max_size,
    func_to_run,
    args,
    local_world_size=8,
    role="test_trainer",
    output_dict=None,
    agent_barrier_timeout=300,
):
    """Run a ``LocalElasticAgent`` (fork start method) executing ``func_to_run``.

    Rendezvous is etcd-backed at ``etcd_host:etcd_port`` with elastic
    membership in ``[min_size, max_size]``. When ``output_dict`` is given,
    the agent's result is stored under a random key as ``(role, result)``
    so multiple agents can report into one shared dict.
    """
    handler = rdzv_registry.get_rendezvous_handler(
        RendezvousParameters(
            backend="etcd",
            endpoint=f"{etcd_host}:{etcd_port}",
            run_id=run_id,
            min_nodes=min_size,
            max_nodes=max_size,
        )
    )
    worker_spec = WorkerSpec(
        role=role,
        local_world_size=local_world_size,
        fn=func_to_run,
        args=args,
        rdzv_handler=handler,
        max_restarts=2,
        monitor_interval=1,
    )
    agent = LocalElasticAgent(
        worker_spec, start_method="fork", exit_barrier_timeout=agent_barrier_timeout
    )
    result = agent.run()
    if output_dict is not None:
        output_dict[str(uuid.uuid4().int)] = (role, result)
def main(args=None):
    """Launcher entry point: parse args, set up rendezvous, run the
    training script under a ``LocalElasticAgent``.

    In ``--standalone`` mode an embedded etcd server is started and the
    rendezvous settings on ``args`` are overwritten to point at it.
    Worker failures are re-raised as ``ChildFailedError``; any other
    exception is recorded as an agent/launcher event before re-raising.
    The rendezvous handler (and the standalone etcd server, if any) is
    always shut down in the ``finally`` block.
    """
    # If ``args`` not passed, defaults to ``sys.argv[:1]``
    args = parse_args(args)
    min_nodes, max_nodes = parse_min_max_nnodes(args.nnodes)
    assert 0 < min_nodes <= max_nodes
    assert args.max_restarts >= 0

    # Pre-declared so the generic ``except Exception`` handler below can
    # tell whether the agent was constructed before the failure.
    elastic_agent = None

    if args.standalone:
        etcd_server = EtcdServer()
        etcd_server.start()
        # Override the user-supplied rendezvous settings with the
        # embedded server's endpoint and a fresh run id.
        args.rdzv_backend = "etcd"
        args.rdzv_endpoint = etcd_server.get_endpoint()
        args.rdzv_id = str(uuid.uuid4())
        log.info(f"\n**************************************\n"
                 f"Rendezvous info:\n"
                 f"--rdzv_backend={args.rdzv_backend} "
                 f"--rdzv_endpoint={args.rdzv_endpoint} "
                 f"--rdzv_id={args.rdzv_id}\n"
                 f"**************************************\n")

    nproc_per_node = determine_local_world_size(args.nproc_per_node)
    if "OMP_NUM_THREADS" not in os.environ and nproc_per_node > 1:
        omp_num_threads = 1
        print(
            f"*****************************************\n"
            f"Setting OMP_NUM_THREADS environment variable for each process to be "
            f"{omp_num_threads} in default, to avoid your system being overloaded, "
            f"please further tune the variable for optimal performance in "
            f"your application as needed. \n"
            f"*****************************************")
        # This env variable will be passed down to the subprocesses
        os.environ["OMP_NUM_THREADS"] = str(omp_num_threads)

    # Build the worker command line: [python -u [-m]] script script_args,
    # or the bare script when --no_python is given.
    with_python = not args.no_python
    cmd = []
    if with_python:
        cmd = [sys.executable, "-u"]
        if args.module:
            cmd.append("-m")
    else:
        if args.module:
            raise ValueError("Don't use both the '--no_python' flag"
                             " and the '--module' flag at the same time.")
    cmd.append(args.training_script)
    cmd.extend(args.training_script_args)

    rdzv_parameters = RendezvousParameters(
        backend=args.rdzv_backend,
        endpoint=args.rdzv_endpoint,
        run_id=args.rdzv_id,
        min_nodes=min_nodes,
        max_nodes=max_nodes,
        **_parse_rendezvous_config(args.rdzv_conf),
    )
    rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_parameters)
    try:
        spec = WorkerSpec(
            role=args.role,
            local_world_size=nproc_per_node,
            entrypoint=cmd[0],
            args=(*cmd[1:], ),
            rdzv_handler=rdzv_handler,
            max_restarts=args.max_restarts,
            monitor_interval=args.monitor_interval,
            redirects=Std.from_str(args.redirects),
            tee=Std.from_str(args.tee),
        )
        metrics.initialize_metrics()
        elastic_agent = LocalElasticAgent(
            spec=spec, start_method=args.start_method, log_dir=args.log_dir
        )
        run_result = elastic_agent.run(spec.role)
        # NOTE(review): a SUCCEEDED event is recorded unconditionally here,
        # before ``run_result.is_failed()`` is checked — confirm this is
        # intentional (a failed run records both SUCCEEDED and the raised
        # ChildFailedError below).
        events.record(
            elastic_agent.get_agent_status_event(WorkerState.SUCCEEDED))
        if run_result.is_failed():
            # ChildFailedError is treated specially by @record
            # if the error files for the failed children exist
            # @record will copy the first error (root cause)
            # to the error file of the launcher process
            raise ChildFailedError(
                name=args.training_script,
                failures=run_result.failures,
            )
    except ChildFailedError:
        # Already carries the failure details; let @record handle it.
        raise
    except Exception:
        # Record a FAILED agent event when the agent exists; otherwise the
        # failure happened before agent construction, so record a
        # launcher-level event built from the parsed args.
        if elastic_agent:
            events.record(
                elastic_agent.get_agent_status_event(WorkerState.FAILED))
        else:
            events.record(_construct_event(args))
        raise
    finally:
        rdzv_handler.shutdown()
        if args.standalone:
            etcd_server.stop()
def main(args=None):
    """Launcher entry point (``wrapper_fn`` variant): parse args, set up
    rendezvous, and run the worker command under a ``LocalElasticAgent``.

    Unlike the subprocess-entrypoint variant, the worker command is not
    exec'd directly: ``wrapper_fn`` is invoked with
    ``(omp_num_threads, cmd)`` and is responsible for applying the OMP
    setting and running ``cmd``. Rendezvous (and the standalone etcd
    server, if any) is always shut down in the ``finally`` block.
    """
    # If ``args`` not passed, defaults to ``sys.argv[:1]``
    args = parse_args(args)
    min_nodes, max_nodes = parse_min_max_nnodes(args.nnodes)
    assert 0 < min_nodes <= max_nodes
    assert args.max_restarts >= 0

    if args.standalone:
        # Spin up an embedded etcd server and point the rendezvous at it.
        etcd_server = EtcdServer()
        etcd_server.start()
        args.rdzv_backend = "etcd"
        args.rdzv_endpoint = etcd_server.get_endpoint()
        args.rdzv_id = str(uuid.uuid4())
        log.info(
            f"\n**************************************\n"
            f"Rendezvous info:\n"
            f"--rdzv_backend={args.rdzv_backend} "
            f"--rdzv_endpoint={args.rdzv_endpoint} "
            f"--rdzv_id={args.rdzv_id}\n"
            f"**************************************\n"
        )

    nproc_per_node = determine_local_world_size(args.nproc_per_node)
    # Stays None when OMP_NUM_THREADS is already set or only one local
    # process runs; otherwise 1 is forwarded to wrapper_fn below (this
    # variant does not set os.environ itself).
    omp_num_threads = None
    if "OMP_NUM_THREADS" not in os.environ and nproc_per_node > 1:
        omp_num_threads = 1
        print(
            f"*****************************************\n"
            f"Setting OMP_NUM_THREADS environment variable for each process to be "
            f"{omp_num_threads} in default, to avoid your system being overloaded, "
            f"please further tune the variable for optimal performance in "
            f"your application as needed. \n"
            f"*****************************************"
        )

    # Build the worker command line: [python -u [-m]] script script_args,
    # or the bare script when --no_python is given.
    with_python = not args.no_python
    cmd = []
    if with_python:
        cmd = [sys.executable, "-u"]
        if args.module:
            cmd.append("-m")
    else:
        if args.module:
            raise ValueError(
                "Don't use both the '--no_python' flag"
                " and the '--module' flag at the same time."
            )
    cmd.append(args.training_script)
    cmd.extend(args.training_script_args)

    rdzv_parameters = RendezvousParameters(
        backend=args.rdzv_backend,
        endpoint=args.rdzv_endpoint,
        run_id=args.rdzv_id,
        min_nodes=min_nodes,
        max_nodes=max_nodes,
        **_parse_rdzv_conf(args.rdzv_conf),
    )
    rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_parameters)
    try:
        spec = WorkerSpec(
            role=args.role,
            local_world_size=nproc_per_node,
            fn=wrapper_fn,
            args=(omp_num_threads, cmd),
            rdzv_handler=rdzv_handler,
            max_restarts=args.max_restarts,
            monitor_interval=args.monitor_interval,
        )
        metrics.initialize_metrics()
        elastic_agent = LocalElasticAgent(spec, start_method=args.start_method)
        elastic_agent.run(spec.role)
    finally:
        rdzv_handler.shutdown()
        if args.standalone:
            etcd_server.stop()
def main(args=None):
    """Launcher entry point (``cmd=`` variant): parse args, set up
    rendezvous, and run the worker command under a ``LocalElasticAgent``.

    On worker failure the lowest-ranked failure is selected, handed to
    ``process_failure`` for error-file propagation, logged, and the
    launcher exits with ``abs(failure.exit_code)``. Rendezvous (plus the
    standalone etcd server, if any) is always shut down and ``cleanup()``
    invoked in the ``finally`` block.
    """
    # If ``args`` not passed, defaults to ``sys.argv[:1]``
    args = parse_args(args)
    min_nodes, max_nodes = parse_min_max_nnodes(args.nnodes)
    assert 0 < min_nodes <= max_nodes
    assert args.max_restarts >= 0

    if args.standalone:
        # Spin up an embedded etcd server and point the rendezvous at it.
        etcd_server = EtcdServer()
        etcd_server.start()
        args.rdzv_backend = "etcd"
        args.rdzv_endpoint = etcd_server.get_endpoint()
        args.rdzv_id = str(uuid.uuid4())
        log.info(f"\n**************************************\n"
                 f"Rendezvous info:\n"
                 f"--rdzv_backend={args.rdzv_backend} "
                 f"--rdzv_endpoint={args.rdzv_endpoint} "
                 f"--rdzv_id={args.rdzv_id}\n"
                 f"**************************************\n")

    nproc_per_node = determine_local_world_size(args.nproc_per_node)
    omp_num_threads = None
    if "OMP_NUM_THREADS" not in os.environ and nproc_per_node > 1:
        omp_num_threads = 1
        print(
            f"*****************************************\n"
            f"Setting OMP_NUM_THREADS environment variable for each process to be "
            f"{omp_num_threads} in default, to avoid your system being overloaded, "
            f"please further tune the variable for optimal performance in "
            f"your application as needed. \n"
            f"*****************************************")
        # This env variable will be passed down to the subprocesses
        os.environ["OMP_NUM_THREADS"] = str(omp_num_threads)

    # Build the worker command line: [python -u [-m]] script script_args,
    # or the bare script when --no_python is given.
    with_python = not args.no_python
    cmd = []
    if with_python:
        cmd = [sys.executable, "-u"]
        if args.module:
            cmd.append("-m")
    else:
        if args.module:
            raise ValueError("Don't use both the '--no_python' flag"
                             " and the '--module' flag at the same time.")
    cmd.append(args.training_script)
    cmd.extend(args.training_script_args)

    rdzv_parameters = RendezvousParameters(
        backend=args.rdzv_backend,
        endpoint=args.rdzv_endpoint,
        run_id=args.rdzv_id,
        min_nodes=min_nodes,
        max_nodes=max_nodes,
        **_parse_rdzv_conf(args.rdzv_conf),
    )
    rdzv_handler = rdzv_registry.get_rendezvous_handler(rdzv_parameters)
    try:
        spec = WorkerSpec(
            role=args.role,
            local_world_size=nproc_per_node,
            cmd=cmd,
            rdzv_handler=rdzv_handler,
            max_restarts=args.max_restarts,
            monitor_interval=args.monitor_interval,
        )
        metrics.initialize_metrics()
        elastic_agent = LocalElasticAgent(spec, start_method=args.start_method)
        group_result = elastic_agent.run(spec.role)
        if group_result.is_failed():
            # Report the failure of the lowest-ranked failed worker as the
            # root cause.
            min_rank = min(group_result.failures.keys())
            failure = group_result.failures[min_rank]
            # Note: this line will raise an exception to indicate to the
            # scheduler process that something went wrong.
            # If any workers wrote the error file, it will be propagated
            # to the scheduler specific destination.
            process_failure(failure)
            msg = f"""
*********************************************************************** \n
***********************USER CODE FAILED WITH ERROR****************** \n\n
{get_failure_message(failure)} \n
******************************************************************** \n\n
******************************************************************** \n
"""
            log.warning(msg)
            # Expected (0-127), 0 - success, anything else - failure
            sys.exit(abs(failure.exit_code))
    finally:
        rdzv_handler.shutdown()
        if args.standalone:
            etcd_server.stop()
        cleanup()