def submit(self, calc_type=None, app_name=None, app_args=None, stdout=None, stderr=None, dry_run=False, wait_on_run=False):
    """Create a task for the resolved application and execute or schedule it.

    The application is looked up by ``app_name`` if given, otherwise by
    ``calc_type``; supplying neither raises ExecutorException. In a dry
    run the command line is only logged and the task is marked complete.
    Returns the created Task object.
    """
    # Resolve the application: explicit name takes precedence over calc type.
    if app_name is not None:
        app = self.get_app(app_name)
    elif calc_type is not None:
        app = self.default_app(calc_type)
    else:
        raise ExecutorException("Either app_name or calc_type must be set")

    task = Task(app, app_args, os.getcwd(), stdout, stderr, self.workerID)

    # Command line is the executable path followed by any application arguments.
    parts = task.app.full_path.split()
    if task.app_args is not None:
        parts += task.app_args.split()
    task.runline = ' '.join(parts)  # Allow to be queried

    if dry_run:
        # No launch: record the would-be command and mark the task done.
        task.dry_run = True
        logger.info('Test (No submit) Runline: {}'.format(' '.join(parts)))
        task._set_complete(dry_run=True)
    else:
        # Launch Task
        self._launch_with_retries(task, parts, subgroup_launch=False, wait_on_run=wait_on_run)
        if not task.timer.timing:
            task.timer.start()
            task.submit_time = task.timer.tstart  # Time not date - may not need if using timer.

    self.list_of_tasks.append(task)
    return task
def submit(self, calc_type, num_procs=None, num_nodes=None, ranks_per_node=None, machinefile=None, app_args=None, stdout=None, stderr=None, stage_inout=None, hyperthreads=False, dry_run=False, wait_on_run=False, extra_args=None):
    """Creates a new task, and either executes or schedules execution.

    The created task object is returned.

    Parameters
    ----------

    calc_type: String
        The calculation type: 'sim' or 'gen'

    num_procs: int, optional
        The total number of MPI tasks on which to submit the task

    num_nodes: int, optional
        The number of nodes on which to submit the task

    ranks_per_node: int, optional
        The ranks per node for this task

    machinefile: string, optional
        Name of a machinefile for this task to use

    app_args: string, optional
        A string of the application arguments to be added to task
        submit command line

    stdout: string, optional
        A standard output filename

    stderr: string, optional
        A standard error filename

    stage_inout: string, optional
        A directory to copy files from; default will take from
        current directory

    hyperthreads: boolean, optional
        Whether to submit MPI tasks to hyperthreads

    dry_run: boolean, optional
        Whether this is a dry_run - no task will be launched; instead
        runline is printed to logger (at INFO level)

    wait_on_run: boolean, optional
        Whether to wait for task to be polled as RUNNING (or other
        active/end state) before continuing

    extra_args: String, optional
        Additional command line arguments to supply to MPI runner. If
        arguments are recognised as those used in auto_resources
        (num_procs, num_nodes, ranks_per_node) they will be used in
        resources determination unless also supplied in the direct
        options.

    Returns
    -------

    task: obj: Task
        The launched task object

    Note that if some combination of num_procs, num_nodes, and
    ranks_per_node is provided, these will be honored if possible. If
    resource detection is on and these are omitted, then the available
    resources will be divided among workers.
    """
    app = self.default_app(calc_type)
    default_workdir = os.getcwd()
    task = Task(app, app_args, default_workdir, stdout, stderr, self.workerID)

    # Staging is not supported by this executor; warn rather than fail.
    if stage_inout is not None:
        logger.warning("stage_inout option ignored in this "
                       "executor - runs in-place")

    # Determine MPI run configuration (procs/nodes/ranks, machinefile, etc.)
    # from the direct options, extra_args, and any auto-detected resources.
    mpi_specs = self.mpi_runner.get_mpi_specs(task, num_procs, num_nodes,
                                              ranks_per_node, machinefile,
                                              hyperthreads, extra_args,
                                              self.auto_resources,
                                              self.resources, self.workerID)

    mpi_command = self.mpi_runner.mpi_command
    sglaunch = self.mpi_runner.subgroup_launch
    runline = launcher.form_command(mpi_command, mpi_specs)

    # Append the application executable and its arguments to the MPI command.
    runline.extend(task.app.full_path.split())
    if task.app_args is not None:
        runline.extend(task.app_args.split())

    task.runline = ' '.join(runline)  # Allow to be queried

    if dry_run:
        # No launch: record the would-be command and mark the task done.
        task.dry_run = True
        logger.info('Test (No submit) Runline: {}'.format(' '.join(runline)))
        task.set_as_complete()
    else:
        # Launch Task
        self._launch_with_retries(task, runline, sglaunch, wait_on_run)

        if not task.timer.timing:
            task.timer.start()
            task.submit_time = task.timer.tstart  # Time not date - may not need if using timer.

    self.list_of_tasks.append(task)
    return task