def submit(self, process, *args, **inputs):
    """Submit the process with the supplied inputs to this runner, returning control immediately.

    The process is persisted and handed to the process controller when RabbitMQ submission is
    enabled, otherwise it is scheduled directly on this runner's event loop.

    :param process: the process class to submit
    :param inputs: the inputs to be passed to the process
    :return: the calculation node of the process
    """
    assert not utils.is_process_function(process), 'Cannot submit a process function'
    assert not self._closed

    instance = self.instantiate_process(process, *args, **inputs)

    if not instance.metadata.store_provenance:
        raise exceptions.InvalidOperation('cannot submit a process with `store_provenance=False`')

    if instance.metadata.get('dry_run', False):
        raise exceptions.InvalidOperation('cannot submit a process from within another with `dry_run=True`')

    if self._rmq_submit:
        # Persist a checkpoint so a daemon worker can pick the process up, then hand it off.
        self.persister.save_checkpoint(instance)
        instance.close()
        self.controller.continue_process(instance.pid, nowait=False, no_reply=True)
    else:
        # No broker submission: drive the process to termination on this runner's own loop.
        self.loop.add_callback(instance.step_until_terminated)

    return instance.node
def __init__(
    self,
    inputs: Optional[dict] = None,
    logger: Optional[logging.Logger] = None,
    runner: Optional['Runner'] = None,
    enable_persistence: bool = True
) -> None:
    """Initialize a work chain instance.

    Only subclasses of `WorkChain` may be instantiated; constructing the base class itself
    raises `InvalidOperation`.

    :param inputs: work chain inputs
    :param logger: aiida logger
    :param runner: work chain runner
    :param enable_persistence: whether to persist this work chain
    """
    if self.__class__ == WorkChain:
        raise exceptions.InvalidOperation('cannot construct or launch a base `WorkChain` class.')

    super().__init__(inputs, logger, runner, enable_persistence=enable_persistence)

    # Internal run-time state: user context, pending awaitables and the outline stepper.
    self._context = AttributeDict()
    self._awaitables: List[Awaitable] = []
    self._stepper: Optional[Stepper] = None
def __init__(self, inputs=None, logger=None, runner=None, enable_persistence=True):
    """Construct a WorkChain instance.

    Construct the instance only if it is a sub class of `WorkChain`, otherwise raise `InvalidOperation`.

    :param inputs: work chain inputs
    :type inputs: dict

    :param logger: aiida logger
    :type logger: :class:`logging.Logger`

    :param runner: work chain runner
    :type runner: :class:`aiida.engine.runners.Runner`

    :param enable_persistence: whether to persist this work chain
    :type enable_persistence: bool
    """
    # Forbid direct instantiation of the abstract base class itself.
    if self.__class__ == WorkChain:
        raise exceptions.InvalidOperation(
            'cannot construct or launch a base `WorkChain` class.')

    super().__init__(inputs, logger, runner, enable_persistence=enable_persistence)

    # Internal run-time state: the outline stepper, pending awaitables and the user context.
    self._stepper = None
    self._awaitables = []
    self._context = AttributeDict()
def delete(self, pk):
    """Delete the computer with the given pk.

    :param pk: the primary key of the computer to delete
    :raises aiida.common.exceptions.InvalidOperation: if the computer does not exist or cannot
        be deleted, e.g. because at least one node still references it
    """
    try:
        session = get_scoped_session()
        # `Query.get` returns the mapped instance (or `None` if the pk does not exist);
        # deletion must go through the session -- model instances have no `.delete()` method.
        computer = session.query(DbComputer).get(pk)
        if computer is None:
            raise exceptions.InvalidOperation('Unable to delete the requested computer: it does not exist')
        session.delete(computer)
        session.commit()
    except SQLAlchemyError as exc:
        raise exceptions.InvalidOperation(
            'Unable to delete the requested computer: it is possible that there '
            'is at least one node using this computer (original message: {})'
            .format(exc)) from exc
def copy(self):
    """Return an unstored duplicate of this `Computer`; requires the original to be stored."""
    if not self.is_stored:
        raise exceptions.InvalidOperation('You can copy a computer only after having stored it')

    dbcomputer = models.DbComputer.objects.get(pk=self.pk)
    # Resetting the pk makes Django treat the fetched row as a brand-new, unsaved object.
    dbcomputer.pk = None

    return self.__class__.from_dbmodel(dbcomputer)
def delete(self, pk):
    """Delete the computer with the given pk.

    :param pk: the primary key of the computer to delete
    :raises aiida.common.exceptions.InvalidOperation: if the computer is protected from
        deletion because at least one node still references it
    """
    from django.db.models.deletion import ProtectedError
    try:
        models.DbComputer.objects.filter(pk=pk).delete()
    except ProtectedError as exc:
        # Fixed message: the two concatenated literals previously rendered as "thereis".
        raise exceptions.InvalidOperation(
            'Unable to delete the requested computer: there '
            'is at least one node using this computer'
        ) from exc
def run(self):
    """Run the calculation job.

    This means invoking the `presubmit` and storing the temporary folder in the node's repository. Then we move the
    process in the `Wait` state, waiting for the `UPLOAD` transport task to be started.

    :return: the node's `exit_status` when restored from the cache, a `plumpy.Stop` for a dry run,
        or a `plumpy.Wait` instruction that launches the `UPLOAD` transport task
    :raises aiida.common.exceptions.InvalidOperation: if the node has unstored cached links
    :raises aiida.common.exceptions.InputValidationError: if an input code cannot run on the computer
    """
    from aiida.orm import Code, load_node
    from aiida.common.folders import SandboxFolder, SubmitTestFolder
    from aiida.common.exceptions import InputValidationError

    # The following conditional is required for the caching to properly work. Even if the source node has a process
    # state of `Finished` the cached process will still enter the running state. The process state will have then
    # been overridden by the engine to `Running` so we cannot check that, but if the `exit_status` is anything other
    # than `None`, it should mean this node was taken from the cache, so the process should not be rerun.
    if self.node.exit_status is not None:
        return self.node.exit_status

    # A dry run writes into a persistent test folder so the user can inspect it afterwards,
    # instead of a throw-away sandbox.
    if self.inputs.metadata.dry_run:
        folder_class = SubmitTestFolder
    else:
        folder_class = SandboxFolder

    with folder_class() as folder:
        computer = self.node.computer

        if not self.inputs.metadata.dry_run and self.node.has_cached_links():
            raise exceptions.InvalidOperation('calculation node has unstored links in cache')

        calc_info, script_filename = self.presubmit(folder)
        calc_info.uuid = str(self.uuid)

        # Verify that all the input codes can actually run on the target computer before uploading anything.
        input_codes = [load_node(_.code_uuid, sub_classes=(Code,)) for _ in calc_info.codes_info]

        for code in input_codes:
            if not code.can_run_on(computer):
                raise InputValidationError(
                    'The selected code {} for calculation {} cannot run on computer {}'.format(
                        code.pk, self.node.pk, computer.name))

        # After this call, no modifications to the folder should be done
        self.node.put_object_from_tree(folder.abspath, force=True)

        if self.inputs.metadata.dry_run:
            # Perform the upload locally against the test folder, record where the files
            # were written on the node, and stop the process instead of submitting.
            from aiida.engine.daemon.execmanager import upload_calculation
            from aiida.transports.plugins.local import LocalTransport
            with LocalTransport() as transport:
                transport.chdir(folder.abspath)
                upload_calculation(self.node, transport, calc_info,
                                   script_filename, dry_run=True)
                self.node.dry_run_info = {
                    'folder': folder.abspath,
                    'script_filename': script_filename
                }
            return plumpy.Stop(None, True)

    # Launch the upload operation
    return plumpy.Wait(msg='Waiting to upload', data=(UPLOAD_COMMAND, calc_info, script_filename))
def __init__(self, *args, **kwargs):
    """Construct a CalcJob instance.

    Only subclasses of `CalcJob` may be instantiated; attempting to construct the base class
    itself raises `InvalidOperation`. See documentation of :class:`aiida.engine.Process`.
    """
    if self.__class__ == CalcJob:
        raise exceptions.InvalidOperation('cannot construct or launch a base `CalcJob` class.')

    super().__init__(*args, **kwargs)
def copy(self):
    """Return an unstored duplicate of this `Computer`; requires the original to be stored."""
    session = get_scoped_session()

    if not self.is_stored:
        raise exceptions.InvalidOperation(
            'You can copy a computer only after having stored it')

    # Duplicate the database model and detach the clone from the ORM identity map so it
    # behaves as a brand-new, not-yet-committed row before registering it with the session.
    dbcomputer_clone = copy(self._dbmodel)
    make_transient(dbcomputer_clone)
    session.add(dbcomputer_clone)

    return self.__class__.from_dbmodel(dbcomputer_clone)
def run(self):
    """Run the calculation: move it to the `TOSUBMIT` state and wait for it to be copied over,
    submitted, retrieved, etc.

    :return: 0 if the calculation already finished, otherwise a `plumpy.Wait` instruction
    :raises aiida.common.exceptions.InvalidOperation: if the calculation is in neither the
        `FINISHED` nor the `NEW` state
    """
    current_state = self.calc.get_state()

    # An already finished calculation needs no further work.
    if current_state == calc_states.FINISHED:
        return 0

    if current_state != calc_states.NEW:
        raise exceptions.InvalidOperation(
            'Cannot submit a calculation not in {} state (the current state is {})'.format(
                calc_states.NEW, current_state
            ))

    self.calc._set_state(calc_states.TOSUBMIT)

    # Launch the submit operation
    return plumpy.Wait(msg='Waiting to submit', data=SUBMIT_COMMAND)
def _setup_db_record(self):
    """
    Create the database record for this process and the links with respect to its inputs

    This function will set various attributes on the node that serve as a proxy for attributes
    of the Process. This is essential as otherwise this information could only be introspected
    through the Process itself, which is only available to the interpreter that has it in memory.
    To make this data introspectable from any interpreter, for example for the command line interface,
    certain Process attributes are proxied through the calculation node.

    In addition, the parent calculation will be setup with a CALL link if applicable and all inputs
    will be linked up as well.

    :raises aiida.common.exceptions.InvalidOperation: if the parent process is a calculation type
        process, which is forbidden from calling sub processes
    """
    assert self.inputs is not None
    assert not self.node.is_sealed, 'process node cannot be sealed when setting up the database record'

    # Store important process attributes in the node proxy
    self.node.set_process_state(None)
    self.node.set_process_label(self.__class__.__name__)
    self.node.set_process_type(self.__class__.build_process_type())

    parent_calc = self.get_parent_calc()

    # Only record the call link when provenance is being stored for this process.
    if parent_calc and self.metadata.store_provenance:

        # Calculations are not allowed to call sub processes; only workflows may do so.
        if isinstance(parent_calc, orm.CalculationNode):
            raise exceptions.InvalidOperation('calling processes from a calculation type process is forbidden.')

        # The link type depends on the type of the called node: CALL_CALC for calculations,
        # CALL_WORK for workflows.
        if isinstance(self.node, orm.CalculationNode):
            self.node.add_incoming(parent_calc, LinkType.CALL_CALC, self.metadata.call_link_label)

        elif isinstance(self.node, orm.WorkflowNode):
            self.node.add_incoming(parent_calc, LinkType.CALL_WORK, self.metadata.call_link_label)

    self._setup_metadata()
    self._setup_inputs()
def __copy__(self):
    """Raise `InvalidOperation`: shallow-copying a `Data` node is not supported.

    Use `copy.deepcopy` or call `Data.clone()` to obtain a duplicate instead.
    """
    raise exceptions.InvalidOperation('copying a Data node is not supported, use copy.deepcopy')