def run(self):
    """Set up an immigrant calculation and jump straight to the retrieve step.

    Validates that the input ``settings`` node provides ``import_from_path``,
    registers the remote working directory and its ``RemoteData`` output, then
    returns a ``Wait`` command so the engine runs only the retrieve task.

    :raises InputValidationError: if ``settings.import_from_path`` is missing.
    """
    import plumpy
    from aiida.engine.processes.calcjobs.tasks import RETRIEVE_COMMAND
    from aiida.common.folders import SandboxFolder

    # Fail fast: an immigrant run is meaningless without the path to import
    # from, so validate BEFORE super().run() and presubmit mutate the node or
    # write any files (the original raised only after those side effects).
    settings = self.inputs.get('settings', None)
    settings = settings.get_dict() if settings else {}
    remote_path = settings.get('import_from_path', None)
    if not remote_path:
        raise InputValidationError(
            'immigrant calculations need an input "settings" containing a key "import_from_path"!')

    _ = super(VaspImmigrant, self).run()

    # Make sure the retrieve list is set (done in presubmit so we need to call that also)
    with SandboxFolder() as folder:
        self.presubmit(folder)

    self.node.set_remote_workdir(remote_path)  # pylint: disable=protected-access
    # Expose the imported directory as the calculation's remote_folder output.
    remotedata = get_data_node('remote', computer=self.node.computer, remote_path=remote_path)
    remotedata.add_incoming(self.node, link_type=LinkType.CREATE, link_label='remote_folder')
    remotedata.store()

    # Skip upload/submit entirely; the files already exist remotely.
    return plumpy.Wait(msg='Waiting to retrieve', data=RETRIEVE_COMMAND)
def run(self):
    """Prepare the calculation job and hand off to the engine.

    Invokes `presubmit` and enters the `Wait` state so the `UPLOAD` transport
    task can start. A dry run instead uploads into a local test folder and
    stops the process immediately.
    """
    if self.inputs.metadata.dry_run:
        from aiida.common.folders import SubmitTestFolder
        from aiida.engine.daemon.execmanager import upload_calculation
        from aiida.transports.plugins.local import LocalTransport

        with LocalTransport() as local_transport, SubmitTestFolder() as test_folder:
            calc_info = self.presubmit(test_folder)
            local_transport.chdir(test_folder.abspath)
            upload_calculation(
                self.node, local_transport, calc_info, test_folder, inputs=self.inputs, dry_run=True)
            self.node.dry_run_info = {
                'folder': test_folder.abspath,
                'script_filename': self.node.get_option('submit_script_filename')
            }
            return plumpy.Stop(None, True)

    # The following conditional is required for the caching to properly work. Even if the source node has a process
    # state of `Finished` the cached process will still enter the running state. The process state will have then
    # been overridden by the engine to `Running` so we cannot check that, but if the `exit_status` is anything other
    # than `None`, it should mean this node was taken from the cache, so the process should not be rerun.
    if self.node.exit_status is not None:
        return self.node.exit_status

    # Launch the upload operation
    return plumpy.Wait(msg='Waiting to upload', data=UPLOAD_COMMAND)
def run(self):
    """Resume a job calculation from its stored state.

    Re-enters the engine state machine at the wait point matching the
    calculation's current scheduler state; a NEW calculation runs from scratch.

    :return: a `plumpy` command for the engine, the parse result for a
        PARSING calculation, or implicitly ``None`` for any unmatched state.
    """
    state = self.calc.get_state()

    if state == calc_states.NEW:
        return super(ContinueJobCalculation, self).run()

    if state in [calc_states.TOSUBMIT, calc_states.SUBMITTING]:
        return plumpy.Wait(msg='Waiting to submit', data=SUBMIT_COMMAND)

    # BUGFIX: was `state in calc_states.WITHSCHEDULER`, which performs a
    # substring-membership test between two state strings rather than an
    # equality check like every other single-state branch here.
    if state == calc_states.WITHSCHEDULER:
        return plumpy.Wait(msg='Waiting for scheduler', data=UPDATE_SCHEDULER_COMMAND)

    if state in [calc_states.COMPUTED, calc_states.RETRIEVING]:
        return plumpy.Wait(msg='Waiting to retrieve', data=RETRIEVE_COMMAND)

    if state == calc_states.PARSING:
        return self.retrieved(True)

    # Any other state falls through and returns None, as in the original.
def run(self):
    """Run the calculation job. This means invoking the `presubmit` and storing the temporary folder in the node's
    repository. Then we move the process in the `Wait` state, waiting for the `UPLOAD` transport task to be started.

    :return: the cached exit status, a `plumpy.Stop` command (dry run), or a `plumpy.Wait` command for the engine.
    :raises InputValidationError: if one of the input codes cannot run on the target computer.
    """
    from aiida.orm import Code, load_node
    from aiida.common.folders import SandboxFolder, SubmitTestFolder
    from aiida.common.exceptions import InputValidationError

    # The following conditional is required for the caching to properly work. Even if the source node has a process
    # state of `Finished` the cached process will still enter the running state. The process state will have then
    # been overridden by the engine to `Running` so we cannot check that, but if the `exit_status` is anything other
    # than `None`, it should mean this node was taken from the cache, so the process should not be rerun.
    if self.node.exit_status is not None:
        return self.node.exit_status

    # A dry run writes into a persistent test folder; a real run uses a throwaway sandbox.
    if self.inputs.metadata.dry_run:
        folder_class = SubmitTestFolder
    else:
        folder_class = SandboxFolder

    with folder_class() as folder:
        computer = self.node.computer

        # NOTE(review): presumably unstored cached links would be lost once the node is used
        # for a real submission — confirm against the engine's caching contract.
        if not self.inputs.metadata.dry_run and self.node.has_cached_links():
            raise exceptions.InvalidOperation('calculation node has unstored links in cache')

        calc_info, script_filename = self.presubmit(folder)
        calc_info.uuid = str(self.uuid)
        # Resolve each referenced code node and verify it can run on the chosen computer.
        input_codes = [load_node(_.code_uuid, sub_classes=(Code,)) for _ in calc_info.codes_info]

        for code in input_codes:
            if not code.can_run_on(computer):
                raise InputValidationError(
                    'The selected code {} for calculation {} cannot run on computer {}'.format(
                        code.pk, self.node.pk, computer.name))

        # After this call, no modifications to the folder should be done
        self.node.put_object_from_tree(folder.abspath, force=True)

        if self.inputs.metadata.dry_run:
            from aiida.engine.daemon.execmanager import upload_calculation
            from aiida.transports.plugins.local import LocalTransport
            with LocalTransport() as transport:
                transport.chdir(folder.abspath)
                upload_calculation(self.node, transport, calc_info,
                                   script_filename, dry_run=True)
                # Record where the test submission was written so the caller can inspect it.
                self.node.dry_run_info = {
                    'folder': folder.abspath,
                    'script_filename': script_filename
                }
            # Stop the process immediately; nothing is actually submitted in a dry run.
            return plumpy.Stop(None, True)

    # Launch the upload operation
    return plumpy.Wait(msg='Waiting to upload', data=(UPLOAD_COMMAND, calc_info, script_filename))
def run(self):
    """
    Run the calculation: flag it TOSUBMIT and hand control to the engine,
    which takes care of copying it over, submitting, retrieving, etc.
    """
    current_state = self.calc.get_state()

    # An already finished calculation has nothing left to do.
    if current_state == calc_states.FINISHED:
        return 0

    # Only a freshly created calculation may be submitted.
    if current_state != calc_states.NEW:
        raise exceptions.InvalidOperation(
            'Cannot submit a calculation not in {} state (the current state is {})'.format(
                calc_states.NEW, current_state))

    self.calc._set_state(calc_states.TOSUBMIT)

    # Launch the submit operation
    return plumpy.Wait(msg='Waiting to submit', data=SUBMIT_COMMAND)
def run(self):
    """Set up an old-style immigrant calculation and jump to the retrieve step.

    Validates ``settings.import_from_path``, patches the retrieve-list hooks,
    records the remote working directory, copies the VASP input files into the
    raw input folder and marks the calculation COMPUTED so the engine only
    needs to retrieve.

    :raises InputValidationError: if ``settings.import_from_path`` is missing.
    """
    import plumpy
    from aiida.common.datastructures import calc_states
    from aiida.common.links import LinkType
    from aiida.work.job_processes import RETRIEVE_COMMAND
    from aiida.orm.calculation.job import _input_subfolder

    # Fail fast: validate the required import path BEFORE super().run(), the
    # monkey-patching and any state/attribute mutation on the calculation node
    # (the original raised only after those side effects).
    settings = self.calc.get_inputs_dict().get('settings', None)
    settings = settings.get_dict() if settings else {}
    remote_path = settings.get('import_from_path', None)
    if not remote_path:
        raise InputValidationError('immigrant calculations need an input "settings" containing a key "import_from_path"!')

    _ = super(VaspImmigrantJobProcess, self).run()

    def return_empty_list():
        return []

    # Patch the retrieve hooks: retrieve the full list, no singlefile/temporary lists.
    setattr(self.calc, '_get_retrieve_list', self.calc.max_retrieve_list)
    setattr(self.calc, '_get_retrieve_singlefile_list', return_empty_list)
    setattr(self.calc, '_get_retrieve_temporary_list', return_empty_list)

    self.calc._set_state(calc_states.SUBMITTING)  # pylint: disable=protected-access
    self.calc._set_attr('remote_workdir', remote_path)  # pylint: disable=protected-access

    # Expose the imported directory as the calculation's remote_folder output.
    remotedata = get_data_node('remote', computer=self.calc.get_computer(), remote_path=remote_path)
    remotedata.add_link_from(self.calc, label='remote_folder', link_type=LinkType.CREATE)
    remotedata.store()

    # Copy the VASP input files from the remote directory into the node's raw
    # input folder; optional files only when the matching input was given.
    input_files = ['INCAR', 'KPOINTS', 'POSCAR']
    if 'wavefunctions' in self.inputs:
        input_files.append('WAVECAR')
    if 'charge_density' in self.inputs:
        input_files.append('CHGCAR')

    remote_path = py_path.local(remote_path)
    with self.calc.get_computer().get_transport() as transport:
        raw_input_folder = self.calc.folder.get_subfolder(_input_subfolder, create=True)
        for file_name in input_files:
            transport.get(remote_path.join(file_name).strpath, raw_input_folder.abspath)

    self.calc._set_state(calc_states.COMPUTED)  # pylint: disable=protected-access
    return plumpy.Wait(msg='Waiting to retrieve', data=RETRIEVE_COMMAND)
def run(self):
    """Park the process immediately; the engine resumes it at ``next_step``."""
    continuation = self.next_step
    return plumpy.Wait(continuation)
def run(self):
    """Report the current state, then wait; ``after_resume_and_exec`` continues."""
    status_line = 'Now I am running: {:}'.format(self.state)
    print(status_line)
    return plumpy.Wait(self.after_resume_and_exec)
def run(self, **kwargs):
    """Report the current state, then wait; ``after_resume_and_exec`` continues.

    Extra keyword arguments are accepted but ignored, as in the original.
    """
    message = "Now I am running: {:}".format(self.state)
    print(message)
    return plumpy.Wait(self.after_resume_and_exec)