Example #1
    def run_task(self, fw_spec):

        try:
            vasp_run = Vasprun("vasprun.xml", parse_dos=False,
                               parse_eigen=False)
            outcar = Outcar(os.path.join(os.getcwd(), "OUTCAR"))
        except Exception as e:
            raise RuntimeError("Can't get valid results from relaxed run: " +
                               str(e))

        user_incar_settings = MPNonSCFVaspInputSet.get_incar_settings(
            vasp_run, outcar)
        user_incar_settings.update({"NPAR": 2})
        structure = MPNonSCFVaspInputSet.get_structure(vasp_run, outcar,
                                                       initial_structure=True)

        if self.line:
            mpnscfvip = MPNonSCFVaspInputSet(user_incar_settings, mode="Line")
            for k, v in mpnscfvip.get_all_vasp_input(
                    structure, generate_potcar=True).items():
                v.write_file(os.path.join(os.getcwd(), k))
            kpath = HighSymmKpath(structure)
            return FWAction(stored_data={"kpath": kpath.kpath,
                                         "kpath_name": kpath.name})
        else:
            mpnscfvip = MPNonSCFVaspInputSet(user_incar_settings,
                                             mode="Uniform")
            for k, v in mpnscfvip.get_all_vasp_input(
                    structure, generate_potcar=True).items():
                v.write_file(os.path.join(os.getcwd(), k))
            return FWAction()
Example #2
    def run_task(self, fw_spec):
        user_incar_settings = {"NCORE": 8}
        # vol = Poscar.from_file("POSCAR").structure.volume
        # kppra_vol = self.kpoints_density / vol
        if self.line:
            MPNonSCFSet.from_prev_calc(
                os.getcwd(),
                mode="Line",
                copy_chgcar=False,
                user_incar_settings=user_incar_settings,
                kpoints_line_density=self.kpoints_line_density).write_input(
                    '.')
            kpath = HighSymmKpath(Poscar.from_file("POSCAR").structure)

            return FWAction(stored_data={
                "kpath": kpath.kpath,
                "kpath_name": kpath.name
            })
        else:
            MPNonSCFSet.from_prev_calc(
                os.getcwd(),
                mode="Uniform",
                copy_chgcar=False,
                user_incar_settings=user_incar_settings,
                reciprocal_density=self.kpoints_density).write_input('.')
            return FWAction()
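
A note on the API used above: MPNonSCFSet.from_prev_calc reads the outputs of a finished calculation and builds the non-SCF input set directly. A minimal standalone sketch, assuming a completed static run in prev_dir (directory names and the density value are illustrative):

    from pymatgen.io.vasp.sets import MPNonSCFSet

    # Uniform mode is for DOS runs; reciprocal_density sets the k-point mesh
    vis = MPNonSCFSet.from_prev_calc("prev_dir", mode="Uniform",
                                     reciprocal_density=100)
    vis.write_input("nscf_dir")  # writes INCAR, KPOINTS, POSCAR and POTCAR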
Example #3
    def run_task(self, fw_spec):
        cmd_spec = self['command_spec']
        ilabels = self.get('inputs')
        olabels = self.get('outputs')
        if ilabels is None:
            ilabels = []
        else:
            assert isinstance(ilabels, list), '"inputs" must be a list'
        if olabels is None:
            olabels = []
        else:
            assert isinstance(olabels, list), '"outputs" must be a list'

        inputs = []
        outputs = []
        for ios, labels in zip([inputs, outputs], [ilabels, olabels]):
            # cmd_spec: {label: {binding: {...}, source: {...}, target: {...}}}
            for label in labels:
                if isinstance(cmd_spec[label], basestring):
                    inp = []
                    for item in fw_spec[cmd_spec[label]]:
                        if 'source' in item:
                            inp.append(item)
                        else:
                            inp.append({'source': item})
                else:
                    inp = {}
                    for key in ['binding', 'source', 'target']:
                        if key in cmd_spec[label]:
                            item = cmd_spec[label][key]
                            if isinstance(item, basestring):
                                inp[key] = fw_spec[item]
                            elif isinstance(item, dict):
                                inp[key] = item
                            else:
                                raise ValueError
                ios.append(inp)
        command = cmd_spec['command']

        outlist = self.command_line_tool(command, inputs, outputs)

        if len(outlist) > 0:
            if self.get('chunk_number') is not None:
                mod_spec = []
                if len(olabels) > 1:
                    assert len(olabels) == len(outlist)
                    for olab, out in zip(olabels, outlist):
                        for item in out:
                            mod_spec.append({'_push': {olab: item}})
                else:
                    for out in outlist:
                        mod_spec.append({'_push': {olabels[0]: out}})
                return FWAction(mod_spec=mod_spec)
            else:
                output_dict = {}
                for olab, out in zip(olabels, outlist):
                    output_dict[olab] = out
                return FWAction(update_spec=output_dict)
        else:
            return FWAction()
Example #4
    def _run_task_internal(self, fw_spec, stdin):
        # run the program
        stdout = subprocess.PIPE if self.store_stdout or self.stdout_file else None
        stderr = subprocess.PIPE if self.store_stderr or self.stderr_file else None
        returncodes = []
        for s in self.script:
            p = subprocess.Popen(s,
                                 executable=self.shell_exe,
                                 stdin=stdin,
                                 stdout=stdout,
                                 stderr=stderr,
                                 shell=self.use_shell)

            # send the standard in and get back the standard out and returncode;
            # use fresh names so the PIPE flags above aren't clobbered for the
            # next command in the loop
            if self.stdin_key:
                (stdout_data, stderr_data) = p.communicate(fw_spec[self.stdin_key])
            else:
                (stdout_data, stderr_data) = p.communicate()
            returncodes.append(p.returncode)

            # stop execution if any script command fails
            if p.returncode != 0:
                break

        # write out the output, error files if specified

        stdout = stdout_data.decode('utf-8') if isinstance(stdout_data,
                                                           bytes) else stdout_data
        stderr = stderr_data.decode('utf-8') if isinstance(stderr_data,
                                                           bytes) else stderr_data

        if self.stdout_file:
            with open(self.stdout_file, 'a+') as f:
                f.write(stdout)

        if self.stderr_file:
            with open(self.stderr_file, 'a+') as f:
                f.write(stderr)

        # write the output keys
        output = {}

        if self.store_stdout:
            output['stdout'] = stdout

        if self.store_stderr:
            output['stderr'] = stderr

        output['returncode'] = returncodes[-1]
        output['all_returncodes'] = returncodes

        if self.defuse_bad_rc and sum(returncodes) != 0:
            return FWAction(stored_data=output, defuse_children=True)

        elif self.fizzle_bad_rc and sum(returncodes) != 0:
            raise RuntimeError(
                'ScriptTask fizzled! Return code: {}'.format(returncodes))

        return FWAction(stored_data=output)
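
For context, a short usage sketch for the internals above, using the standard ScriptTask constructor and its from_str convenience method (the parameter choices are illustrative):

    from fireworks import Firework, ScriptTask

    fw = Firework(ScriptTask(script=['echo "step 1"', 'echo "step 2"'],
                             store_stdout=True,    # keep stdout in stored_data
                             fizzle_bad_rc=True,   # raise on a nonzero return code
                             use_shell=True))
    # or the one-line convenience constructor:
    fw2 = Firework(ScriptTask.from_str('echo "hello" >> hello.txt'))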
Example #5
 def run_task(self, fw_spec):
     self.run_function(fw_spec)
     self.increment_counter(fw_spec)
     if not self.eval_condition(fw_spec):
         firework = Firework(
             tasks=[load_object(task) for task in fw_spec['_tasks']],
             spec=fw_spec,
             name=self._fw_name)
         return FWAction(detours=firework, exit=False)
     else:
         return FWAction(exit=True)
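
The detour above re-submits a copy of the current Firework with the same task list, which is the FireWorks idiom for a while-loop. A hedged sketch of the counter and condition helpers such a task might define (the method names mirror the calls above; the spec keys are illustrative):

    def increment_counter(self, fw_spec):
        # the mutated spec is passed to the detour Firework via spec=fw_spec,
        # so the counter survives across iterations
        fw_spec['_loop_counter'] = fw_spec.get('_loop_counter', 0) + 1

    def eval_condition(self, fw_spec):
        # True terminates the loop
        return fw_spec['_loop_counter'] >= fw_spec.get('_max_iterations', 10)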
Example #6
 def run_task(self, fw_spec):
     user_incar_settings = {"NPAR": 2}
     if self.line:
         MPNonSCFVaspInputSet.from_previous_vasp_run(os.getcwd(), mode="Line", copy_chgcar=False,
                                                     user_incar_settings=user_incar_settings, kpoints_line_density=self.kpoints_line_density)
         kpath = HighSymmKpath(Poscar.from_file("POSCAR").structure)
         return FWAction(stored_data={"kpath": kpath.kpath,
                                      "kpath_name": kpath.name})
     else:
         MPNonSCFVaspInputSet.from_previous_vasp_run(os.getcwd(), mode="Uniform", copy_chgcar=False,
                                                     user_incar_settings=user_incar_settings,
                                                     kpoints_density=self.kpoints_density)
         return FWAction()
Example #7
 def run_task(self, fw_spec):
     """
      set up a measurement task using the prior calibration jobs
      and run it
     """
     cal_objs = []
     logger.info(
         'The measurement task will be constructed from {} calibration objects'
         .format(len(fw_spec['cal_objs'])))
     for calparams in fw_spec['cal_objs']:
         calparams.update({'que_params': self.get('que_params')})
         cal = get_cal_obj(calparams)
         cal_objs.append(cal)
     done = load_class("mpinterfaces.calibrate",
                       "Calibrate").check_calcs(cal_objs)
     if not done:
         logger.info('Calibration not done yet. Try again later')
         logger.info('All subsequent fireworks will be defused')
         logger.info("""Try re-running this firework again later.
          Re-running this firework will activate all the subsequent fireworks too"""
                     )
         logger.info('This fireworks id = {}'.format(self.get("fw_id")))
         return FWAction(defuse_children=True)
         # to enable dynamic workflow, uncomment the following
         # if self.get("fw_id"):
         #    fw_id = int(self.get("fw_id")) + 1
         #    self["fw_id"] = fw_id
         #    new_fw = Firework(MPINTMeasurementTask(self),
         #                      spec={'cal_objs':fw_spec['cal_objs']},
         #                      name = 'new_fw', fw_id = -fw_id)
         # else:
         #    new_fw = Firework(MPINTMeasurementTask(self),
         #                      spec={'cal_objs':fw_spec['cal_objs']},
         #                      name = 'new_fw')
         #
         # return FWAction(detours=new_fw)
     else:
         measure = load_class("mpinterfaces.measurement",
                              self['measurement'])(cal_objs, **self.get(
                                  "other_params", {}))
         job_cmd = None
         if self.get("job_cmd", None) is not None:
             job_cmd = self.get("job_cmd")
         measure.setup()
         measure.run(job_cmd=job_cmd)
         cal_list = []
         for cal in measure.cal_objs:
             d = cal.as_dict()
             d.update({'que_params': self.get('que_params')})
             cal_list.append(d)
         return FWAction(update_spec={'cal_objs': cal_list})
Example #8
    def run_task(self, fw_spec):
        smaller = fw_spec['smaller']
        larger = fw_spec['larger']
        stop_point = fw_spec['stop_point']

        m_sum = smaller + larger
        if m_sum < stop_point:
            print 'The next Fibonacci number is: {}'.format(m_sum)
            # create a new Fibonacci Adder to add to the workflow
            new_fw = FireWork(FibonacciAdderTask(), {'smaller': larger, 'larger': m_sum, 'stop_point': stop_point})
            return FWAction('CREATE', {'next_fibnum': m_sum}, {'create_fw': new_fw})

        else:
            print 'We have now exceeded our limit; (the next Fibonacci number would have been: {})'.format(m_sum)
            return FWAction('CONTINUE')
Example #9
    def run_task(self, fw_spec):
        smaller = fw_spec['smaller']
        larger = fw_spec['larger']
        stop_point = fw_spec['stop_point']

        m_sum = smaller + larger
        if m_sum < stop_point:
            print('The next Fibonacci number is: {}'.format(m_sum))
            # create a new Fibonacci Adder to add to the workflow
            new_fw = Firework(FibonacciAdderTask(), {'smaller': larger, 'larger': m_sum, 'stop_point': stop_point})
            return FWAction(stored_data={'next_fibnum': m_sum}, additions=new_fw)

        else:
            print('We have now exceeded our limit; (the next Fibonacci number would have been: {})'.format(m_sum))
            return FWAction()
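
A self-contained sketch of how the adder above is typically wired up and launched, assuming a local MongoDB and the standard FireWorks API (the seed values are illustrative):

    from fireworks import Firework, FWAction, FiretaskBase, LaunchPad, explicit_serialize
    from fireworks.core.rocket_launcher import rapidfire

    @explicit_serialize
    class FibonacciAdderTask(FiretaskBase):
        def run_task(self, fw_spec):
            m_sum = fw_spec['smaller'] + fw_spec['larger']
            if m_sum < fw_spec['stop_point']:
                new_fw = Firework(FibonacciAdderTask(),
                                  {'smaller': fw_spec['larger'], 'larger': m_sum,
                                   'stop_point': fw_spec['stop_point']})
                return FWAction(stored_data={'next_fibnum': m_sum}, additions=new_fw)
            return FWAction()

    lp = LaunchPad()  # connects to MongoDB on localhost by default
    lp.add_wf(Firework(FibonacciAdderTask(),
                       {'smaller': 0, 'larger': 1, 'stop_point': 100}))
    rapidfire(lp)  # keeps launching until no Fireworks are ready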
Example #10
    def task_analysis(self, fw_spec):
        """
        A relax task forwards an updated structure to the following tasks/FWs.
        If the status is Unconverged, no exception is raised; instead a new FW
        is created. Previous dependencies are not forwarded to the new FW.
        """

        # Raise an exception if the final status is not Unconverged or OK.
        if self.abitask.status < self.abitask.S_UNCONVERGED or self.abitask.status == self.abitask.S_ERROR:
            raise AbinitRuntimeError(self)

        if self.abitask.status == self.abitask.S_UNCONVERGED:
            stored_data = {'history': list(self.abitask.history)}
            if self.additional_steps <= 1:
                raise AbinitRuntimeError(self)
            new_task = self.copy()
            new_task.structure = self.abitask.read_final_structure()
            new_task.deps = self.parse_deps({self: "WFK"})
            new_task.additional_steps = self.additional_steps - 1
            new_spec = {'abi_deps': {'dep_' + str(self.task_id): os.getcwd()}}
            if '_queueadapter' in fw_spec:
                new_spec['_queueadapter'] = fw_spec.get('_queueadapter')
            if '_launch_dir' in fw_spec:
                new_spec['_launch_dir'] = self.new_workdir(
                    fw_spec['_launch_dir'])
            new_step = Firework(new_task, spec=new_spec)
            return FWAction(stored_data=stored_data, detours=new_step)
        else:
            return super(MultiStepRelaxStrategyFireTask,
                         self).task_analysis(fw_spec)
Example #11
    def decorate_fwaction(
        self, fwaction: FWAction, my_spec: Dict[str, any], m_fw: Firework, launch_dir: str
    ) -> FWAction:

        if my_spec.get("_pass_job_info"):
            job_info = list(my_spec.get("_job_info", []))
            this_job_info = {"fw_id": m_fw.fw_id, "name": m_fw.name, "launch_dir": launch_dir, "state": m_fw.state}
            if this_job_info not in job_info:
                job_info.append(this_job_info)
            fwaction.mod_spec.append({"_push_all": {"_job_info": job_info}})

        if my_spec.get("_preserve_fworker"):
            fwaction.update_spec["_fworker"] = self.fworker.name

        if my_spec.get("_files_out"):
            # One potential area of conflict is if a fw depends on two fws
            # and both fws generate the exact same file. That can lead to
            # overriding. But as far as I know, this is an illogical use
            # of a workflow, so I can't see it happening in normal use.
            for k, v in my_spec.get("_files_out").items():
                files = glob.glob(os.path.join(launch_dir, v))
                if files:
                    filepath = sorted(files)[-1]
                    fwaction.mod_spec.append({"_set": {f"_files_prev->{k:s}": filepath}})

        return fwaction
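
The hooks above are driven by reserved spec keys. A hedged sketch of a spec that exercises all three (the file pattern is illustrative):

    spec = {
        "_pass_job_info": True,               # push this job's info onto _job_info
        "_preserve_fworker": True,            # pin child Fireworks to this FWorker
        "_files_out": {"chgcar": "CHGCAR*"},  # last sorted match -> _files_prev->chgcar
    }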
Example #12
 def run_task(self, fw_spec):
     task = SingleVaspGWWork(structure=self.structure,
                             job=self.job,
                             spec=self.spec,
                             option=self.option)
     task.create_input()
     return FWAction()
Example #13
    def run_task(self, fw_spec):
        name = self.get_system() + self.job
        logging.basicConfig(filename=os.path.join(self.get_launch_dir(),
                                                  name + '.log'),
                            level=logging.DEBUG)
        # vasp_exe = get_vasp_environment()['vasp_exe']
        # vasp_mpi_executer = get_vasp_environment()['vasp_mpi_executer']
        # n_tasks = GWG0W0VaspInputSet(self.structure).get_npar(self.structure)

        frontend_serial = True
        custodian = False

        if frontend_serial:
            # fake run, no actual vasp execution
            base = os.getcwdu()
            abs_work_dir = os.path.join(base, self.get_launch_dir())
            os.chdir(abs_work_dir)
            cmd = ["/home/setten/scripts/vasp"]
            subprocess.call(cmd)
            os.chdir(base)
        elif custodian:
            # mpirunvasp = [vasp_mpi_executer, '-np', n_tasks, vasp_exe]
            pass

        return FWAction()
Example #14
    def run_task(self, fw_spec):
        import ast
        f = ast.parse(self['expression'], mode='eval')
        assert isinstance(f.body, ast.Lambda)
        func = eval(compile(f, '', 'eval'))

        inputs = self.get('inputs', [])
        assert isinstance(inputs, list)
        args = [fw_spec[item] for item in inputs]

        output = func(*args)

        outputs = self.get('outputs', [])
        assert isinstance(outputs, list)
        actions = {}
        if len(outputs) == 1:
            if self.get('chunk_number') is None:
                actions['update_spec'] = {outputs[0]: output}
            else:
                if isinstance(output, (list, tuple, set)):
                    mod_spec = [{'_push': {outputs[0]: i}} for i in output]
                else:
                    mod_spec = [{'_push': {outputs[0]: output}}]
                actions['mod_spec'] = mod_spec
        elif len(outputs) > 1:
            assert isinstance(output, (list, tuple, set))
            assert len(output) == len(outputs)
            actions['update_spec'] = dict(zip(outputs, output))
        if self.get('stored_data_varname'):
            actions['stored_data'] = {self['stored_data_varname']: output}
        if len(actions) > 0:
            return FWAction(**actions)
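
The ast.parse / compile / eval sequence above turns a user-supplied lambda string into a callable while rejecting anything that is not a bare lambda. A standalone illustration:

    import ast

    f = ast.parse("lambda x, y: x + y", mode='eval')
    assert isinstance(f.body, ast.Lambda)  # refuse arbitrary expressions
    func = eval(compile(f, '', 'eval'))
    print(func(2, 3))  # -> 5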
Example #15
    def run_task(self, fw_spec):

        launch_dir = fw_spec['_launch_dir']
        gs_dir = fw_spec['ground_state_dir']

        # move the large density files to the working directory
        for filename in ['AECCAR0', 'AECCAR2']:
            src = gs_dir + '/' + filename
            dst = launch_dir + '/' + filename
            shutil.move(src, dst)

        src = gs_dir + '/CHGCAR'
        dst = launch_dir + '/CHGCAR'
        shutil.copy(src, dst)

        with open('bader_job.sh', 'w') as f:
            f.write(BADER_job_template)

        os.system('bash bader_job.sh')

        # CASIR workaround
        self.change_gid()

        # end of the line
        return FWAction()
Example #16
    def run_task(self, fw_spec):
        chgcar_start = False
        # read the VaspInput from the previous run

        poscar = Poscar.from_file(zpath('POSCAR'))
        incar = Incar.from_file(zpath('INCAR'))

        # figure out what GGA+U values to use and override them
        # LDAU values to use
        mpvis = MPVaspInputSet()
        ggau_incar = mpvis.get_incar(poscar.structure).as_dict()
        incar_updates = {k: ggau_incar[k] for k in ggau_incar.keys() if 'LDAU' in k}

        for k in ggau_incar:
            # update any parameters not set explicitly in previous INCAR
            if k not in incar and k in ggau_incar:
                incar_updates[k] = ggau_incar[k]

        incar.update(incar_updates)  # override the +U keys


        # start from the CHGCAR of previous run
        if os.path.exists('CHGCAR'):
            incar['ICHARG'] = 1
            chgcar_start = True

        # write back the new INCAR to the current directory
        incar.write_file('INCAR')
        return FWAction(stored_data={'chgcar_start': chgcar_start})
Example #17
    def run_task(self, fw_spec):
        input_array = fw_spec['input_array']
        m_sum = sum(input_array)

        print("The sum of {} is: {}".format(input_array, m_sum))

        return FWAction(stored_data={'sum': m_sum})
Example #18
    def run_task(self, fw_spec):
        toks = self["func"].rsplit(".", 1)
        if len(toks) == 2:
            modname, funcname = toks
            mod = __import__(modname, globals(), locals(), [str(funcname)], 0)
            func = getattr(mod, funcname)
        else:
            #Handle built in functions.
            func = getattr(builtins, toks[0])

        args = self.get("args", [])
        if self.get("auto_kwargs"):
            kwargs = {
                k: v
                for k, v in self.items()
                if not (k.startswith("_") or k in self.required_params
                        or k in self.optional_params)
            }
        else:
            kwargs = self.get("kwargs", {})

        output = func(*args, **kwargs)
        if isinstance(output, FWAction):
            return output
        elif self.get("stored_data_varname"):
            return FWAction(stored_data={self["stored_data_varname"]: output})
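The function lookup above resolves a dotted path at runtime, falling back to builtins for bare names. A standalone sketch of the same logic:

    import builtins

    def resolve(path):
        toks = path.rsplit(".", 1)
        if len(toks) == 2:
            modname, funcname = toks
            mod = __import__(modname, globals(), locals(), [funcname], 0)
            return getattr(mod, funcname)
        return getattr(builtins, toks[0])  # bare name: try the builtins module

    print(resolve("math.sqrt")(9.0))  # 3.0
    print(resolve("len")([1, 2, 3]))  # 3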
Example #19
    def run_task(self, fw_spec):
        assert isinstance(self['split'], basestring), self['split']
        assert isinstance(fw_spec[self['split']], list)
        if isinstance(self['task']['inputs'], list):
            assert self['split'] in self['task']['inputs']
        else:
            assert self['split'] == self['task']['inputs']

        split_field = fw_spec[self['split']]
        lensplit = len(split_field)
        assert lensplit != 0, ('input to split is empty:', self['split'])

        nchunks = self.get('number of chunks')
        if not nchunks:
            nchunks = lensplit
        chunklen = lensplit // nchunks
        if lensplit % nchunks > 0:
            chunklen = chunklen + 1
        chunks = [
            split_field[i:i + chunklen] for i in range(0, lensplit, chunklen)
        ]

        fireworks = []
        for index, chunk in enumerate(chunks):
            spec = fw_spec.copy()
            spec[self['split']] = chunk
            task = load_object(self['task'])
            task['chunk_number'] = index
            name = self._fw_name + ' ' + str(index)
            fireworks.append(Firework(task, spec=spec, name=name))
        return FWAction(detours=fireworks)
Example #20
    def run_task(self, fw_spec):
        import json
        import operator
        from functools import reduce

        import ruamel.yaml as yaml

        filename = self["filename"]
        mapstring = self["mapstring"]
        assert isinstance(filename, basestring)
        assert isinstance(mapstring, basestring)
        maplist = mapstring.split("/")

        fmt = filename.split(".")[-1]
        assert fmt in ["json", "yaml"]
        with open(filename) as inp:
            data = json.load(inp) if fmt == "json" else yaml.safe_load(inp)

        leaf = reduce(operator.getitem, maplist[:-1], fw_spec)
        if isinstance(data, dict):
            if maplist[-1] not in leaf:
                leaf[maplist[-1]] = data
            else:
                leaf[maplist[-1]].update(data)
        else:
            leaf[maplist[-1]] = data

        return FWAction(update_spec={maplist[0]: fw_spec[maplist[0]]})
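
The mapstring above addresses a nested location in the spec: "a/b/c" walks fw_spec['a']['b'] and then fills or updates key 'c' with the file contents. A tiny illustration of the traversal:

    import operator
    from functools import reduce

    fw_spec = {'a': {'b': {'c': {'x': 1}}}}
    maplist = "a/b/c".split("/")
    leaf = reduce(operator.getitem, maplist[:-1], fw_spec)  # -> fw_spec['a']['b']
    leaf[maplist[-1]].update({'y': 2})
    print(fw_spec)  # {'a': {'b': {'c': {'x': 1, 'y': 2}}}}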
Example #21
    def run_task(self, fw_spec):
        assert isinstance(self["split"], basestring), self["split"]
        assert isinstance(fw_spec[self["split"]], list)
        if isinstance(self["task"]["inputs"], list):
            assert self["split"] in self["task"]["inputs"]
        else:
            assert self["split"] == self["task"]["inputs"]

        split_field = fw_spec[self["split"]]
        lensplit = len(split_field)
        assert lensplit != 0, ("input to split is empty:", self["split"])

        nchunks = self.get("number of chunks")
        if not nchunks:
            nchunks = lensplit
        chunklen = lensplit // nchunks
        if lensplit % nchunks > 0:
            chunklen = chunklen + 1
        chunks = [
            split_field[i:i + chunklen] for i in range(0, lensplit, chunklen)
        ]

        fireworks = []
        for index, chunk in enumerate(chunks):
            spec = fw_spec.copy()
            spec[self["split"]] = chunk
            task = load_object(self["task"])
            task["chunk_number"] = index
            name = f"{self._fw_name} {index}"
            fireworks.append(Firework(task, spec=spec, name=name))
        return FWAction(detours=fireworks)
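
The chunk length above is a ceiling division: with 10 items and 3 chunks it yields chunks of 4, 4 and 2. A standalone check of the arithmetic:

    split_field = list(range(10))
    nchunks = 3
    chunklen = len(split_field) // nchunks
    if len(split_field) % nchunks > 0:
        chunklen += 1  # round up so no more than nchunks chunks are produced
    chunks = [split_field[i:i + chunklen]
              for i in range(0, len(split_field), chunklen)]
    print(chunks)  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]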
Example #22
    def run_task(self, fw_spec):
        jobID = fw_spec['jobID']
        job = db2object(jobID)

        t0 = time.time()

        atoms = job.atoms()
        job.optimizePos(atoms, job.calc())

        t = (time.time() - t0) / 60.0  # minutes
        niter = 1  # ??? how to calculate this (from log file? different for gpaw and qe)
        avgtime = t / niter

        pos = atoms.get_scaled_positions()
        e0 = atoms.get_potential_energy()
        f0 = atoms.get_forces()
        magmom = (atoms.get_magnetic_moments()
                  if any(x > 0 for x in job.magmomsinit())
                  else np.zeros(len(atoms)))

        resultDict = {'pos': pos, 'magmom': magmom, 'e0': e0, 'f0': f0,
                      'avgtime': avgtime, 'niter': niter}

        if job.dftcode == 'gpaw':
            atoms.calc.write('inp.gpw', mode='all')  # for use in getXCContribs

        io.write('out.traj', atoms)

        return FWAction(stored_data=resultDict, mod_spec=[{'_push': resultDict}])
Example #23
    def run_task(self, fw_spec):
        prev_dir = get_loc(fw_spec['prev_vasp_dir'])

        if '$ALL' in self.files:
            self.files = os.listdir(prev_dir)

        for file in self.files:
            prev_filename = last_relax(os.path.join(prev_dir, file))
            dest_file = 'POSCAR' if file == 'CONTCAR' and self.use_contcar else file
            if prev_filename.endswith('.gz'):
                dest_file += '.gz'

            print 'COPYING', prev_filename, dest_file
            if self.missing_CHGCAR_OK and 'CHGCAR' in dest_file and not os.path.exists(
                    zpath(prev_filename)):
                print 'Skipping missing CHGCAR'
            else:
                shutil.copy2(prev_filename, dest_file)
                if '.gz' in dest_file:
                    # unzip dest file
                    f = gzip.open(dest_file, 'rb')
                    file_content = f.read()
                    with open(dest_file[0:-3], 'wb') as f_out:
                        f_out.writelines(file_content)
                    f.close()
                    os.remove(dest_file)

        return FWAction(stored_data={'copied_files': self.files})
Example #24
    def run_task(self, fw_spec):
        nproc = os.environ['PBS_NP']

        # Figure out the appropriate Vasp Executable based on run machine
        if 'nid' in socket.gethostname():  # hopper compute nodes
            v_exe = shlex.split('aprun -n '+str(nproc)+' vasp')
            gv_exe = shlex.split('aprun -n '+str(nproc)+' gvasp')
            print 'running on HOPPER'
        elif 'c' in socket.gethostname():  # mendel compute nodes
            v_exe = shlex.split('mpirun -n '+str(nproc)+' vasp')
            gv_exe = shlex.split('mpirun -n '+str(nproc)+' gvasp')
            print 'running on MENDEL'
        else:
            raise ValueError('Unrecognized host!')

        # override vasp executable in custodian jobs
        for job in self.jobs:
            job.vasp_cmd = v_exe
            job.gamma_vasp_cmd = gv_exe

        # run the custodian
        c = Custodian(self.handlers, self.jobs, self.max_errors)
        c.run()

        update_spec = {'prev_vasp_dir': os.getcwd(),
                       'prev_task_type': fw_spec['task_type']}

        return FWAction(update_spec=update_spec)
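
A hedged sketch of the custodian objects assumed above (standard custodian API; the handler and job choices are illustrative):

    from custodian import Custodian
    from custodian.vasp.handlers import VaspErrorHandler
    from custodian.vasp.jobs import VaspJob

    jobs = [VaspJob(vasp_cmd=["mpirun", "-n", "16", "vasp"])]
    c = Custodian(handlers=[VaspErrorHandler()], jobs=jobs, max_errors=5)
    c.run()  # runs each job, applying handler corrections on errors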
Example #25
    def run_task(self, fw_spec):
        assert isinstance(self['output'], basestring)
        assert isinstance(self['inputs'], list)

        try:  # replace if / else with try / except to find a possibly nested val
            output = get_nested_dict_value(fw_spec, self['output'])
        except KeyError:
            output = {}

        assert isinstance(output, dict), "output must be dict."

        if self.get('rename'):
            assert isinstance(self.get('rename'), dict)
            rename = self.get('rename')
        else:
            rename = {}

        for item in self['inputs']:
            if item in rename:
                output = set_nested_dict_value(
                    output, self['rename'][item],
                    get_nested_dict_value(fw_spec, item))
                # replaces
                # output[self['rename'][item]] = fw_spec[item]
            else:
                output = set_nested_dict_value(
                    output, item, get_nested_dict_value(fw_spec, item))
                # replaces
                # output[item] = fw_spec[item]

        return FWAction(mod_spec=[{'_set': {self['output']: output}}])
Example #26
    def run_task(self, fw_spec):
        input_array = fw_spec['input_array']
        m_sum = sum(input_array)

        print "The sum of {} is: {}".format(input_array, m_sum)

        return FWAction('CONTINUE', {'sum': m_sum})
Example #27
    def run_task(self, fw_spec):
        qcinp = self._get_qcinp_from_fw_spec(fw_spec)
        mixed_basis = fw_spec.get("mixed_basis", None)
        mixed_aux_basis = fw_spec.get("mixed_aux_basis", None)
        implicit_solvent = fw_spec.get("implicit_solvent", None)

        custodian_out, prev_qchem_dir = self.run_qchem(qcinp, implicit_solvent,
                                                       mixed_aux_basis,
                                                       mixed_basis)
        all_errors = set()
        for run in custodian_out:
            for correction in run['corrections']:
                all_errors.update(correction['errors'])

        if MOVE_TO_EG_GARDEN:
            prev_qchem_dir = move_to_eg_garden(prev_qchem_dir)

        stored_data = {'error_list': list(all_errors)}
        update_spec = {
            'prev_qchem_dir': prev_qchem_dir,
            'prev_task_type': fw_spec['task_type']
        }
        propagate_keys = [
            'egsnl', 'snlgroup_id', 'inchi_root', 'mixed_basis',
            'mixed_aux_basis', 'mol'
        ]

        for k in propagate_keys:
            if k in fw_spec:
                update_spec[k] = fw_spec[k]

        return FWAction(stored_data=stored_data, update_spec=update_spec)
Example #28
    def run_task(self, fw_spec):
        from functools import reduce
        import operator
        import json
        import ruamel.yaml as yaml

        filename = self['filename']
        mapstring = self['mapstring']
        assert isinstance(filename, basestring)
        assert isinstance(mapstring, basestring)
        maplist = mapstring.split('/')

        fmt = filename.split('.')[-1]
        assert fmt in ['json', 'yaml']
        with open(filename, 'r') as inp:
            data = json.load(inp) if fmt == 'json' else yaml.safe_load(inp)

        leaf = reduce(operator.getitem, maplist[:-1], fw_spec)
        if isinstance(data, dict):
            if maplist[-1] not in leaf:
                leaf[maplist[-1]] = data
            else:
                leaf[maplist[-1]].update(data)
        else:
            leaf[maplist[-1]] = data

        return FWAction(update_spec={maplist[0]: fw_spec[maplist[0]]})
Example #29
    def run_task(self, fw_spec):
        rbpf_batch = []
        if fw_spec['coord_ascent_iter'] > 0:
            assert('mod_direction' in fw_spec)
            fw_spec['results_folder'] = "%s/iterID_%d_dir-%s"%(fw_spec['results_folder'], fw_spec['coord_ascent_iter'],
                                                               fw_spec['mod_direction'])
        else:
            fw_spec['results_folder'] = "%s/iterID_%d"%(fw_spec['results_folder'], fw_spec['coord_ascent_iter'])

        setup_results_folder(fw_spec['results_folder'])
        rbpf_batch = []
        for run_idx in range(1, fw_spec['NUM_RUNS']+1):
            for seq_idx in fw_spec['TRAINING_SEQUENCES']:
                cur_spec = copy.deepcopy(fw_spec)
                cur_spec['run_idx'] = run_idx
                cur_spec['seq_idx'] = seq_idx
#                Q_idx = fw_spec['Q_idx']
#                if fw_spec['mod_direction'] == 'inc':
#                    cur_spec['Q'][Q_idx//4][Q_idx%4] += cur_spec['Q'][Q_idx//4][Q_idx%4]*fw_spec['mod_percent']/100.0
#                elif fw_spec['mod_direction'] == 'dec':
#                    cur_spec['Q'][Q_idx//4][Q_idx%4] -= cur_spec['Q'][Q_idx//4][Q_idx%4]*fw_spec['mod_percent']/100.0
#                else:
#                    assert(fw_spec['mod_direction'] == 'const')
                cur_firework = Firework(RunRBPF(), spec=cur_spec)
                rbpf_batch.append(cur_firework)

        parallel_workflow = Workflow(rbpf_batch)
        return FWAction(detours=parallel_workflow, mod_spec=[{'_set': {"results_folder": fw_spec['results_folder']}}])
Example #30
    def run_task(self, fw_spec):
        lp, fw_id = get_lp_and_fw_id_from_task(self, fw_spec)

        wf = lp.get_wf_by_fw_id(fw_id)
        wf_module = importlib.import_module(wf.metadata['workflow_module'])
        wf_class = getattr(wf_module, wf.metadata['workflow_class'])

        database = fw_spec['mongo_database']
        if self.criteria is not None:
            entry = database.get_entry(criteria=self.criteria)
        else:
            entry = {}

        inserted = []
        for root_key, method_name in self.insertion_data.items():
            get_results_method = getattr(wf_class, method_name)
            results = get_results_method(wf)
            for key, val in results.items():
                entry[key] = jsanitize(val)
                inserted.append(key)

        if self.criteria is not None:
            database.save_entry(entry=entry)
        else:
            database.insert_entry(entry=entry)

        logging.info("Inserted data:\n{}".format('- {}\n'.join(inserted)))
        return FWAction()
Example #31
    def recover_offline(self, launch_id, ignore_errors=False):
        # get the launch directory
        m_launch = self.get_launch_by_id(launch_id)
        try:
            self.m_logger.debug("RECOVERING fw_id: {}".format(m_launch.fw_id))
            # look for ping file - update the FireWork if this is the case
            ping_loc = os.path.join(m_launch.launch_dir, "FW_ping.json")
            if os.path.exists(ping_loc):
                with open(ping_loc) as f:
                    ping_time = datetime.datetime.strptime(json.loads(f.read())['ping_time'], "%Y-%m-%dT%H:%M:%S.%f")
                    self.ping_launch(launch_id, ping_time)

            # look for action in FW_offline.json
            offline_loc = os.path.join(m_launch.launch_dir, "FW_offline.json")
            with open(offline_loc) as f:
                offline_data = json.loads(f.read())
                if 'started_on' in offline_data:
                    m_launch.state = 'RUNNING'
                    for s in m_launch.state_history:
                        if s['state'] == 'RUNNING':
                            s['created_on'] = datetime.datetime.strptime(offline_data['started_on'], "%Y-%m-%dT%H:%M:%S.%f")
                    self.launches.find_and_modify({'launch_id': m_launch.launch_id}, m_launch.to_db_dict(), upsert=True)

                if 'fwaction' in offline_data:
                    fwaction = FWAction.from_dict(offline_data['fwaction'])
                    state = offline_data['state']
                    m_launch = Launch.from_dict(
                        self.complete_launch(launch_id, fwaction, state))
                    for s in m_launch.state_history:
                        if s['state'] == offline_data['state']:
                            s['created_on'] = datetime.datetime.strptime(offline_data['completed_on'], "%Y-%m-%dT%H:%M:%S.%f")
                    self.launches.find_and_modify({'launch_id': m_launch.launch_id}, m_launch.to_db_dict(), upsert=True)
                    self.offline_runs.update({"launch_id": launch_id}, {"$set": {"completed":True}})

            # update the updated_on
            self.offline_runs.update({"launch_id": launch_id}, {"$set": {"updated_on": datetime.datetime.utcnow().isoformat()}})
            return None
        except:
            if not ignore_errors:
                traceback.print_exc()
            return m_launch.fw_id
Example #32
    def run(self):
        """
        Run the rocket (check out a job from the database and execute it)
        """
        all_stored_data = {}  # combined stored data for *all* the Tasks
        all_update_spec = {}  # combined update_spec for *all* the Tasks
        all_mod_spec = []  # combined mod_spec for *all* the Tasks

        lp = self.launchpad
        launch_dir = os.path.abspath(os.getcwd())

        # check a FW job out of the launchpad
        if lp:
            m_fw, launch_id = lp.checkout_fw(self.fworker, launch_dir, self.fw_id)
        else:  # offline mode
            m_fw = Firework.from_file(os.path.join(os.getcwd(), "FW.json"))

            # set the run start time
            with open('FW_offline.json', 'r+') as f:
                d = json.loads(f.read())
                d['started_on'] = datetime.utcnow().isoformat()
                f.seek(0)
                f.write(json.dumps(d))
                f.truncate()

            launch_id = None  # we don't need this in offline mode...

        if not m_fw:
            print("No FireWorks are ready to run and match query! {}".format(self.fworker.query))
            return False

        if lp:
            message = 'RUNNING fw_id: {} in directory: {}'.\
                format(m_fw.fw_id, os.getcwd())
            lp.log_message(logging.INFO, message)

        # write FW.json and/or FW.yaml to the directory
        if PRINT_FW_JSON:
            m_fw.to_file('FW.json', indent=4)
        if PRINT_FW_YAML:
            m_fw.to_file('FW.yaml')

        try:
            if '_launch_dir' in m_fw.spec:
                prev_dir = launch_dir
                launch_dir = os.path.expandvars(m_fw.spec['_launch_dir'])
                # thread-safe "mkdir -p"
                try:
                    os.makedirs(launch_dir)
                except OSError as exception:
                    if exception.errno != errno.EEXIST:
                        raise
                os.chdir(launch_dir)
                launch_dir = os.path.abspath(os.getcwd())

                if lp:
                    lp.change_launch_dir(launch_id, launch_dir)

                if not os.listdir(prev_dir) and REMOVE_USELESS_DIRS:
                    try:
                        os.rmdir(prev_dir)
                    except:
                        pass

            if m_fw.spec.get('_recover_launch', None):
                launch_to_recover = lp.get_launch_by_id(m_fw.spec['_recover_launch']['_launch_id'])
                starting_task = launch_to_recover.action.stored_data.get('_exception', {}).get('_failed_task_n', 0)
                recover_launch_dir = launch_to_recover.launch_dir
                if lp:
                    lp.log_message(
                        logging.INFO,
                        'Recovering from task number {} in folder {}.'.format(starting_task, recover_launch_dir))
                if m_fw.spec['_recover_launch']['_recover_mode'] == 'cp' and launch_dir != recover_launch_dir:
                    if lp:
                        lp.log_message(
                            logging.INFO,
                            'Copying data from recovery folder {} to folder {}.'.format(recover_launch_dir, launch_dir))
                    distutils.dir_util.copy_tree(recover_launch_dir, launch_dir, update=1)

            else:
                starting_task = 0

            my_spec = dict(m_fw.spec)  # make a copy of spec, don't override original
            my_spec["_fw_env"] = self.fworker.env

            # set up heartbeat (pinging the server that we're still alive)
            ping_stop = start_ping_launch(lp, launch_id)

            # start background tasks
            btask_stops = []
            if '_background_tasks' in my_spec:
                for bt in my_spec['_background_tasks']:
                    btask_stops.append(start_background_task(bt, m_fw.spec))

            # execute the FireTasks!
            for t_counter, t in enumerate(m_fw.tasks[starting_task:], start=starting_task):
                if lp:
                    lp.log_message(logging.INFO, "Task started: %s." % t.fw_name)
                try:
                    m_action = t.run_task(my_spec)
                except BaseException as e:
                    traceback.print_exc()
                    tb = traceback.format_exc()
                    stop_backgrounds(ping_stop, btask_stops)
                    do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
                    # If the exception is serializable, save its details
                    try:
                        exception_details = e.to_dict()
                    except AttributeError:
                        exception_details = None
                    except BaseException as e:
                        if lp:
                            lp.log_message(logging.WARNING, "Exception couldn't be serialized: %s " % e)
                        exception_details = None

                    try:
                        m_task = t.to_dict()
                    except:
                        m_task = None

                    m_action = FWAction(stored_data={'_message': 'runtime error during task', '_task': m_task,
                                                     '_exception': {'_stacktrace': tb, '_details': exception_details,
                                                                    '_failed_task_n': t_counter}}, exit=True)
                    if lp:
                        lp.complete_launch(launch_id, m_action, 'FIZZLED')
                    else:
                        with open('FW_offline.json', 'r+') as f:
                            d = json.loads(f.read())
                            d['fwaction'] = m_action.to_dict()
                            d['state'] = 'FIZZLED'
                            f.seek(0)
                            f.write(json.dumps(d))
                            f.truncate()

                    return True


                # read in a FWAction from a file, in case the task is not Python and cannot return it explicitly
                if os.path.exists('FWAction.json'):
                    m_action = FWAction.from_file('FWAction.json')
                elif os.path.exists('FWAction.yaml'):
                    m_action = FWAction.from_file('FWAction.yaml')

                if not m_action:
                    m_action = FWAction()

                # update the global stored data with the data to store and update from this particular Task
                all_stored_data.update(m_action.stored_data)
                all_update_spec.update(m_action.update_spec)
                all_mod_spec.extend(m_action.mod_spec)

                # update spec for next task as well
                my_spec.update(m_action.update_spec)
                for mod in m_action.mod_spec:
                    apply_mod(mod, my_spec)
                if lp:
                    lp.log_message(logging.INFO, "Task completed: %s " % t.fw_name)
                if m_action.skip_remaining_tasks:
                    break

            # add job packing info if this is needed
            if FWData().MULTIPROCESSING and STORE_PACKING_INFO:
                all_stored_data['multiprocess_name'] = multiprocessing.current_process().name

            # perform finishing operation
            stop_backgrounds(ping_stop, btask_stops)
            for b in btask_stops:
                b.set()
            do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
            # last background monitors
            if '_background_tasks' in my_spec:
                for bt in my_spec['_background_tasks']:
                    if bt.run_on_finish:
                        for task in bt.tasks:
                            task.run_task(m_fw.spec)

            m_action.stored_data = all_stored_data
            m_action.mod_spec = all_mod_spec
            m_action.update_spec = all_update_spec

            if lp:
                lp.complete_launch(launch_id, m_action, 'COMPLETED')
            else:
                with open('FW_offline.json', 'r+') as f:
                    d = json.loads(f.read())
                    d['fwaction'] = m_action.to_dict()
                    d['state'] = 'COMPLETED'
                    d['completed_on'] = datetime.utcnow().isoformat()
                    f.seek(0)
                    f.write(json.dumps(d))
                    f.truncate()

            return True

        except:
            # problems while processing the results. high probability of malformed data.
            traceback.print_exc()
            stop_backgrounds(ping_stop, btask_stops)
            # restore initial state to prevent the raise of further exceptions
            if lp:
                lp.restore_backup_data(launch_id, m_fw.fw_id)

            do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
            # the action produced by the task is discarded
            m_action = FWAction(stored_data={'_message': 'runtime error during task', '_task': None,
                                             '_exception': {'_stacktrace': traceback.format_exc(),
                                             '_details': None}}, exit=True)
            if lp:
                lp.complete_launch(launch_id, m_action, 'FIZZLED')
            else:
                with open('FW_offline.json', 'r+') as f:
                    d = json.loads(f.read())
                    d['fwaction'] = m_action.to_dict()
                    d['state'] = 'FIZZLED'
                    f.seek(0)
                    f.write(json.dumps(d))
                    f.truncate()

            return True
Example #33
    def run(self):
        """
        Run the rocket (check out a job from the database and execute it)
        """
        all_stored_data = {}  # combined stored data for *all* the Tasks
        all_update_spec = {}  # combined update_spec for *all* the Tasks
        all_mod_spec = []  # combined mod_spec for *all* the Tasks

        lp = self.launchpad
        launch_dir = os.path.abspath(os.getcwd())

        # check a FW job out of the launchpad
        if lp:
            m_fw, launch_id = lp.checkout_fw(self.fworker, launch_dir, self.fw_id)
        else:  # offline mode
            m_fw = Firework.from_file(os.path.join(os.getcwd(), "FW.json"))

            # set the run start time
            with open('FW_offline.json', 'r+') as f:
                d = json.loads(f.read())
                d['started_on'] = datetime.utcnow().isoformat()
                f.seek(0)
                f.write(json.dumps(d))
                f.truncate()

            launch_id = None  # we don't need this in offline mode...

        if not m_fw:
            print("No FireWorks are ready to run and match query! {}".format(self.fworker.query))
            return False

        if '_launch_dir' in m_fw.spec:
            prev_dir = launch_dir
            os.chdir(m_fw.spec['_launch_dir'])
            launch_dir = os.path.abspath(os.getcwd())

            if lp:
                lp.change_launch_dir(launch_id, launch_dir)

            if not os.listdir(prev_dir) and REMOVE_USELESS_DIRS:
                try:
                    os.rmdir(prev_dir)
                except:
                    pass

        if lp:
            message = 'RUNNING fw_id: {} in directory: {}'.\
                format(m_fw.fw_id, os.getcwd())
            lp.log_message(logging.INFO, message)

        # write FW.json and/or FW.yaml to the directory
        if PRINT_FW_JSON:
            m_fw.to_file('FW.json', indent=4)
        if PRINT_FW_YAML:
            m_fw.to_file('FW.yaml')

        try:
            my_spec = dict(m_fw.spec)  # make a copy of spec, don't override original
            my_spec["_fw_env"] = self.fworker.env

            # set up heartbeat (pinging the server that we're still alive)
            ping_stop = start_ping_launch(lp, launch_id)

            # start background tasks
            btask_stops = []
            if '_background_tasks' in my_spec:
                for bt in my_spec['_background_tasks']:
                    btask_stops.append(start_background_task(bt, m_fw.spec))

            # execute the FireTasks!
            for t in m_fw.tasks:
                lp.log_message(logging.INFO, "Task started: %s." % t.fw_name)
                m_action = t.run_task(my_spec)

                # read in a FWAction from a file, in case the task is not Python and cannot return it explicitly
                if os.path.exists('FWAction.json'):
                    m_action = FWAction.from_file('FWAction.json')
                elif os.path.exists('FWAction.yaml'):
                    m_action = FWAction.from_file('FWAction.yaml')

                if not m_action:
                    m_action = FWAction()

                # update the global stored data with the data to store and update from this particular Task
                all_stored_data.update(m_action.stored_data)
                all_update_spec.update(m_action.update_spec)
                all_mod_spec.extend(m_action.mod_spec)

                # update spec for next task as well
                my_spec.update(m_action.update_spec)
                for mod in m_action.mod_spec:
                    apply_mod(mod, my_spec)
                lp.log_message(logging.INFO, "Task completed: %s " % t.fw_name)
                if m_action.skip_remaining_tasks:
                    break

            # add job packing info if this is needed
            if FWData().MULTIPROCESSING and STORE_PACKING_INFO:
                all_stored_data['multiprocess_name'] = multiprocessing.current_process().name

            # perform finishing operation
            stop_backgrounds(ping_stop, btask_stops)
            for b in btask_stops:
                b.set()
            do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
            # last background monitors
            if '_background_tasks' in my_spec:
                for bt in my_spec['_background_tasks']:
                    if bt.run_on_finish:
                        for task in bt.tasks:
                            task.run_task(m_fw.spec)

            m_action.stored_data = all_stored_data
            m_action.mod_spec = all_mod_spec
            m_action.update_spec = all_update_spec

            if lp:
                lp.complete_launch(launch_id, m_action, 'COMPLETED')
            else:
                with open('FW_offline.json', 'r+') as f:
                    d = json.loads(f.read())
                    d['fwaction'] = m_action.to_dict()
                    d['state'] = 'COMPLETED'
                    d['completed_on'] = datetime.utcnow().isoformat()
                    f.seek(0)
                    f.write(json.dumps(d))
                    f.truncate()

            return True

        except:
            stop_backgrounds(ping_stop, btask_stops)
            do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
            traceback.print_exc()
            try:
                m_action = FWAction(stored_data={'_message': 'runtime error during task', '_task': t.to_dict(),
                                             '_exception': traceback.format_exc()}, exit=True)
            except:
                m_action = FWAction(stored_data={'_message': 'runtime error during task', '_task': None,
                                             '_exception': traceback.format_exc()}, exit=True)
            if lp:
                lp.complete_launch(launch_id, m_action, 'FIZZLED')
            else:
                with open('FW_offline.json', 'r+') as f:
                    d = json.loads(f.read())
                    d['fwaction'] = m_action.to_dict()
                    d['state'] = 'FIZZLED'
                    f.seek(0)
                    f.write(json.dumps(d))
                    f.truncate()

            return True
Example #34
    def run(self, pdb_on_exception=False):
        """
        Run the rocket (check out a job from the database and execute it)

        Args:
            pdb_on_exception (bool): whether to invoke the debugger on
                a caught exception.  Default False.
        """
        all_stored_data = {}  # combined stored data for *all* the Tasks
        all_update_spec = {}  # combined update_spec for *all* the Tasks
        all_mod_spec = []  # combined mod_spec for *all* the Tasks

        lp = self.launchpad
        launch_dir = os.path.abspath(os.getcwd())
        logdir = lp.get_logdir() if lp else None
        l_logger = get_fw_logger('rocket.launcher', l_dir=logdir,
                                 stream_level=ROCKET_STREAM_LOGLEVEL)

        # check a FW job out of the launchpad
        if lp:
            m_fw, launch_id = lp.checkout_fw(self.fworker, launch_dir, self.fw_id)
        else:  # offline mode
            m_fw = Firework.from_file(os.path.join(os.getcwd(), "FW.json"))

            # set the run start time
            fpath = zpath("FW_offline.json")
            with zopen(fpath) as f_in:
                d = json.loads(f_in.read())
                d['started_on'] = datetime.utcnow().isoformat()
                with zopen(fpath, "wt") as f_out:
                    f_out.write(json.dumps(d, ensure_ascii=False))

            launch_id = None  # we don't need this in offline mode...

        if not m_fw:
            print("No FireWorks are ready to run and match query! {}".format(self.fworker.query))
            return False

        final_state = None
        ping_stop = None
        btask_stops = []

        try:
            if '_launch_dir' in m_fw.spec and lp:
                prev_dir = launch_dir
                launch_dir = os.path.expandvars(m_fw.spec['_launch_dir'])
                if not os.path.isabs(launch_dir):
                    launch_dir = os.path.normpath(os.path.join(os.getcwd(), launch_dir))
                # thread-safe "mkdir -p"
                try:
                    os.makedirs(launch_dir)
                except OSError as exception:
                    if exception.errno != errno.EEXIST:
                        raise
                os.chdir(launch_dir)

                if not os.path.samefile(launch_dir, prev_dir):
                    lp.change_launch_dir(launch_id, launch_dir)

                if not os.listdir(prev_dir) and REMOVE_USELESS_DIRS:
                    try:
                        os.rmdir(prev_dir)
                    except:
                        pass

            recovery = m_fw.spec.get('_recovery', None)
            if recovery:
                recovery_dir = recovery.get('_prev_dir')
                recovery_mode = recovery.get('_mode')
                starting_task = recovery.get('_task_n')
                all_stored_data.update(recovery.get('_all_stored_data'))
                all_update_spec.update(recovery.get('_all_update_spec'))
                all_mod_spec.extend(recovery.get('_all_mod_spec'))
                if lp:
                    l_logger.log(
                                logging.INFO,
                                'Recovering from task number {} in folder {}.'.format(starting_task,
                                                                                      recovery_dir))
                if recovery_mode == 'cp' and launch_dir != recovery_dir:
                    if lp:
                        l_logger.log(
                                    logging.INFO,
                                    'Copying data from recovery folder {} to folder {}.'.format(recovery_dir,
                                                                                                launch_dir))
                    distutils.dir_util.copy_tree(recovery_dir, launch_dir, update=1)

            else:
                starting_task = 0
                files_in = m_fw.spec.get("_files_in", {})
                prev_files = m_fw.spec.get("_files_prev", {})
                for f in set(files_in.keys()).intersection(prev_files.keys()):
                    # We use zopen for the file objects for transparent handling
                    # of zipped files. shutil.copyfileobj does the actual copy
                    # in chunks that avoid memory issues.
                    with zopen(prev_files[f], "rb") as fin, zopen(files_in[f], "wb") as fout:
                        shutil.copyfileobj(fin, fout)

            if lp:
                message = 'RUNNING fw_id: {} in directory: {}'.\
                    format(m_fw.fw_id, os.getcwd())
                l_logger.log(logging.INFO, message)

            # write FW.json and/or FW.yaml to the directory
            if PRINT_FW_JSON:
                m_fw.to_file('FW.json', indent=4)
            if PRINT_FW_YAML:
                m_fw.to_file('FW.yaml')

            my_spec = dict(m_fw.spec)  # make a copy of spec, don't override original
            my_spec["_fw_env"] = self.fworker.env

            # set up heartbeat (pinging the server that we're still alive)
            ping_stop = start_ping_launch(lp, launch_id)

            # start background tasks
            if '_background_tasks' in my_spec:
                for bt in my_spec['_background_tasks']:
                    btask_stops.append(start_background_task(bt, m_fw.spec))

            # execute the Firetasks!
            for t_counter, t in enumerate(m_fw.tasks[starting_task:], start=starting_task):
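                # Checkpoint before each task so a crashed launch can resume via
                # the '_recovery' branch above.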
                checkpoint = {'_task_n': t_counter,
                              '_all_stored_data': all_stored_data,
                              '_all_update_spec': all_update_spec,
                              '_all_mod_spec': all_mod_spec}
                Rocket.update_checkpoint(lp, launch_dir, launch_id, checkpoint)

                if lp:
                    l_logger.log(logging.INFO, "Task started: %s." % t.fw_name)

                if my_spec.get("_add_launchpad_and_fw_id"):
                    t.fw_id = m_fw.fw_id
                    if FWData().MULTIPROCESSING:
                        # hack because AutoProxy manager can't access attributes
                        t.launchpad = LaunchPad.from_dict(self.launchpad.to_dict())
                    else:
                        t.launchpad = self.launchpad

                if my_spec.get("_add_fworker"):
                    t.fworker = self.fworker

                try:
                    m_action = t.run_task(my_spec)
                except BaseException as e:
                    traceback.print_exc()
                    tb = traceback.format_exc()
                    stop_backgrounds(ping_stop, btask_stops)
                    do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
                    if pdb_on_exception:
                        pdb.post_mortem()
                    # if the exception is serializable, save its details
                    try:
                        exception_details = e.to_dict()
                    except AttributeError:
                        exception_details = None
                    except BaseException as serialization_exc:
                        # avoid rebinding 'e': Python 3 deletes the 'as' target
                        # when the handler exits, which would clobber it
                        if lp:
                            l_logger.log(logging.WARNING,
                                         "Exception couldn't be serialized: %s " % serialization_exc)
                        exception_details = None

                    try:
                        m_task = t.to_dict()
                    except Exception:
                        m_task = None

                    m_action = FWAction(stored_data={'_message': 'runtime error during task',
                                                     '_task': m_task,
                                                     '_exception': {'_stacktrace': tb,
                                                                    '_details': exception_details}},
                                        exit=True)
                    m_action = self.decorate_fwaction(m_action, my_spec, m_fw, launch_dir)

                    if lp:
                        final_state = 'FIZZLED'
                        lp.complete_launch(launch_id, m_action, final_state)
                    else:
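                        # offline mode: record the failure in FW_offline.json so
                        # it can be reconciled with the LaunchPad later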
                        fpath = zpath("FW_offline.json")
                        with zopen(fpath) as f_in:
                            d = json.loads(f_in.read())
                            d['fwaction'] = m_action.to_dict()
                            d['state'] = 'FIZZLED'
                            d['completed_on'] = datetime.utcnow().isoformat()
                            with zopen(fpath, "wt") as f_out:
                                f_out.write(json.dumps(d, ensure_ascii=False))

                    return True

                # read in a FWAction from a file, in case the task is not Python and cannot return
                # it explicitly
                if os.path.exists('FWAction.json'):
                    m_action = FWAction.from_file('FWAction.json')
                elif os.path.exists('FWAction.yaml'):
                    m_action = FWAction.from_file('FWAction.yaml')

                if not m_action:
                    m_action = FWAction()

                # update the global stored data with the data to store and update from this
                # particular Task
                all_stored_data.update(m_action.stored_data)
                all_update_spec.update(m_action.update_spec)
                all_mod_spec.extend(m_action.mod_spec)

                # update spec for next task as well
                my_spec.update(m_action.update_spec)
                for mod in m_action.mod_spec:
                    apply_mod(mod, my_spec)
                if lp:
                    l_logger.log(logging.INFO, "Task completed: %s " % t.fw_name)
                if m_action.skip_remaining_tasks:
                    break

            # add job packing info if this is needed
            if FWData().MULTIPROCESSING and STORE_PACKING_INFO:
                all_stored_data['multiprocess_name'] = multiprocessing.current_process().name

            # perform finishing operation
            stop_backgrounds(ping_stop, btask_stops)
            for b in btask_stops:
                b.set()
            do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
            # run background tasks flagged run_on_finish one final time
            if '_background_tasks' in my_spec:
                for bt in my_spec['_background_tasks']:
                    if bt.run_on_finish:
                        for task in bt.tasks:
                            task.run_task(m_fw.spec)

            m_action.stored_data = all_stored_data
            m_action.mod_spec = all_mod_spec
            m_action.update_spec = all_update_spec

            m_action = self.decorate_fwaction(m_action, my_spec, m_fw, launch_dir)

            if lp:
                final_state = 'COMPLETED'
                lp.complete_launch(launch_id, m_action, final_state)
            else:
                fpath = zpath("FW_offline.json")
                with zopen(fpath) as f_in:
                    d = json.loads(f_in.read())
                d['fwaction'] = m_action.to_dict()
                d['state'] = 'COMPLETED'
                d['completed_on'] = datetime.utcnow().isoformat()
                with zopen(fpath, "wt") as f_out:
                    f_out.write(json.dumps(d, ensure_ascii=False))

            return True

        except LockedWorkflowError as e:
            l_logger.log(logging.DEBUG, traceback.format_exc())
            l_logger.log(logging.WARNING,
                           "Firework {} reached final state {} but couldn't complete the update of "
                           "the database. Reason: {}\nRefresh the WF to recover the result "
                           "(lpad admin refresh -i {}).".format(
                               self.fw_id, final_state, e, self.fw_id))
            return True

        except BaseException:
            # problems while processing the results. high probability of malformed data.
            traceback.print_exc()
            stop_backgrounds(ping_stop, btask_stops)
            # restore initial state to prevent the raise of further exceptions
            if lp:
                lp.restore_backup_data(launch_id, m_fw.fw_id)

            do_ping(lp, launch_id)  # one last ping, esp if there is a monitor
            # the action produced by the task is discarded
            m_action = FWAction(stored_data={'_message': 'runtime error during task', '_task': None,
                                             '_exception': {'_stacktrace': traceback.format_exc(),
                                                            '_details': None}},
                                exit=True)

            try:
                m_action = self.decorate_fwaction(m_action, my_spec, m_fw, launch_dir)
            except Exception:
                traceback.print_exc()

            if lp:
                try:
                    lp.complete_launch(launch_id, m_action, 'FIZZLED')
                except LockedWorkflowError as e:
                    l_logger.log(logging.DEBUG, traceback.format_exc())
                    l_logger.log(logging.WARNING,
                                 "Firework {} fizzled but couldn't complete the update of the database."
                                 " Reason: {}\nRefresh the WF to recover the result "
                                 "(lpad admin refresh -i {}).".format(
                                     self.fw_id, e, self.fw_id))
                    return True
            else:
                fpath = zpath("FW_offline.json")
                with zopen(fpath) as f_in:
                    d = json.loads(f_in.read())
                    d['fwaction'] = m_action.to_dict()
                    d['state'] = 'FIZZLED'
                    d['completed_on'] = datetime.utcnow().isoformat()
                    with zopen(fpath, "wt") as f_out:
                        f_out.write(json.dumps(d, ensure_ascii=False))

            return True
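
For reference, the run() method above is normally driven by FireWorks' rocket launcher rather than called directly. A minimal sketch of kicking off a single launch (assuming a LaunchPad already configured on the machine, e.g. via my_launchpad.yaml):

    from fireworks import FWorker, LaunchPad
    from fireworks.core.rocket_launcher import launch_rocket

    # load connection settings from the standard FireWorks config
    lp = LaunchPad.auto_load()
    # pull one READY firework and run its tasks, as in the method above
    launch_rocket(lp, FWorker())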