Example #1
def prep_vertical_ip(path):
    # Given a path to the outfile of a finished run, this preps the files for a corresponding vertical IP run
    # Returns a list of the PATH(s) to the jobscript(s) to start the vertical IP calculation(s)
    home = os.getcwd()
    path = convert_to_absolute_path(path)

    results = manager_io.read_outfile(path)
    if not results['finished']:
        raise Exception('This calculation does not appear to be complete! Aborting...')

    infile_dict = manager_io.read_infile(path)

    if infile_dict['spinmult'] == 1:
        new_spin = [2]
    else:
        new_spin = [infile_dict['spinmult'] - 1, infile_dict['spinmult'] + 1]

    base = os.path.split(path)[0]
    
    if infile_dict['run_type'] == 'minimize':
        optimxyz = os.path.join(base, 'scr', 'optim.xyz')
    else:
        optimxyz = os.path.join(base, 'scr', 'xyz.xyz')
    extract_optimized_geo(optimxyz)

    ipname = results['name'] + '_vertIP'
    vertip_base_path = os.path.join(base, ipname)
    if os.path.isdir(vertip_base_path):
        return ['Directory for vertIP single point already exists']
    os.mkdir(vertip_base_path)
    os.chdir(vertip_base_path)

    jobscripts = []
    for calc in new_spin:
        if calc < 7:  # skip unreasonably high spin multiplicities
            name = results['name'] + '_vertIP_' + str(calc)
            PATH = os.path.join(vertip_base_path, str(calc))
            if os.path.isdir(PATH):
                jobscripts.append('File for vert IP spin ' + str(calc) + ' already exists')
            else:
                os.mkdir(PATH)
                os.chdir(PATH)

                shutil.copyfile(os.path.join(base, 'scr', 'optimized.xyz'), os.path.join(PATH, name + '.xyz'))

                local_infile_dict = copy.copy(infile_dict)
                local_infile_dict['charge'], local_infile_dict['guess'] = infile_dict['charge'] + 1, False
                local_infile_dict['run_type'], local_infile_dict['spinmult'] = 'energy', calc
                local_infile_dict['name'] = name
                local_infile_dict['levelshifta'], local_infile_dict['levelshiftb'] = 0.25, 0.25
                local_infile_dict['machine'] = machine  # 'machine' is assumed to be defined at module scope in these examples
                manager_io.write_input(local_infile_dict)
                manager_io.write_jobscript(name, machine=machine)

                jobscripts.append(os.path.join(PATH, name + '_jobscript'))

    os.chdir(home)

    return jobscripts
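
A minimal usage sketch for the function above (the outfile path is hypothetical, and the tools.qsub submitter from Example #5 is assumed to be importable):

# Prep vertical IP single points for one finished run, then submit each jobscript
jobscripts = prep_vertical_ip('fe_complex/fe_complex.out')  # hypothetical path
for js in jobscripts:
    if os.path.isfile(js):  # skip the status strings returned for pre-existing directories
        tools.qsub(js)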
Example #2
def read_run(outfile_PATH):
    # Evaluates all aspects of a run using the outfile and derivative files
    results = manager_io.read_outfile(outfile_PATH, long_output=True)
    infile_dict = manager_io.read_infile(outfile_PATH)
    results['levela'], results['levelb'] = infile_dict['levelshifta'], infile_dict['levelshiftb']
    results['method'], results['hfx'] = infile_dict['method'], infile_dict['hfx']
    results['constraints'] = infile_dict['constraints']

    mullpop_path = os.path.join(os.path.split(outfile_PATH)[0], 'scr', 'mullpop')
    if os.path.exists(mullpop_path):
        mullpops = manager_io.read_mullpop(outfile_PATH)
        metal_types = ['Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Mo', 'Tc', 'Ru', 'Rh']
        metals = [i for i in mullpops if i.split()[0] in metal_types]
        if len(metals) > 1:
            results['metal_spin'] = np.nan
        elif len(metals) == 0:
            pass
        else:
            results['metal_spin'] = float(metals[0].split()[-1])
    else:
        results['metal_spin'] = np.nan

    optim_path = os.path.join(os.path.split(outfile_PATH)[0], 'scr', 'optim.xyz')

    check_geo = False
    if os.path.isfile(optim_path):
        with open(optim_path, 'r') as fil:
            lines = fil.readlines()
        if len(lines) > 0:
            check_geo = True  # Only apply geo check if an optimized geometry exists

    if check_geo:
        tools.extract_optimized_geo(optim_path)
        optimized_path = os.path.join(os.path.split(optim_path)[0], 'optimized.xyz')

        mol = mol3D()
        mol.readfromxyz(optimized_path)

        IsOct, flag_list, oct_check = mol.IsOct(dict_check=mol.dict_oct_check_st,
                                                silent=True)

        IsOct = bool(IsOct)  # normalize to a plain boolean

        results['Is_Oct'] = IsOct
        results['Flag_list'] = flag_list
        results['Oct_check_details'] = oct_check

    else:
        results['Is_Oct'] = None
        results['Flag_list'] = None
        results['Oct_check_details'] = None

    return results
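
A minimal sketch of inspecting the dict that read_run assembles (the path is hypothetical; no keys beyond the ones set above are assumed):

results = read_run('fe_complex/fe_complex.out')  # hypothetical path
if results['finished'] and results['Is_Oct']:
    print(results['name'], 'converged to an octahedral geometry')
print('metal spin:', results.get('metal_spin'))  # may be absent if no single metal was found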
Example #3
def prep_thermo(path):
    # Given a path to the outfile of a finished run, this preps the files for a thermo calculation
    # Uses the wavefunction from the previous calculation as an initial guess
    # Returns a list of the PATH(s) to the jobscript(s) to start the thermo calculation(s)
    home = os.getcwd()
    path = convert_to_absolute_path(path)

    results = manager_io.read_outfile(path)
    infile_dict = manager_io.read_infile(path)

    base = os.path.split(path)[0]

    if infile_dict['run_type'] == 'minimize':
        optimxyz = os.path.join(base, 'scr', 'optim.xyz')
    else:
        optimxyz = os.path.join(base, 'scr', 'xyz.xyz')
    extract_optimized_geo(optimxyz)

    # Now, start generating the new directory
    name = results['name'] + '_thermo'
    PATH = os.path.join(base, name)
    if os.path.isdir(PATH):
        return ['Thermo Calculation Directory already exists']

    os.mkdir(PATH)
    os.chdir(PATH)

    shutil.copyfile(os.path.join(base, 'scr', 'optimized.xyz'),
                    os.path.join(PATH, name + '.xyz'))
    local_infile_dict = copy.copy(infile_dict)
    local_infile_dict['guess'] = True
    local_infile_dict['run_type'] = 'frequencies'
    local_infile_dict['name'] = name
    manager_io.write_input(local_infile_dict)
    if infile_dict['spinmult'] == 1:
        shutil.copyfile(os.path.join(base, 'scr', 'c0'),
                        os.path.join(PATH, 'c0'))
        manager_io.write_jobscript(name, custom_line='# -fin c0')
    else:
        shutil.copyfile(os.path.join(base, 'scr', 'ca0'),
                        os.path.join(PATH, 'ca0'))
        shutil.copyfile(os.path.join(base, 'scr', 'cb0'),
                        os.path.join(PATH, 'cb0'))
        manager_io.write_jobscript(
            name, custom_line=['# -fin ca0\n', '# -fin cb0\n'])

    os.chdir(home)
    return [os.path.join(PATH, name + '_jobscript')]
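
Because the previous wavefunction (c0, or ca0/cb0 for unrestricted runs) is staged as the initial guess, the frequency job can be submitted right away. A minimal sketch, again assuming tools.qsub from Example #5 is importable:

for js in prep_thermo('fe_complex/fe_complex.out'):  # hypothetical path
    if js.endswith('_jobscript'):  # skip the status string returned when the directory already exists
        tools.qsub(js)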
Example #4
def prep_ligand_breakown(outfile_path):
    # Given a path to the outfile of a finished run, this preps the files for rigid ligand dissociation energies of all ligands
    # Returns a list of the PATH(s) to the jobscript(s) to start the rigid ligand calculations

    home = os.getcwd()
    outfile_path = tools.convert_to_absolute_path(outfile_path)

    results = manager_io.read_outfile(outfile_path)
    if not results['finished']:
        raise Exception(
            'This calculation does not appear to be complete! Aborting...')

    infile_dict = manager_io.read_infile(outfile_path)
    charge = int(infile_dict['charge'])
    spinmult = int(infile_dict['spinmult'])

    base = os.path.split(outfile_path)[0]
    name = os.path.split(outfile_path)[-1][:-4]

    breakdown_folder = os.path.join(base, name + '_dissociation')

    if os.path.isdir(breakdown_folder):
        return ['Ligand dissociation directory already exists']

    optimxyz = os.path.join(base, 'scr', 'optim.xyz')
    tools.extract_optimized_geo(optimxyz)

    mol = mol3D()
    mol.readfromxyz(os.path.join(base, 'scr', 'optimized.xyz'))

    ligand_idxs, _, _ = ligand_breakdown(mol, silent=True)

    ligand_syms = []
    for ii in ligand_idxs:
        ligand_syms.append([mol.getAtom(i).symbol() for i in ii])

    ligand_names = name_ligands(ligand_syms)

    if not os.path.isdir(breakdown_folder):
        os.mkdir(breakdown_folder)
    os.chdir(breakdown_folder)

    jobscripts = []
    for ligand in zip(ligand_names, ligand_idxs):

        # Assign charges to use during the breakdown for special cases...oxygen, hydroxide, peroxide, and acac
        # All other ligands are currently assigned charge 0
        ligand_charges = {'O1': -2, 'H1O1': -1, 'H1O2': -1, 'C5H7O2': -1}
        if ligand[0] in list(ligand_charges.keys()):
            ligand_charge = ligand_charges[ligand[0]]
        else:
            ligand_charge = 0
        metal_charge = charge - ligand_charge

        # Assign spin, which always remains with the metal except for when an O2 leaves
        if spinmult == 1:  # If the whole complex is restricted, its components must be restricted as well
            ligand_spin, metal_spin = 1, 1
        else:
            ligand_spinmults = {'O2': 3}
            if ligand[0] in list(ligand_spinmults.keys()):
                ligand_spin = ligand_spinmults[ligand[0]]
            else:
                ligand_spin = 1

            metal_spin = spinmult - ligand_spin + 1  # Derived from spinmult = (2S+1) where S=1/2 per electron

        # Create the necessary files for the metal complex single point
        local_name = name + '_rm_' + ligand[0]
        if not os.path.isdir('rm_' + ligand[0]):
            os.mkdir('rm_' + ligand[0])
            os.chdir('rm_' + ligand[0])

            local_mol = mol3D()
            local_mol.copymol3D(mol)
            local_mol.deleteatoms(ligand[1])
            local_mol.writexyz(local_name + '.xyz')

            local_infile_dict = copy.copy(infile_dict)
            local_infile_dict['name'] = local_name
            local_infile_dict['charge'] = metal_charge
            local_infile_dict['spinmult'] = metal_spin
            local_infile_dict['run_type'] = 'energy'
            local_infile_dict['constraints'] = False
            local_infile_dict['convergence_thresholds'] = False

            manager_io.write_input(local_infile_dict)
            manager_io.write_jobscript(local_name,
                                       time_limit='12:00:00',
                                       sleep=True)
            jobscripts.append(local_name + '.in')
            os.chdir('..')

        # Create the necessary files for the dissociated ligand single point
        local_name = name + '_kp_' + ligand[0]
        if not os.path.isdir('kp_' + ligand[0]):
            os.mkdir('kp_' + ligand[0])
            os.chdir('kp_' + ligand[0])

            local_mol = mol3D()
            local_mol.copymol3D(mol)
            deletion_indices = list(
                set(range(local_mol.natoms)) - set(ligand[1]))
            local_mol.deleteatoms(deletion_indices)
            local_mol.writexyz(local_name + '.xyz')

            local_infile_dict = copy.copy(infile_dict)
            local_infile_dict['name'] = local_name
            local_infile_dict['charge'] = ligand_charge
            local_infile_dict['spinmult'] = ligand_spin
            local_infile_dict['run_type'] = 'energy'
            local_infile_dict['constraints'] = False
            local_infile_dict['convergence_thresholds'] = False

            manager_io.write_input(local_infile_dict)
            manager_io.write_jobscript(local_name,
                                       time_limit='12:00:00',
                                       sleep=True)
            jobscripts.append(local_name + '.in')
            os.chdir('..')
    os.chdir(home)

    return jobscripts
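
Once the 'rm_' (complex minus ligand) and 'kp_' (ligand only) single points finish, the rigid dissociation energy follows from three total energies. A minimal post-processing sketch; the paths and the 'energy' key are assumptions:

# E_diss = E(complex minus ligand) + E(ligand) - E(full complex)
e_complex = manager_io.read_outfile('complex/complex.out')['energy']  # hypothetical path and key
e_metal = manager_io.read_outfile('rm_O1/complex_rm_O1.out')['energy']  # hypothetical
e_ligand = manager_io.read_outfile('kp_O1/complex_kp_O1.out')['energy']  # hypothetical
print('Rigid dissociation energy:', e_metal + e_ligand - e_complex)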
Example #5
def resub(directory='in place'):
    # Takes a directory, resubmits errors, scf failures, and spin contaminated cases
    configure_dict = manager_io.read_configure(directory, None)
    max_resub = configure_dict['max_resub']
    max_jobs = configure_dict['max_jobs']
    hard_job_limit = configure_dict['hard_job_limit']
    hit_queue_limit = False  # Tracks whether this run has limited the number of jobs submitted to stay within queue limits
    # Get the state of all jobs being managed by this instance of the job manager
    completeness = moltools.check_completeness(directory,
                                               max_resub,
                                               configure_dict=configure_dict)
    print("completeness: ", completeness)
    errors = completeness['Error']  # Calculations which failed to complete
    scf_errors = completeness['SCF_Error']  # Failed to complete, appear to have an scf error, and hit the wall time
    oscillating_scf_errors = completeness['oscillating_scf_errors']  # Failed to complete and appear to have an oscillating scf error
    need_resub = completeness['Needs_resub']  # Calculations with level shifts or hfx exchange changed
    spin_contaminated = completeness['Spin_contaminated']  # Finished jobs with spin contaminated solutions
    active = completeness['Active']  # Jobs which are currently running
    thermo_grad_error = completeness['Thermo_grad_error']  # Thermo jobs encountering the thermo grad error
    waiting = completeness['Waiting']  # Jobs which are or were waiting for another job to finish before continuing
    bad_geos = completeness['Bad_geos']  # Jobs which finished but converged to a bad geometry
    finished = completeness['Finished']
    molscontrol_kills = completeness['molscontrol_kills']
    nactive = tools.get_number_active()  # Number of active jobs, counting bundled jobs as a single job
    # Kill SCF errors in progress, which are wasting computational resources
    all_scf_errors = completeness['SCF_Errors_Including_Active']  # All jobs which appear to have an scf error, including active ones
    scf_errors_to_kill = [scf_err for scf_err in all_scf_errors if scf_err not in scf_errors]
    names_to_kill = [os.path.split(scf_err)[-1].rsplit('.', 1)[0] for scf_err in scf_errors_to_kill]
    kill_jobs(names_to_kill,
              message1='Job: ',
              message2=' appears to have an scf error. Killing this job early')
    # Prep derivative jobs such as thermo single points, vertical IP, and ligand dissociation energies
    needs_derivative_jobs = list(filter(tools.check_original, finished))
    print("needs_derivative_jobs: ", needs_derivative_jobs)
    prep_derivative_jobs(directory, needs_derivative_jobs)
    resubmitted = []  # Gets True if the job is submitted, False if not; contains booleans, not job identifiers

    for job in molscontrol_kills:
        print("killed by molscontrol: ", job)
    # Resub unidentified errors
    for error in errors:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        resub_tmp = recovery.simple_resub(error)
        if resub_tmp:
            print(('Unidentified error in job: ' + os.path.split(error)[-1] +
                   ' -Resubmitting'))
            print('')
        resubmitted.append(resub_tmp)

    # Resub oscillating_scf convergence errors
    for error in oscillating_scf_errors:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        local_configure = manager_io.read_configure(directory, None)
        if 'scf' in local_configure['job_recovery']:
            resub_tmp = recovery.resub_oscillating_scf(error)
            if resub_tmp:
                print(('Oscillating SCF error identified in job: ' +
                       os.path.split(error)[-1] +
                       ' -Resubmitting with adjusted precision and grid.'))
                print('')
            resubmitted.append(resub_tmp)

    # Resub scf convergence errors
    for error in scf_errors:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        local_configure = manager_io.read_configure(directory, None)
        if 'scf' in local_configure['job_recovery']:
            resub_tmp = recovery.resub_scf(error)
            if resub_tmp:
                print(('SCF error identified in job: ' +
                       os.path.split(error)[-1] +
                       ' -Resubmitting with adjusted levelshifts'))
                print('')
            resubmitted.append(resub_tmp)

    # Resub jobs which converged to bad geometries with additional constraints
    for error in bad_geos:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        local_configure = manager_io.read_configure(directory, None)
        if 'bad_geo' in local_configure['job_recovery']:
            resub_tmp = recovery.resub_bad_geo(error, directory)
            if resub_tmp:
                print((
                    'Bad final geometry in job: ' + os.path.split(error)[-1] +
                    ' -Resubmitting from initial structure with additional constraints'
                ))
                print('')
            resubmitted.append(resub_tmp)

    # Resub spin contaminated cases
    for error in spin_contaminated:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        local_configure = manager_io.read_configure(directory, None)
        if 'spin_contaminated' in local_configure['job_recovery']:
            resub_tmp = recovery.resub_spin(error)
            if resub_tmp:
                print(('Spin contamination identified in job: ' +
                       os.path.split(error)[-1] +
                       ' -Resubmitting with adjusted HFX'))
                print('')
            resubmitted.append(resub_tmp)

    # Resub jobs with atypical parameters used to aid convergence
    for error in need_resub:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        resub_tmp = recovery.clean_resub(error)
        if resub_tmp:
            print(('Job ' + os.path.split(error)[-1] +
                   ' needs to be rerun with typical parameters. -Resubmitting'))
            print('')
        resubmitted.append(resub_tmp)

    # Create a job with a tighter convergence threshold for failed thermo jobs
    for error in thermo_grad_error:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        local_configure = manager_io.read_configure(directory, None)
        if 'thermo_grad_error' in local_configure['job_recovery']:
            resub_tmp = recovery.resub_tighter(error)
            if resub_tmp:
                print((
                    'Job ' + os.path.split(error)[-1] +
                    ' needs a better initial geo. Creating a geometry run with tighter convergence criteria'
                ))
                print('')
            resubmitted.append(resub_tmp)

    # Look at jobs in "waiting," resume them if the job they were waiting for is finished
    # Currently, this should only ever be thermo jobs waiting for an ultratight job
    for waiting_dict in waiting:
        if ((nactive + np.sum(resubmitted)) >= max_jobs) or (
            (tools.get_total_queue_usage() + np.sum(resubmitted)) >=
                hard_job_limit):
            hit_queue_limit = True
            continue
        if len(list(waiting_dict.keys())) > 1:
            raise Exception('Waiting job list improperly constructed')
        job = list(waiting_dict.keys())[0]
        waiting_for = waiting_dict[job]
        if waiting_for in finished:
            history = recovery.load_history(job)
            history.waiting = None
            history.save()
            results_for_this_job = manager_io.read_outfile(job)
            if results_for_this_job['thermo_grad_error']:
                resubmitted.append(recovery.resub_thermo(job))
            else:
                raise Exception('A method for resuming job: ' + job +
                                ' is not defined')
        else:
            resubmitted.append(False)

    # Submit jobs which haven't yet been submitted
    if not (((nactive + np.sum(resubmitted)) >= max_jobs) or
            ((tools.get_total_queue_usage() + np.sum(resubmitted)) >= hard_job_limit)):
        to_submit = []
        jobscripts = tools.find('*_jobscript')
        active_jobs = tools.list_active_jobs(home_directory=directory,
                                             parse_bundles=True)
        for job in jobscripts:
            job_name = os.path.split(job.rsplit('_', 1)[0])[-1]
            if not os.path.isfile(job.rsplit('_', 1)[0] + '.out') and job_name not in active_jobs:
                to_submit.append(job)

        short_jobs_to_submit = [
            i for i in to_submit if tools.check_short_single_point(i)
        ]
        long_jobs_to_submit = [
            i for i in to_submit if i not in short_jobs_to_submit
        ]
        if len(short_jobs_to_submit) > 0:
            bundled_jobscripts = tools.bundle_jobscripts(
                os.getcwd(), short_jobs_to_submit)
        else:
            bundled_jobscripts = []
        to_submit = bundled_jobscripts + long_jobs_to_submit

        submitted = []
        for job in to_submit:
            if ((len(submitted) + nactive + np.sum(resubmitted)) >= max_jobs
                ) or ((tools.get_total_queue_usage() + len(submitted) +
                       np.sum(resubmitted)) >= hard_job_limit):
                hit_queue_limit = True
                continue
            print(('Initial submission for job: ' + os.path.split(job)[-1]))
            tools.qsub(job)
            submitted.append(True)
    else:
        hit_queue_limit = True
        submitted = []

    number_resubmitted = np.sum(np.array(resubmitted + submitted))
    return int(number_resubmitted), int(len(completeness['Active'])), hit_queue_limit
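
resub is designed to be called repeatedly as the queue drains. A minimal polling loop; the interval and stop condition are arbitrary choices, not part of the original code:

import time

while True:
    number_resubmitted, number_active, hit_queue_limit = resub('in place')
    if number_resubmitted == 0 and number_active == 0 and not hit_queue_limit:
        break  # nothing running and nothing newly submitted
    time.sleep(3600)  # re-check hourly (arbitrary)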
Example #6
def prep_hfx_resample(path, hfx_values=[0, 5, 10, 15, 20, 25, 30]):
    # Given a path to the outfile of a finished run, this preps the files for hfx resampling
    # Uses the wavefunction from the gas phase calculation as an initial guess
    # Returns a list of the PATH(s) to the jobscript(s) to start the resampling calculation(s)
    home = os.getcwd()
    path = convert_to_absolute_path(path)
    base = os.path.split(path)[0]

    results = manager_io.read_outfile(path)
    if not results['finished']:
        raise Exception('This calculation does not appear to be complete! Aborting...')

    # Check the state of the calculation and ensure that hfx resampling is valid
    infile_dict = manager_io.read_infile(path)
    if infile_dict['method'] != 'b3lyp':
        raise Exception('HFX resampling may not behave well for methods other than b3lyp!')
    if not infile_dict['hfx']:
        infile_dict['hfx'] = 20
    if infile_dict['hfx'] not in hfx_values:
        raise Exception('HFX resampling list does not contain the original hfx value!')

    # Now, start generating the base directory to hold all the hfx resampling values
    name = results['name'] + '_HFXresampling'
    hfx_path = os.path.join(base, name)
    if not os.path.isdir(hfx_path):
        os.mkdir(hfx_path)
    os.chdir(hfx_path)

    # Make the directory for the original calculation
    subname = name + '_' + str(infile_dict['hfx'])
    PATH = os.path.join(hfx_path, subname)
    if not os.path.isdir(PATH):
        os.mkdir(PATH)
    os.chdir(PATH)

    if not os.path.exists(os.path.join(PATH, subname + '.out')):
        shutil.copyfile(path, subname + '.out')
        shutil.copyfile(path.rsplit('.', 1)[0] + '_jobscript', subname + '_jobscript')
        shutil.copyfile(path.rsplit('.', 1)[0] + '.in', subname + '.in')
        shutil.copytree(os.path.join(os.path.split(path)[0], 'scr'), 'scr')
        if os.path.exists(path.rsplit('.', 1)[0] + '.xyz'):
            shutil.copyfile(path.rsplit('.', 1)[0] + '.xyz', subname + '.xyz')

    # Find the hfx resampling values that we're ready to generate
    hfx_values_to_generate = []
    existing_resampled_values = glob.glob(os.path.join(hfx_path, name + '_*'))
    for existing in existing_resampled_values:
        hfx = int(existing.rsplit('_', 1)[1])
        subname = name + '_' + str(hfx)
        outfile_path = os.path.join(existing, subname + '.out')
        if os.path.exists(outfile_path):
            if manager_io.read_outfile(outfile_path)['finished']:
                hfx_values_to_generate.append(hfx - 5)
                hfx_values_to_generate.append(hfx + 5)

    hfx_values_to_generate = list(set(hfx_values_to_generate))
    hfx_values_to_generate = [i for i in hfx_values_to_generate if i in hfx_values]

    # Now generate the additional hfx resampling values
    jobscripts = []
    for hfx in hfx_values_to_generate:
        subname = name + '_' + str(hfx)
        if os.path.exists(os.path.join(hfx_path, subname)):  # skip over values that we've already done
            continue

        os.mkdir(os.path.join(hfx_path, subname))
        os.chdir(os.path.join(hfx_path, subname))

        higher_hfx = subname.rsplit('_', 1)[0] + '_' + str(int(subname.rsplit('_', 1)[1]) + 5)
        lower_hfx = subname.rsplit('_', 1)[0] + '_' + str(int(subname.rsplit('_', 1)[1]) - 5)
        if os.path.exists(os.path.join(hfx_path, higher_hfx)):
            source_dir = os.path.join(hfx_path, higher_hfx)
        else:
            source_dir = os.path.join(hfx_path, lower_hfx)

        if infile_dict['run_type'] == 'minimize':
            optimxyz = os.path.join(source_dir, 'scr', 'optim.xyz')
        else:
            optimxyz = os.path.join(source_dir, 'scr', 'xyz.xyz')
        extract_optimized_geo(optimxyz)

        shutil.copy(os.path.join(source_dir, 'scr', 'optimized.xyz'), subname + '.xyz')
        if infile_dict['spinmult'] == 1:
            shutil.copy(os.path.join(source_dir, 'scr', 'c0'), 'c0')
            manager_io.write_jobscript(subname, custom_line='# -fin c0', machine=machine)
        else:
            shutil.copyfile(os.path.join(source_dir, 'scr', 'ca0'), 'ca0')
            shutil.copyfile(os.path.join(source_dir, 'scr', 'cb0'), 'cb0')
            manager_io.write_jobscript(subname, custom_line=['# -fin ca0\n', '# -fin cb0\n'],
                                       machine=machine)

        local_infile_dict = copy.copy(infile_dict)
        local_infile_dict['guess'] = True
        local_infile_dict['hfx'] = hfx / 100.
        local_infile_dict['name'] = subname
        local_infile_dict['machine'] = machine
        manager_io.write_input(local_infile_dict)
        jobscripts.append(os.path.join(os.getcwd(), subname + '_jobscript'))

    os.chdir(home)

    return jobscripts
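
The ±5 logic grows the HFX grid outward from finished neighbors: with the default grid and only the original 20% run finished, the first call generates 15 and 25; once those finish, the next call adds 10 and 30, and so on. Here is the expansion rule in isolation, as a pure-function sketch mirroring the 5-point spacing above:

def next_hfx_values(finished, allowed):
    # Neighbors (+/-5) of every finished HFX value, restricted to the allowed grid
    candidates = {h + d for h in finished for d in (-5, 5)}
    return sorted(c for c in candidates if c in allowed and c not in finished)

print(next_hfx_values([20], [0, 5, 10, 15, 20, 25, 30]))          # [15, 25]
print(next_hfx_values([15, 20, 25], [0, 5, 10, 15, 20, 25, 30]))  # [10, 30]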
Example #7
def prep_ultratight(path):
    # Given a path to the outfile of a finished run, this preps a run with tighter convergence criteria
    # Uses the wavefunction and geometry from the previous calculation as an initial guess
    # Returns a list of the PATH(s) to the jobscript(s) to start the tightened convergence calculation(s)
    home = os.getcwd()
    path = convert_to_absolute_path(path)

    results = manager_io.read_outfile(path)
    if not results['finished']:
        raise Exception('This calculation does not appear to be complete! Aborting...')

    infile_dict = manager_io.read_infile(path)

    base = os.path.split(path)[0]

    if infile_dict['run_type'] == 'minimize':
        optimxyz = os.path.join(base, 'scr', 'optim.xyz')
    else:
        optimxyz = os.path.join(base, 'scr', 'xyz.xyz')
    extract_optimized_geo(optimxyz)

    # Now, start generating the new directory
    name = results['name'] + '_ultratight'
    PATH = os.path.join(base, name)

    if not os.path.isdir(PATH):  # First time that ultratight has been run, create necessary files
        os.mkdir(PATH)
        os.chdir(PATH)

        if os.path.exists(name + '.in') or os.path.exists(name + '.out') or os.path.exists(name + '_jobscript'):
            raise Exception('This tightened convergence run appears to already exist. Aborting...')

        shutil.copyfile(os.path.join(base, 'scr', 'optimized.xyz'), os.path.join(PATH, name + '.xyz'))
        if infile_dict['spinmult'] == 1:
            shutil.copyfile(os.path.join(base, 'scr', 'c0'), os.path.join(PATH, 'c0'))
            manager_io.write_jobscript(name, custom_line='# -fin c0', machine=machine)
        else:
            shutil.copyfile(os.path.join(base, 'scr', 'ca0'), os.path.join(PATH, 'ca0'))
            shutil.copyfile(os.path.join(base, 'scr', 'cb0'), os.path.join(PATH, 'cb0'))
            manager_io.write_jobscript(name, custom_line=['# -fin ca0\n', '# -fin cb0\n'], machine=machine)

        criteria = ['2.25e-04', '1.5e-04', '0.9e-03', '0.6e-03', '0.5e-06', '1.5e-05']

        local_infile_dict = copy.copy(infile_dict)
        local_infile_dict['guess'] = True
        local_infile_dict['convergence_thresholds'] = criteria
        local_infile_dict['name'] = name
        local_infile_dict['machine'] = machine

        manager_io.write_input(local_infile_dict)

        # Make an empty .out file to prevent the resubmission module from mistakenly submitting this job twice
        open(name + '.out', 'w').close()

        os.chdir(home)

        return [os.path.join(PATH, name + '_jobscript')]

    else:  # This has been run before, further tighten the convergence criteria
        os.chdir(PATH)
        infile_dict = manager_io.read_infile(os.path.join(PATH, name + '.out'))
        criteria = [str(float(i) / 2.) for i in infile_dict['convergence_thresholds']]

        local_infile_dict = copy.copy(infile_dict)
        local_infile_dict['guess'] = True
        local_infile_dict['convergence_thresholds'] = criteria
        local_infile_dict['name'] = name
        local_infile_dict['machine'] = machine
        manager_io.write_input(local_infile_dict)

        extract_optimized_geo(os.path.join(PATH, 'scr', 'optim.xyz'))
        shutil.copy(os.path.join(PATH, 'scr', 'optimized.xyz'), os.path.join(PATH, name + '.xyz'))

        os.chdir(home)

        return [os.path.join(PATH, name + '_jobscript')]
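
Each repeat call halves every convergence threshold, so the criteria tighten geometrically. A trivial illustration of the halving rule used in the else branch:

criteria = ['2.25e-04', '1.5e-04', '0.9e-03', '0.6e-03', '0.5e-06', '1.5e-05']
tightened = [str(float(i) / 2.) for i in criteria]  # same rule as above
print(tightened[0])  # 0.0001125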
Example #8
def prep_functionals_sp(path, functionalsSP):
    home = os.getcwd()
    path = convert_to_absolute_path(path)
    results = manager_io.read_outfile(path)
    if not results['finished']:
        raise Exception('This calculation does not appear to be complete! Aborting...')

    infile_dict = manager_io.read_infile(path)
    base = os.path.split(path)[0]

    if infile_dict['run_type'] == 'minimize':
        optimxyz = os.path.join(base, 'scr', 'optim.xyz')
    else:
        optimxyz = os.path.join(base, 'scr', 'xyz.xyz')
    extract_optimized_geo(optimxyz)

    # Now, start generating the new directory
    funcname = results['name'] + '_functionalsSP'
    functional_base_path = os.path.join(base, funcname)
    if not os.path.isdir(functional_base_path):
        os.mkdir(functional_base_path)
    os.chdir(functional_base_path)

    jobscripts = []
    for func in functionalsSP:
        PATH = os.path.join(functional_base_path, str(func))
        if os.path.isdir(PATH):
            continue
        ensure_dir(PATH)
        name = results['name'] + "_functional_" + str(func)
        shutil.copyfile(os.path.join(base, 'scr', 'optimized.xyz'), os.path.join(PATH, name + '.xyz'))
        guess = False
        os.chdir(PATH)
        if infile_dict['spinmult'] == 1:
            if os.path.isfile(os.path.join(base, 'scr', 'c0')):
                shutil.copyfile(os.path.join(base, 'scr', 'c0'), os.path.join(PATH, 'c0'))
                manager_io.write_jobscript(name, custom_line='# -fin c0', machine=machine)
                guess = True
        else:
            if os.path.isfile(os.path.join(base, 'scr', 'ca0')) and os.path.isfile(os.path.join(base, 'scr', 'cb0')):
                shutil.copyfile(os.path.join(base, 'scr', 'ca0'), os.path.join(PATH, 'ca0'))
                shutil.copyfile(os.path.join(base, 'scr', 'cb0'), os.path.join(PATH, 'cb0'))
                manager_io.write_jobscript(name, custom_line=['# -fin ca0\n', '# -fin cb0\n'], machine=machine)
                guess = True
        local_infile_dict = copy.copy(infile_dict)
        local_infile_dict['guess'] = guess
        local_infile_dict['run_type'] = 'energy'
        local_infile_dict['name'] = name
        local_infile_dict['levelshifta'], local_infile_dict['levelshiftb'] = 0.25, 0.25
        local_infile_dict['method'] = func
        local_infile_dict['machine'] = machine

        manager_io.write_input(local_infile_dict)

        with open('configure', 'w') as fil:
            fil.write('method:' + func)
        os.chdir(home)
        jobscripts.append(os.path.join(PATH, name + '_jobscript'))
    os.chdir(home)
    return jobscripts
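
A minimal driver sketch for the function above; the functional names are placeholders, and tools.qsub from Example #5 is assumed to be importable:

for js in prep_functionals_sp('fe_complex/fe_complex.out', ['blyp', 'pbe']):  # hypothetical arguments
    tools.qsub(js)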
Example #9
def prep_solvent_sp(path, solvents=[78.9]):
    # Given a path to the outfile of a finished run, this preps the files for a single point solvent run
    # Uses the wavefunction from the gas phase calculation as an initial guess
    # Returns a list of the PATH(s) to the jobscript(s) to start the solvent sp calculation(s)
    home = os.getcwd()
    path = convert_to_absolute_path(path)

    results = manager_io.read_outfile(path)
    if not results['finished']:
        raise Exception('This calculation does not appear to be complete! Aborting...')

    infile_dict = manager_io.read_infile(path)

    base = os.path.split(path)[0]

    if infile_dict['run_type'] == 'minimize':
        optimxyz = os.path.join(base, 'scr', 'optim.xyz')
    else:
        optimxyz = os.path.join(base, 'scr', 'xyz.xyz')
    extract_optimized_geo(optimxyz)

    # Now, start generating the new directory
    solname = results['name'] + '_solvent'
    solvent_base_path = os.path.join(base, solname)
    if not os.path.isdir(solvent_base_path):
        os.mkdir(solvent_base_path)
    os.chdir(solvent_base_path)

    jobscripts = []
    for sol_val in solvents:
        PATH = os.path.join(solvent_base_path, str(sol_val).replace('.', '_'))
        if os.path.isdir(PATH):
            continue
        ensure_dir(PATH)
        name = results['name'] + "_solvent_" + str(sol_val)
        shutil.copyfile(os.path.join(base, 'scr', 'optimized.xyz'), os.path.join(PATH, name + '.xyz'))
        guess = False
        os.chdir(PATH)
        if infile_dict['spinmult'] == 1:
            if os.path.isfile(os.path.join(base, 'scr', 'c0')):
                shutil.copyfile(os.path.join(base, 'scr', 'c0'), os.path.join(PATH, 'c0'))
                manager_io.write_jobscript(name, custom_line='# -fin c0', machine=machine)
                guess = True
        else:
            if os.path.isfile(os.path.join(base, 'scr', 'ca0')) and os.path.isfile(os.path.join(base, 'scr', 'cb0')):
                shutil.copyfile(os.path.join(base, 'scr', 'ca0'), os.path.join(PATH, 'ca0'))
                shutil.copyfile(os.path.join(base, 'scr', 'cb0'), os.path.join(PATH, 'cb0'))
                manager_io.write_jobscript(name, custom_line=['# -fin ca0\n', '# -fin cb0\n'], machine=machine)
                guess = True
        local_infile_dict = copy.copy(infile_dict)
        local_infile_dict['solvent'], local_infile_dict['guess'] = sol_val, guess
        local_infile_dict['run_type'] = 'energy'
        local_infile_dict['name'] = name
        local_infile_dict['levelshifta'], local_infile_dict['levelshiftb'] = 0.25, 0.25
        local_infile_dict['machine'] = machine

        manager_io.write_input(local_infile_dict)

        os.chdir(home)
        jobscripts.append(os.path.join(PATH, name + '_jobscript'))
    os.chdir(home)
    return jobscripts
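
The solvents argument is a list of dielectric constants, so several implicit solvents can be staged in one call. A sketch with approximate dielectrics for water and acetonitrile (tools.qsub is again assumed):

for js in prep_solvent_sp('fe_complex/fe_complex.out', solvents=[78.9, 37.5]):  # hypothetical path
    tools.qsub(js)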
Example #10
def check_completeness(directory='in place', max_resub=5, configure_dict=False):
    # Takes a directory; returns lists of finished, failed, and in-progress jobs
    outfiles = find('*.out', directory)
    outfiles = list(filter(check_valid_outfile, outfiles))

    results_tmp = [manager_io.read_outfile(outfile, short_ouput=True) for outfile in outfiles]  # 'short_ouput' spelling follows the manager_io keyword
    results_tmp = list(zip(outfiles, results_tmp))
    results_dict = dict()
    for outfile, tmp in results_tmp:
        results_dict[outfile] = tmp

    active_jobs = list_active_jobs(home_directory=directory, parse_bundles=True)

    def check_finished(path, results_dict=results_dict):
        # Return True if the outfile corresponds to a complete job, False otherwise
        return bool(results_dict[path]['finished'])

    def check_active(path, active_jobs=active_jobs):
        # Given a path, check whether the job is currently in the queue
        name = os.path.split(path)[-1]
        name = name.rsplit('.', 1)[0]
        return name in active_jobs

    def check_needs_resub(path):
        if os.path.isfile(path.rsplit('.', 1)[0] + '.pickle'):
            history = resub_history()
            history.read(path)
            return history.needs_resub
        else:
            return False

    def check_waiting(path):
        if os.path.isfile(path.rsplit('.', 1)[0] + '.pickle'):
            history = resub_history()
            history.read(path)
            if history.waiting:
                return True
        return False

    def grab_waiting(path):
        if os.path.isfile(path.rsplit('.', 1)[0] + '.pickle'):
            history = resub_history()
            history.read(path)
            return history.waiting
        raise Exception('Attempting to grab a "waiting" criteria that does not exist')

    def check_chronic_failure(path):
        if os.path.isfile(path.rsplit('.', 1)[0] + '.pickle'):
            history = resub_history()
            history.read(path)
            return history.resub_number >= max_resub
        return False

    def check_spin_contaminated(path, results_dict=results_dict):
        results = results_dict[path]
        if configure_dict and "ss_cutoff" in configure_dict:
            ss_cutoff = configure_dict['ss_cutoff']
        else:
            ss_cutoff = 1.0
        if results['finished']:
            if type(results['s_squared_ideal']) == float:
                if abs(results['s_squared'] - results['s_squared_ideal']) > ss_cutoff:
                    return True
        return False

    def check_oscillating_scf_error(path, results_dict=results_dict):
        return bool(results_dict[path]['oscillating_scf_error'])

    def check_scf_error(path, results_dict=results_dict):
        return bool(results_dict[path]['scf_error'])

    def check_thermo_grad_error(path, results_dict=results_dict):
        return bool(results_dict[path]['thermo_grad_error'])

    active_jobs = list(filter(check_active, outfiles))
    finished = list(filter(check_finished, outfiles))
    needs_resub = list(filter(check_needs_resub, outfiles))
    waiting = list(filter(check_waiting, outfiles))
    spin_contaminated = list(filter(check_spin_contaminated, outfiles))
    all_scf_errors = list(filter(check_scf_error, outfiles))
    oscillating_scf_errors = list(filter(check_oscillating_scf_error, outfiles))
    thermo_grad_errors = list(filter(check_thermo_grad_error, outfiles))
    chronic_errors = list(filter(check_chronic_failure, outfiles))
    errors = list(set(outfiles) - set(active_jobs) - set(finished))
    scf_errors = list(filter(check_scf_error, errors))

    # Look for additional active jobs that haven't yet generated outfiles
    jobscript_list = find('*_jobscript', directory)
    jobscript_list = [i.rsplit('_', 1)[0] + '.out' for i in jobscript_list]
    extra_active_jobs = list(filter(check_active, jobscript_list))
    active_jobs.extend(extra_active_jobs)

    # Sort out conflicts in order of reverse priority
    # A job only gets labelled as finished if it's in no other category
    # A job always gets labelled as active if it fits that criteria, even if it's in every other category too

    priority_list = [active_jobs, chronic_errors, waiting, thermo_grad_errors,
                     oscillating_scf_errors, scf_errors, errors, spin_contaminated, needs_resub, finished]
    priority_list_names = ['Active', 'Chronic_errors', 'Waiting', 'Thermo_grad_error',
                           'oscillating_scf_errors', 'SCF_Error', 'Error', 'Spin_contaminated', 'Needs_resub',
                           'Finished']
    priority_list = priority_sort(priority_list)

    results = dict()
    for key, lst in zip(priority_list_names, priority_list):
        results[key] = lst

    # There are two special categories which operate a bit differently: waiting and "SCF_Errors_Including_Active"
    waiting = [{i: grab_waiting(i)} for i in waiting]
    results['Waiting'] = waiting
    results['SCF_Errors_Including_Active'] = all_scf_errors

    return results
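
A quick way to see the state of a directory is to print the size of each category returned above (note that 'Waiting' holds one-entry dicts rather than bare paths):

completeness = check_completeness('in place', max_resub=5)
for category, jobs in completeness.items():
    print(category, len(jobs))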