Exemplo n.º 1
0
def parse_file(file_path):
    """Parse a computational chemistry logfile into a result dictionary.

    Returns a dict with "success" (bool) and, on success:
    - "attributes": every cclib attribute that could be read,
    - "InChI": attached when Open Babel is available and conversion works,
    - a chemical formula and "xyz_data" added by the post-processing helpers.
    """
    # ccopen returns None when it cannot identify the logfile type.
    if ccopen(file_path) is None:
        return {"success": False}

    try:
        parsed_data = ccread(file_path)
        parsed_data.listify()
        res = {"success": True, "attributes": {}}
        for name in parsed_data._attributes:
            # Attributes that were not parsed raise on access; keep only
            # the ones that are actually present.
            try:
                res["attributes"][name] = getattr(parsed_data, name)
            except Exception:
                continue
        if ob_import:
            # Open Babel imported successfully, so try to attach an InChI.
            inchi = get_InChI(res["attributes"])
            if inchi is not None:
                res["InChI"] = inchi
    except Exception:
        # Any parsing failure is reported as an unsuccessful result
        # (previously a bare `except:`, which also swallowed SystemExit).
        return {"success": False}

    make_chemical_formula(res)
    res["xyz_data"] = XYZ_data(res["attributes"])
    return res
Exemplo n.º 2
0
def process_logfiles(filenames, output, identifier):
    """Parse logfiles into a pandas DataFrame and write it in the format
    implied by the output path's extension, or print it when no output
    path is given."""
    frame = ccframe([ccopen(name) for name in filenames])

    if output is None:
        print(frame)
        return

    extension = os.path.splitext(os.path.basename(output))[1][1:]
    if not extension:
        raise RuntimeWarning(
            "The output type could not be determined from the given path, "
            "not writing DataFrame to disk"
        )

    if extension == 'csv':
        frame.to_csv(output)
    elif extension in ('h5', 'hdf', 'hdf5'):
        frame.to_hdf(output, key=identifier)
    elif extension == 'json':
        frame.to_json(output)
    elif extension in ('pickle', 'pkl'):
        frame.to_pickle(output)
    elif extension == 'xlsx':
        writer = pd.ExcelWriter(output)
        # This overwrites previous sheets
        # (see https://stackoverflow.com/a/42375263/4039050)
        frame.to_excel(writer, sheet_name=identifier)
        writer.save()
Exemplo n.º 3
0
def main():
    """Parse a series of output files and print out the center of mass and
    center of nuclear charge for the final geometry.
    """
    import argparse
    from cclib.io import ccopen
    import periodic_table as pt

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filename', nargs='+')
    parsed_args = arg_parser.parse_args()

    for path in parsed_args.filename:

        data = ccopen(path).parse()

        # pylint: disable=E1101
        numbers = data.atomnos
        # Look up each atom's mass via its element symbol.
        masses = [pt.Mass[pt.Element[num]] for num in numbers]
        final_geometry = data.atomcoords[-1]

        print(numbers)
        print(masses)
        print(final_geometry)
        print(centerofmass(final_geometry, masses))
        print(centerofnuccharge(final_geometry, numbers))
Exemplo n.º 4
0
def process_logfiles(filenames, output, identifier):
    """Parse logfiles into a DataFrame and write it (overwriting any
    existing file) in the format implied by the output extension, or
    print it when no output path is given."""
    frame = ccframe([ccopen(name) for name in filenames])

    if output is None:
        print(frame)
        return

    extension = os.path.splitext(os.path.basename(output))[1][1:]
    if not extension:
        raise RuntimeWarning(
            "The output type could not be determined from the given path, "
            "not writing DataFrame to disk")

    if extension == 'csv':
        frame.to_csv(output, mode='w')
    elif extension in ('h5', 'hdf', 'hdf5'):
        frame.to_hdf(output, mode='w', key=identifier)
    elif extension == 'json':
        frame.to_json(output)
    elif extension in ('pickle', 'pkl'):
        frame.to_pickle(output)
    elif extension == 'xlsx':
        writer = pd.ExcelWriter(output, mode='w')
        # This overwrites previous sheets
        # (see https://stackoverflow.com/a/42375263/4039050)
        frame.to_excel(writer, sheet_name=identifier)
        writer.save()
Exemplo n.º 5
0
def main():
    """Given a set of geometry optimization jobs, determine if they've
    converged.
    """

    import argparse
    from cclib.io import ccopen

    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='+')
    args = parser.parse_args()

    for filename in args.filenames:

        job = ccopen(filename)
        # We don't need to parse the output for "traditional" data, we
        # just want to determine the type of file.
        # data = job.parse()

        # BUG FIX: the original compared type(job) against the *string*
        # 'cclib.parser.orcaparser.ORCA', which is always False, so the
        # ORCA convergence marker was never used. Compare the class name
        # instead.
        parser_name = type(job).__name__
        if parser_name == 'ORCA':
            string = 'THE OPTIMIZATION HAS CONVERGED'
        elif parser_name == 'QChem':
            # No convergence marker defined for Q-Chem yet.
            string = ''
        else:
            string = ''

        converged = find_string_in_file(filename, string)
        print('{}: {}'.format(filename, converged))
Exemplo n.º 6
0
def read_file(filn, no_mols):
    """Parse a logfile and return MO/overlap data.

    For a monomer (no_mols < 2): (nbasis, MOs, S, nhomo).
    For a dimer (no_mols == 2): (MOs, Dpair, EvalsAB).
    Any other no_mols returns None.
    """
    # Open and parse the log file in one step.
    mol = ccopen(filn).parse()

    # Molecular orbital coefficients; transposed for our purposes.
    MOs = (mol.mocoeffs[0]).T
    # AO overlaps are symmetric, so no transpose is required here.
    S = mol.aooverlaps

    if no_mols < 2:
        # Monomer: also report the basis-set size and H**O position.
        return mol.nbasis, MOs, S, mol.homos
    if no_mols == 2:
        # Dimer: eigenvalues of the pair plus the Cholesky factor of S.
        EvalsAB = mol.moenergies[0]
        Dpair = sp.linalg.cholesky(S)
        return MOs, Dpair, EvalsAB
Exemplo n.º 7
0
def get_energy(outputfilename, energy_type_str='scf'):
    """Return the requested energy parsed from outputfilename as a float,
    or None when no entry matches the file's program type."""
    job = ccopen(outputfilename)
    for jobtype, needle, fieldnum in get_energy_type(energy_type_str):
        if not isinstance(job, jobtype):
            continue
        with open(outputfilename) as fh:
            raw = search_file(fh, needle, fieldnum)
        # Fortran-style exponents use 'D' where Python expects 'E'.
        return float(raw.replace('D', 'E'))
Exemplo n.º 8
0
def main():
    """Run a charge decomposition analysis (CDA) and print a per-MO table.

    Reads three logfiles from sys.argv[1..3]; presumably the first is the
    full complex and the next two are its fragments (TODO confirm against
    the CDA API). For each spin it prints donation (d), back-donation (b),
    repulsion (r) and residual (s) terms per MO, plus column totals.
    """
    parser1 = ccopen(sys.argv[1], logging.ERROR)
    parser2 = ccopen(sys.argv[2], logging.ERROR)
    parser3 = ccopen(sys.argv[3], logging.ERROR)

    data1 = parser1.parse()
    data2 = parser2.parse()
    data3 = parser3.parse()
    fa = CDA(data1, None, logging.ERROR)
    retval = fa.calculate([data2, data3])

    # Only print the tables when the calculation reports success.
    if retval:

        print("Charge decomposition analysis of %s\n" % (sys.argv[1]))

        # Two H**O entries means an unrestricted calculation with
        # separate alpha/beta tables.
        if len(data1.homos) == 2:
            print("ALPHA SPIN:")
            print("===========")

        print(" MO#      d       b       r       s")
        print("-------------------------------------")

        for spin in range(len(data1.homos)):

            if spin == 1:
                print("\nBETA SPIN:")
                print("==========")

            for i in range(len(fa.donations[spin])):

                print("%4i: %7.3f %7.3f %7.3f %7.3f" % \
                        (i + 1, fa.donations[spin][i],
                                fa.bdonations[spin][i],
                                fa.repulsions[spin][i],
                                fa.residuals[spin][i]))

                # Mark the frontier-orbital boundary in the table.
                if i == data1.homos[spin]:
                    print("------ H**O - LUMO gap ------")

            print("-------------------------------------")
            print(" T:   %7.3f %7.3f %7.3f %7.3f" % \
                    (fa.donations[spin].sum(),
                        fa.bdonations[spin].sum(),
                        fa.repulsions[spin].sum(),
                        fa.residuals[spin].sum()))
Exemplo n.º 9
0
def main():
    """Run a charge decomposition analysis (CDA) and print a per-MO table.

    Reads three logfiles from sys.argv[1..3]; presumably the first is the
    full complex and the next two are its fragments (TODO confirm against
    the CDA API). For each spin it prints donation (d), back-donation (b),
    repulsion (r) and residual (s) terms per MO, plus column totals.
    """
    parser1 = ccopen(sys.argv[1], logging.ERROR)
    parser2 = ccopen(sys.argv[2], logging.ERROR)
    parser3 = ccopen(sys.argv[3], logging.ERROR)

    data1 = parser1.parse(); data2 = parser2.parse(); data3 = parser3.parse()
    fa = CDA(data1, None, logging.ERROR)
    retval = fa.calculate([data2, data3])

    # Only print the tables when the calculation reports success.
    if retval:

        print("Charge decomposition analysis of %s\n"%(sys.argv[1]))

        # Two H**O entries means an unrestricted calculation with
        # separate alpha/beta tables.
        if len(data1.homos) == 2:
            print("ALPHA SPIN:")
            print("===========")

        print(" MO#      d       b       r       s")
        print("-------------------------------------")

        for spin in range(len(data1.homos)):

            if spin == 1:
                print("\nBETA SPIN:")
                print("==========")

            for i in range(len(fa.donations[spin])):

                print("%4i: %7.3f %7.3f %7.3f %7.3f" % \
                        (i + 1, fa.donations[spin][i],
                                fa.bdonations[spin][i],
                                fa.repulsions[spin][i],
                                fa.residuals[spin][i]))

                # Mark the frontier-orbital boundary in the table.
                if i == data1.homos[spin]:
                    print("------ H**O - LUMO gap ------")

            print("-------------------------------------")
            print(" T:   %7.3f %7.3f %7.3f %7.3f" % \
                    (fa.donations[spin].sum(),
                        fa.bdonations[spin].sum(),
                        fa.repulsions[spin].sum(),
                        fa.residuals[spin].sum()))
def main():
    """The main routine!

    For each output file on the command line, plot the geometry
    optimization convergence criteria (max gradient, max displacement,
    scaled energy change) against the optimization step number and save
    the figure as '<stub>.pdf'.
    """

    parser = argparse.ArgumentParser()

    parser.add_argument('compchemfilename', nargs='+')
    parser.add_argument('--scaling-energy-change', type=float, default=10.0)

    args = parser.parse_args()
    compchemfilenames = args.compchemfilename

    for compchemfilename in compchemfilenames:

        # The plot file shares the input file's stem.
        stub = os.path.splitext(compchemfilename)[0]

        job = ccopen(compchemfilename)
        data = job.parse()

        fig, ax = plt.subplots()

        # isinstance rather than an exact type() comparison, so parser
        # subclasses are handled too.
        if isinstance(job, cclib.parser.qchemparser.QChem):

            scfenergies = [
                utils.convertor(scfenergy, 'eV', 'hartree')
                for scfenergy in data.scfenergies
            ]
            gradients = [geovalue[0] for geovalue in data.geovalues]
            displacements = [geovalue[1] for geovalue in data.geovalues]
            # Energy changes are tiny; scale them up so they are visible
            # on the same axes as the other criteria.
            energy_changes = [(geovalue[2] * args.scaling_energy_change)
                              for geovalue in data.geovalues]

            # If this isn't true, something funny happened during the
            # parsing, so fail out.
            assert len(scfenergies) == len(gradients)

            steps = range(1, len(scfenergies) + 1)

            # ax.plot(steps, scfenergies, label='SCF energy')
            ax.plot(steps, gradients, label='max gradient')
            ax.plot(steps, displacements, label='max displacement')
            ax.plot(steps, energy_changes, label='energy change')

            ax.set_title(stub)
            ax.set_xlabel('optimization step #')

        elif isinstance(job, cclib.parser.orcaparser.ORCA):
            # TODO: ORCA convergence plotting not implemented yet.
            pass

        else:
            pass

        ax.legend(loc='best', fancybox=True)

        fig.savefig(stub + '.pdf', bbox_inches='tight')
Exemplo n.º 11
0
def print_dispatch(outputfile):
    """Dispatch EPR printing to the parser-specific print function.

    Returns that function's result, or None when the file's program type
    is not one of the recognized parsers.
    """
    job = ccopen(outputfile)
    program_types = (
        (cclib.parser.adfparser.ADF, print_epr_adf),
        (cclib.parser.daltonparser.DALTON, print_epr_dalton),
        (cclib.parser.orcaparser.ORCA, print_epr_orca),
        (cclib.parser.qchemparser.QChem, print_epr_qchem),
    )
    # Initialize so an unrecognized program type returns None instead of
    # raising UnboundLocalError at the `return` below.
    d = None
    for (program_type, print_function) in program_types:
        if isinstance(job, program_type):
            d = print_function(outputfile)
    return d
Exemplo n.º 12
0
def main():
    """Compute the molecular extent of a parsed geometry and print the
    bounding-box corners, optionally writing a Jmol visualization."""
    args = _parse_args()
    parsed_data = ccopen(args.quantum_chemical_file).parse()
    extent = MolecularExtent(parsed_data, args.radius)
    extent.calculate_extent(args.index)
    if args.output is not None:
        extent.as_jmol(args.index, output=args.output)
    # Print both corners on one line with no separators between pieces.
    print('origin=', tuple(extent.lower_limit),
          ', topcorner=', tuple(extent.upper_limit), sep='')
Exemplo n.º 13
0
def open_and_parse_outputfile(args, outputfilename):
    """The main routine for opening and parsing each output file.

    Scans an ORCA output file for Loewdin population sections, parses the
    orbital data, filters orbital contributions by a threshold, and
    pretty-prints the result. Returns (energies, occupations, orbitals).
    """

    headers = [
        'LOEWDIN ORBITAL POPULATIONS PER MO',
        'LOEWDIN REDUCED ORBITAL POPULATIONS PER MO',
        #'LOEWDIN REDUCED ORBITAL POPULATIONS PER UNO',
        # 'LOEWDIN REDUCED ORBITAL POPULATIONS PER UNSO',
        # This is equivalent to the reduced orbital population per MO, but
        # named differently within CASSCF/MRCI jobs.
        'LOEWDIN ORBITAL-COMPOSITIONS'
    ]

    energies = list()
    occupations = list()
    orbitals = dict()

    # Pre-determine the number of MOs present and whether or not there
    # are two sets of canonical MOs.
    job = ccopen(outputfilename)
    data = job.parse()
    nmo = data.nmo
    has_beta = len(data.homos) == 2

    # For each possible header, parse the section.
    parsed_header = None
    with open(outputfilename) as outputfile:
        for line in outputfile:
            for header in headers:
                if header in line:
                    parsed_header = header
                    print(parsed_header)
                    parse_section(outputfile, nmo, energies, occupations,
                                  orbitals, has_beta)

    # determine the last orbital we should be printing information
    # about
    if not args['--max_orbital']:
        args['--max_orbital'] = data.homos[0] * 2
    if args['--max_orbital'] == 'all':
        args['--max_orbital'] = nmo
    max_orbital = int(args['--max_orbital'])

    threshold = float(args['--threshold'])
    filtered_mos = get_orbital_contribs_within_threshold(
        orbitals, threshold, max_orbital)
    # Guard: previously this raised UnboundLocalError when no section
    # header was ever matched in the file.
    if parsed_header is not None:
        print(parsed_header)
    pretty_print_orbitals(energies, filtered_mos, nmo, has_beta)

    return energies, occupations, orbitals
Exemplo n.º 14
0
def print_dispatch(outputfile):
    """Dispatch EPR printing to the parser-specific print function.

    Returns that function's result, or None when the file's program type
    is not one of the recognized parsers.
    """
    job = ccopen(outputfile)
    program_types = (
        (cclib.parser.adfparser.ADF, print_epr_adf),
        (cclib.parser.daltonparser.DALTON, print_epr_dalton),
        (cclib.parser.orcaparser.ORCA, print_epr_orca),
        (cclib.parser.qchemparser.QChem, print_epr_qchem),
    )
    # Initialize so an unrecognized program type returns None instead of
    # raising UnboundLocalError at the `return` below.
    d = None
    for (program_type, print_function) in program_types:
        if isinstance(job, program_type):
            d = print_function(outputfile)
    return d
Exemplo n.º 15
0
def open_and_parse_outputfile(args, outputfilename):
    """The main routine for opening and parsing each output file.

    Scans an ORCA output file for Loewdin population sections, parses the
    orbital data, filters orbital contributions by a threshold, and
    pretty-prints the result. Returns (energies, occupations, orbitals).
    """

    headers = [
        'LOEWDIN ORBITAL POPULATIONS PER MO',
        'LOEWDIN REDUCED ORBITAL POPULATIONS PER MO',
        #'LOEWDIN REDUCED ORBITAL POPULATIONS PER UNO',
        # 'LOEWDIN REDUCED ORBITAL POPULATIONS PER UNSO',
        # This is equivalent to the reduced orbital population per MO, but
        # named differently within CASSCF/MRCI jobs.
        'LOEWDIN ORBITAL-COMPOSITIONS'
    ]

    energies = list()
    occupations = list()
    orbitals = dict()

    # Pre-determine the number of MOs present and whether or not there
    # are two sets of canonical MOs.
    job = ccopen(outputfilename)
    data = job.parse()
    nmo = data.nmo
    has_beta = len(data.homos) == 2

    # For each possible header, parse the section.
    parsed_header = None
    with open(outputfilename) as outputfile:
        for line in outputfile:
            for header in headers:
                if header in line:
                    parsed_header = header
                    print(parsed_header)
                    parse_section(outputfile, nmo, energies, occupations,
                                  orbitals, has_beta)

    # determine the last orbital we should be printing information
    # about
    if not args['--max_orbital']:
        args['--max_orbital'] = data.homos[0] * 2
    if args['--max_orbital'] == 'all':
        args['--max_orbital'] = nmo
    max_orbital = int(args['--max_orbital'])

    threshold = float(args['--threshold'])
    filtered_mos = get_orbital_contribs_within_threshold(
        orbitals, threshold, max_orbital)
    # Guard: previously this raised UnboundLocalError when no section
    # header was ever matched in the file.
    if parsed_header is not None:
        print(parsed_header)
    pretty_print_orbitals(energies, filtered_mos, nmo, has_beta)

    return energies, occupations, orbitals
def main():
    """Plot geometry-optimization convergence criteria (max gradient, max
    displacement, scaled energy change) per step for each file on the
    command line, saving each figure as '<stub>.pdf'."""

    parser = argparse.ArgumentParser()

    parser.add_argument('compchemfilename', nargs='+')
    parser.add_argument('--scaling-energy-change', type=float, default=10.0)

    args = parser.parse_args()
    compchemfilenames = args.compchemfilename

    for compchemfilename in compchemfilenames:

        # The plot file shares the input file's stem.
        stub = os.path.splitext(compchemfilename)[0]

        job = ccopen(compchemfilename)
        data = job.parse()

        fig, ax = plt.subplots()

        if type(job) == cclib.parser.qchemparser.QChem:

            scfenergies = [utils.convertor(scfenergy, 'eV', 'hartree') for scfenergy in data.scfenergies]
            gradients = [geovalue[0] for geovalue in data.geovalues]
            displacements = [geovalue[1] for geovalue in data.geovalues]
            # Energy changes are scaled so they remain visible on the
            # same axes as the other criteria.
            energy_changes = [(geovalue[2] * args.scaling_energy_change) for geovalue in data.geovalues]

            # If this isn't true, something funny happened during the
            # parsing, so fail out.
            assert len(scfenergies) == len(gradients)

            steps = range(1, len(scfenergies) + 1)

            # ax.plot(steps, scfenergies, label='SCF energy')
            ax.plot(steps, gradients, label='max gradient')
            ax.plot(steps, displacements, label='max displacement')
            ax.plot(steps, energy_changes, label='energy change')

            ax.set_title(stub)
            ax.set_xlabel('optimization step #')

        elif type(job) == cclib.parser.orcaparser.ORCA:
            # NOTE(review): ORCA branch is a stub; nothing is plotted.
            pass

        else:
            pass

        ax.legend(loc='best', fancybox=True)

        fig.savefig(stub + '.pdf', bbox_inches='tight')
Exemplo n.º 17
0
def parse_vibrational_anharmonic_analysis(outputfilename):
    """Parse anharmonic (TOSH/VPT2/VCI) frequencies per vibrational mode.

    Returns a dict mapping 1-based mode number -> OrderedDict with keys
    'harmonic', and when the corresponding output sections are present,
    'tosh', 'vpt2', and 'vciN'. Returns an empty dict when no harmonic
    frequencies could be parsed.
    """
    cclib_job = ccopen(outputfilename)
    cclib_data = cclib_job.parse()

    # If we can't even find harmonic frequencies, jump out here.
    # Narrowed from a bare `except:`: a missing vibfreqs attribute raises
    # AttributeError (or TypeError if it is None); anything else is a
    # real bug and should propagate.
    try:
        nmodes = len(cclib_data.vibfreqs)
    except (AttributeError, TypeError):
        return dict()

    mode_dict = dict()
    for mode in range(1, nmodes + 1):
        mode_dict[mode] = OrderedDict()

    for mode, harmonic_frequency in zip(count(start=1), cclib_data.vibfreqs):
        mode_dict[mode]['harmonic'] = harmonic_frequency

    with open(outputfilename) as outputfile:
        line = ''
        while 'VIBRATIONAL ANHARMONIC ANALYSIS' not in line:
            try:
                line = next(outputfile)
            # We only need one try/except, since if we don't match here,
            # we'll never match.
            except StopIteration:
                return mode_dict
        while 'TOSH' not in line:
            line = next(outputfile)

        # Fixed-width columns: mode number, TOSH value, VPT2 value.
        while line.strip().split() != []:
            mode = int(line[6:8])
            mode_dict[mode]['tosh'] = float(line[9:22])
            mode_dict[mode]['vpt2'] = float(line[30:])
            line = next(outputfile)

        # VCI section continues until a line made up entirely of '='.
        line = next(outputfile)
        while list(set(line.strip())) != ['=']:
            if line.strip().split() != []:
                quantum = line[5:7].strip()
                key = 'vci' + quantum
                mode = int(line[13:15])
                # quanta = float(line.split()[-4])
                freq = float(line.split()[-1])
                mode_dict[mode][key] = freq
                line = next(outputfile)
            else:
                line = next(outputfile)

    return mode_dict
Exemplo n.º 18
0
def parse_vibrational_anharmonic_analysis(outputfilename):
    """Parse anharmonic (TOSH/VPT2/VCI) frequencies per vibrational mode.

    Returns a dict mapping 1-based mode number -> OrderedDict with keys
    'harmonic', and when the corresponding output sections are present,
    'tosh', 'vpt2', and 'vciN'. Returns an empty dict when no harmonic
    frequencies could be parsed.
    """
    cclib_job = ccopen(outputfilename)
    cclib_data = cclib_job.parse()

    # If we can't even find harmonic frequencies, jump out here.
    # Narrowed from a bare `except:`: a missing vibfreqs attribute raises
    # AttributeError (or TypeError if it is None); anything else is a
    # real bug and should propagate.
    try:
        nmodes = len(cclib_data.vibfreqs)
    except (AttributeError, TypeError):
        return dict()

    mode_dict = dict()
    for mode in range(1, nmodes + 1):
        mode_dict[mode] = OrderedDict()

    for mode, harmonic_frequency in zip(count(start=1), cclib_data.vibfreqs):
        mode_dict[mode]['harmonic'] = harmonic_frequency

    with open(outputfilename) as outputfile:
        line = ''
        while 'VIBRATIONAL ANHARMONIC ANALYSIS' not in line:
            try:
                line = next(outputfile)
            # We only need one try/except, since if we don't match here,
            # we'll never match.
            except StopIteration:
                return mode_dict
        while 'TOSH' not in line:
            line = next(outputfile)

        # Fixed-width columns: mode number, TOSH value, VPT2 value.
        while line.strip().split() != []:
            mode = int(line[6:8])
            mode_dict[mode]['tosh'] = float(line[9:22])
            mode_dict[mode]['vpt2'] = float(line[30:])
            line = next(outputfile)

        # VCI section continues until a line made up entirely of '='.
        line = next(outputfile)
        while list(set(line.strip())) != ['=']:
            if line.strip().split() != []:
                quantum = line[5:7].strip()
                key = 'vci' + quantum
                mode = int(line[13:15])
                # quanta = float(line.split()[-4])
                freq = float(line.split()[-1])
                mode_dict[mode][key] = freq
                line = next(outputfile)
            else:
                line = next(outputfile)

    return mode_dict
Exemplo n.º 19
0
def main():
    """Parse one or more logfiles into a pandas DataFrame and write it in
    the format implied by the output path's extension, or print it when
    no output path is given."""
    if not _has_pandas:
        print("You need to have pandas installed")
        sys.exit(1)

    parser = argparse.ArgumentParser()
    parser.add_argument('-O',
                        '--output',
                        help=('the output document to write, including an '
                              'extension supported by pandas '
                              '(csv, h5/hdf/hdf5, json, pickle/pkl, xlsx)'))
    parser.add_argument('compchemlogfiles',
                        metavar='compchemlogfile',
                        nargs='+',
                        help=('one or more computational chemistry output '
                              'files to parse and convert'))
    parser.add_argument('--identifier',
                        default='logfiles',
                        help=('name of sheet which will contain DataFrame, if '
                              'writing to an Excel file, or identifier for '
                              'the group in HDFStore, if writing a HDF file'))
    args = parser.parse_args()

    output = args.output
    identifier = args.identifier
    filenames = args.compchemlogfiles

    df = ccframe([ccopen(path) for path in filenames])

    if output is not None:
        outputtype = os.path.splitext(os.path.basename(output))[1][1:]
        # Fail loudly on an extension-less path instead of silently doing
        # nothing (consistent with process_logfiles elsewhere in this file).
        if not outputtype:
            raise RuntimeWarning(
                "The output type could not be determined from the given path, "
                "not writing DataFrame to disk")

        if outputtype in {'csv'}:
            df.to_csv(output)
        elif outputtype in {'h5', 'hdf', 'hdf5'}:
            df.to_hdf(output, key=identifier)
        elif outputtype in {'json'}:
            df.to_json(output)
        elif outputtype in {'pickle', 'pkl'}:
            df.to_pickle(output)
        elif outputtype in {'xlsx'}:
            writer = pd.ExcelWriter(output)
            # This overwrites previous sheets
            # (see https://stackoverflow.com/a/42375263/4039050)
            df.to_excel(writer, sheet_name=identifier)
            writer.save()
    else:
        print(df)
Exemplo n.º 20
0
def main():
    """Plot the SCF energy (in hartree) against SCF step for each Q-Chem
    output file on the command line, saving each figure as '<stub>.pdf'."""

    parser = argparse.ArgumentParser()
    parser.add_argument('compchemfilename', nargs='+')
    args = parser.parse_args()

    for fname in args.compchemfilename:

        # The plot file shares the input file's stem.
        stub = os.path.splitext(fname)[0]

        job = ccopen(fname)
        data = job.parse()

        fig, ax = plt.subplots()

        if type(job) == cclib.parser.qchemparser.QChem:

            energies_hartree = [
                utils.convertor(energy, 'eV', 'hartree')
                for energy in data.scfenergies
            ]
            print(energies_hartree)
            # scfenergies = [scfenergy for scfenergy in data.scfenergies]

            steps = range(1, len(energies_hartree) + 1)

            ax.plot(steps, energies_hartree, label='SCF energy')

            ax.set_title(stub)
            ax.set_xlabel('SCF step #')
            ax.set_xticks(steps)

        elif type(job) == cclib.parser.orcaparser.ORCA:
            # ORCA branch is a stub; nothing is plotted.
            pass

        else:
            pass

        ax.legend(loc='best', fancybox=True)

        fig.savefig(stub + '.pdf', bbox_inches='tight')
Exemplo n.º 21
0
def main():
    """Compute the electron density of the occupied MOs on a grid that
    covers the molecular extent, print its integrals, and optionally write
    the density as a cube file."""
    args = _parse_args()
    data = ccopen(args.quantum_chemical_file).parse()

    extent = MolecularExtent(data, args.radius)
    extent.calculate_extent(args.index)

    grid_spacing = (args.spacing, args.spacing, args.spacing)
    volume = Volume(extent.lower_limit, extent.upper_limit,
                    spacing=grid_spacing)

    # Occupied orbitals run from 0 up to and including the H**O.
    slice_end = data.homos[args.spinidx] + 1
    print('density of molecular orbitals [0..%d] H**O=%d' %
          (slice_end, data.homos[args.spinidx]))

    density = electrondensity(data.atomcoords[args.index],
                              data.mocoeffs[args.spinidx][0:slice_end],
                              data.gbasis, volume)
    print('integral', density.integrate(), density.integrate_square())

    if args.output is not None:
        density.writeascube(args.output + '.cube')
Exemplo n.º 22
0
def main():
    """Print the chemical formula of the molecule in the logfile given as
    the first command-line argument, with carbon first, then hydrogen,
    then the remaining elements in alphabetical order."""
    pse = PeriodicTable()
    counts = {}
    data = ccopen(sys.argv[1]).parse()

    # Tally how many atoms of each atomic number are present.
    for atom in data.atomnos:
        counts[atom] = counts.setdefault(atom, 0) + 1

    pieces = []
    skip = set()
    # Hill-style ordering: C first, then H (only when carbon is present).
    if 6 in counts:
        pieces.append('C%d' % counts[6])
        skip.add(6)
        if 1 in counts:
            pieces.append('H%d' % counts[1])
            skip.add(1)

    remaining = set(counts.keys()) - skip
    for symbol in sorted(pse.element[z] for z in remaining):
        pieces.append(symbol + str(counts[pse.number[symbol]]))

    print(' '.join(pieces))
Exemplo n.º 23
0
    parser = argparse.ArgumentParser()

    parser.add_argument('outputfilename', nargs='+')

    args = parser.parse_args()

    return args


if __name__ == '__main__':

    args = getargs()

    scfenergies = []

    # Collect a (program name, filename, SCF energy in hartree) tuple for
    # every output file given on the command line.
    for outputfilename in args.outputfilename:
        # CFOUR outputs are not handled via cclib here; fall back to a
        # direct text search for the 'E(SCF)=' line.
        if 'cfour' in outputfilename.lower():
            program = 'CFOUR'
            scfenergy = get_energy_nocclib(outputfilename, 'E(SCF)=', 1)
        else:
            job = ccopen(outputfilename)
            program = program_names[type(job)]
            data = job.parse()
            scfenergy = convertor(data.scfenergies[0], 'eV', 'hartree')
        scfenergies.append((program, outputfilename, scfenergy))

    # Sort by energy (ascending) before printing.
    scfenergies = sorted(scfenergies, key=lambda x: x[2])

    for (program, outputfilename, scfenergy) in scfenergies:
        print(scfenergy, program, outputfilename)
Exemplo n.º 24
0
def main():
    """Parse one or more logfiles with cclib and convert each to the
    requested output format (json/cjson/cml/xyz/molden/wfx), writing
    '<input stem>.<outputtype>' in the current directory."""

    parser = argparse.ArgumentParser()

    parser.add_argument(
        'outputtype',
        choices=('json', 'cjson', 'cml', 'xyz', 'molden', 'wfx'),
        help='the output format to write (json/cjson are identical)')
    parser.add_argument(
        'compchemlogfile',
        nargs='+',
        help=
        'one or more computational chemistry output files to parse and convert'
    )

    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='more verbose parsing output (only errors by default)')

    parser.add_argument('-g',
                        '--ghost',
                        type=str,
                        default=None,
                        help='Symbol to use for ghost atoms')

    parser.add_argument(
        '-t',
        '--terse',
        action='store_true',
        help=
        'CJSON by default is not indented for readability, saves space (indented for readability\'s sake)'
    )

    parser.add_argument(
        '-u',
        '--future',
        action='store_true',
        help='use experimental features (currently optdone_as_list)')

    parser.add_argument(
        '-i',
        '--index',
        type=int,
        default=None,
        help='optional zero-based index for which structure to extract')

    args = parser.parse_args()

    outputtype = args.outputtype
    filenames = args.compchemlogfile
    verbose = args.verbose
    terse = args.terse
    future = args.future
    index = args.index
    ghost = args.ghost

    for filename in filenames:

        # We might want to use this option in the near future.
        ccopen_kwargs = dict()
        if future:
            ccopen_kwargs['future'] = True

        print("Attempting to parse {}".format(filename))
        log = ccopen(filename, **ccopen_kwargs)

        # ccopen returns a falsy value when the program type can't be
        # identified; bail out entirely in that case.
        if not log:
            print(
                "Cannot figure out what type of computational chemistry output file '{}' is."
                .format(filename))
            print(
                "Report this to the cclib development team if you think this is an error."
            )
            sys.exit()

        if verbose:
            log.logger.setLevel(logging.INFO)
        else:
            log.logger.setLevel(logging.ERROR)
        data = log.parse()

        # Report which cclib attributes were successfully parsed.
        print("cclib can parse the following attributes from {}:".format(
            filename))
        hasattrs = [
            '  {}'.format(attr) for attr in ccData._attrlist
            if hasattr(data, attr)
        ]
        print('\n'.join(hasattrs))

        # Write out to disk.
        outputdest = '.'.join(
            [os.path.splitext(os.path.basename(filename))[0], outputtype])
        ccwrite_kwargs = dict()
        if future:
            ccwrite_kwargs['future'] = True
        if ghost:
            ccwrite_kwargs['ghost'] = ghost
        # For XYZ files, write the last geometry unless otherwise
        # specified.
        if not index:
            index = -1
        ccwrite_kwargs['jobfilename'] = filename

        # The argument terse presently is only applicable to
        # CJSON/JSON formats
        ccwrite(data,
                outputtype,
                outputdest,
                indices=index,
                terse=terse,
                **ccwrite_kwargs)
Exemplo n.º 25
0
def calculate_uhf(
    dalton_tmpdir,
    hamiltonian=None,
    spin=None,
    operator_label=None,
    operator=None,
    source_moenergies=None,
    source_mocoeffs=None,
    source_operator=None,
):
    """Run an exact-inverse CPHF calculation for a UHF reference.

    Reads the molecule from a DALTON scratch directory (DALTON.BAS /
    SIRIFC / DALTON.OUT), rebuilds it in PySCF, and takes MO energies and
    coefficients from either PySCF or DALTON as requested. Returns the
    first entry of the CPHF driver's results.

    NOTE(review): source_operator is accepted but never used in this
    body — confirm whether it is dead or handled by a caller.
    """

    if operator_label:
        # TODO add dipvel
        assert operator_label in ("dipole", "angmom", "spinorb")
    assert source_moenergies in ("pyscf", "dalton")
    assert source_mocoeffs in ("pyscf", "dalton")

    # Rebuild the DALTON geometry as text lines for the PySCF molecule.
    dalton_molecule = dalmol.readin(dalton_tmpdir / "DALTON.BAS")
    lines = []
    for atom in dalton_molecule:
        label = atom["label"][0]
        center = atom["center"][0]
        center_str = " ".join(["{:f}".format(pos) for pos in center])
        line = "{:3} {}".format(label, center_str)
        lines.append(line)
    lines = "\n".join(lines)

    # PySCF molecule setup, needed for generating the TEIs in the MO
    # basis.
    mol = pyscf.gto.Mole()
    verbose = 1
    mol.verbose = verbose
    mol.atom = lines
    mol.unit = "Bohr"
    # TODO read basis from DALTON molecule
    mol.basis = "sto-3g"
    mol.symmetry = False
    # TODO read charge from DALTON molecule?
    mol.charge = 1
    # TODO read spin from DALTON molecule?
    mol.spin = 1

    mol.build()

    ifc = sirifc.sirifc(dalton_tmpdir / "SIRIFC")
    occupations = utils.occupations_from_sirifc(ifc)

    # Only run the PySCF SCF when something actually needs its output.
    if source_moenergies == "pyscf" or source_mocoeffs == "pyscf":
        mf = pyscf.scf.UHF(mol)
        mf.kernel()

    if source_moenergies == "pyscf":
        E_alph = np.diag(mf.mo_energy[0])
        E_beta = np.diag(mf.mo_energy[1])
        E = np.stack((E_alph, E_beta), axis=0)
    elif source_moenergies == "dalton":
        job = ccopen(dalton_tmpdir / "DALTON.OUT")
        data = job.parse()
        # pylint: disable=no-member
        # cclib reports MO energies in eV; convert to hartree and
        # duplicate the (restricted) set for both spins.
        E = np.diag([
            convertor(x, "eV", "hartree") for x in data.moenergies[0]
        ])[np.newaxis, ...]
        E = np.concatenate((E, E), axis=0)
    else:
        pass

    if source_mocoeffs == "pyscf":
        C = mf.mo_coeff
    elif source_mocoeffs == "dalton":
        # Duplicate the single DALTON coefficient set for both spins.
        C = ifc.cmo[0][np.newaxis, ...]
        C = np.concatenate((C, C), axis=0)
    else:
        pass

    solver = iterators.ExactInv(C, E, occupations)

    solver.tei_mo = ao2mo.perform_tei_ao2mo_uhf_partial(mol, C)
    solver.tei_mo_type = "partial"

    driver = cphf.CPHF(solver)

    # Attach either the caller-supplied operator or build one of the
    # known operators from its AO integrals.
    if operator:
        driver.add_operator(operator)
    elif operator_label:
        if operator_label == "dipole":
            operator_dipole = operators.Operator(label="dipole",
                                                 is_imaginary=False,
                                                 is_spin_dependent=False,
                                                 triplet=False)
            integrals_dipole_ao = mol.intor("cint1e_r_sph", comp=3)
            operator_dipole.ao_integrals = integrals_dipole_ao
            driver.add_operator(operator_dipole)
        elif operator_label == "angmom":
            operator_angmom = operators.Operator(label="angmom",
                                                 is_imaginary=True,
                                                 is_spin_dependent=False,
                                                 triplet=False)
            integrals_angmom_ao = mol.intor("cint1e_cg_irxp_sph", comp=3)
            operator_angmom.ao_integrals = integrals_angmom_ao
            driver.add_operator(operator_angmom)
        elif operator_label == "spinorb":
            operator_spinorb = operators.Operator(label="spinorb",
                                                  is_imaginary=True,
                                                  is_spin_dependent=False,
                                                  triplet=False)
            # Sum the charge-weighted one-electron spin-orbit integrals
            # over all atomic centers.
            integrals_spinorb_ao = 0
            for atm_id in range(mol.natm):
                mol.set_rinv_orig(mol.atom_coord(atm_id))
                chg = mol.atom_charge(atm_id)
                integrals_spinorb_ao += chg * mol.intor("cint1e_prinvxp_sph",
                                                        comp=3)
            operator_spinorb.ao_integrals = integrals_spinorb_ao
            driver.add_operator(operator_spinorb)
        else:
            pass
    else:
        pass

    driver.set_frequencies()

    driver.run(solver_type="exact", hamiltonian=hamiltonian, spin=spin)

    return driver.results[0]
Exemplo n.º 26
0
def main(args):
    """The main routine.

    For each frequency calculation output filename passed in:
    1. find the indices that correspond to a CO2 molecule
    2. look for normal mode displacements where a significant fraction
    of the total displacement contains CO2
    3. ...
    """

    # Open the CSV output lazily and keep the handle so it can be closed
    # once all files are processed (the original leaked the file handle).
    csvfile = None
    csvwriter = None
    if args.csv:
        import csv
        # newline='' is required by the csv module for correct quoting and
        # to avoid spurious blank rows on Windows.
        csvfile = open(args.csv_filename, 'w', newline='')
        csvwriter = csv.writer(csvfile)

    print("Using threshold of {} for fraction of normal mode displacement".format(args.thresh))
    filenames = args.filename
    for filename in filenames:

        job = ccopen(filename)

        try:
            data = job.parse()
        except StopIteration:
            print("Couldn't parse {}".format(filename), file=sys.stderr)
            continue

        # If from a geometry optimization, always take the last
        # geometry.
        geometries = data.atomcoords[-1]
        atoms = data.atomnos

        # Find the indices corresponding to the CO2.
        # NOTE(review): assumes find_CO2_atom_indices returns a list of
        # starting atom indices, one per CO2 found — confirm with helper.
        start_indices = find_CO2_atom_indices(atoms, geometries)

        assert isinstance(start_indices, list)

        # Frequency-specific attributes are absent for non-freq jobs.
        try:
            vibfreqs = data.vibfreqs
            vibirs = data.vibirs
            vibdisps = data.vibdisps
        except AttributeError:
            print("Couldn't parse frequencies from {}".format(filename), file=sys.stderr)
            continue

        for start in start_indices:
            modeindices = find_CO2_mode_indices(start, vibdisps, thresh=args.thresh)
            # Pair each selected mode's frequency with its IR intensity.
            pairs = [(vibfreqs[i], vibirs[i]) for i in modeindices]
            print(filename, end=' ')
            for p in pairs:
                print('({:.2f}, {:.2f})'.format(*p), end=' ')
            print('')
            if csvwriter is not None and pairs:
                row = [filename]
                row.extend(['{}'.format(round(p[0], 2)) for p in pairs])
                csvwriter.writerow(row)

    # Fix: the original never closed the CSV file.
    if csvfile is not None:
        csvfile.close()
def main(args):
    """Summarize a COVP (complementary occupied-virtual pair) analysis.

    ``args`` is a docopt-style dictionary of command-line options.  The
    routine parses ``<outputfilename>`` with cclib, re-reads the raw file
    for the two COVP fragment blocks, prints every pair whose energy
    contribution meets ``--pct_cutoff`` (plus cutoff totals, block totals,
    and net charge transfer per direction), and optionally writes VMD plot
    scripts, pandas dumps, or deletes cube files outside the cutoff.

    Returns a 4-tuple: (1->2 entries within cutoff, 2->1 entries within
    cutoff, total row for 1->2, total row for 2->1).
    """
    if args['--print_args']:
        print(args)

    outputfilename = args['<outputfilename>']
    stub = os.path.splitext(outputfilename)[0]

    print('-' * 78)
    print(outputfilename)

    # Assume we have an appropriately-named XYZ file.
    xyzfilename = stub + '.xyz'

    # The dE(pair)/dE(total) percentage cutoff for inclusion.
    pct_cutoff = int(args['--pct_cutoff'])

    # pylint: disable=E1101
    cclib_job = ccopen(outputfilename)
    cclib_data = cclib_job.parse()
    n_mo = cclib_data.nmo
    idx_homo = cclib_data.homos[0]
    # Use the last set of MO energies (e.g. the final SCF cycle).
    covpenergies = cclib_data.moenergies[-1]

    fragment_1_to_2 = []
    fragment_2_to_1 = []
    fragment_1_to_2_cutoff = []
    fragment_2_to_1_cutoff = []
    fragment_1_to_2_pairs = []
    fragment_2_to_1_pairs = []

    # Parse the COVP fragment print block for each fragment.
    # NOTE(review): parse_fragment_block appears to append entries to the
    # list passed in and return the totals row — confirm with the helper.
    with open(outputfilename) as outputfile:
        for line in outputfile:
            if 'From fragment 1 to fragment 2' in line:
                fragment_1_to_2_tot = parse_fragment_block(outputfile, fragment_1_to_2, 1)
            if 'From fragment 2 to fragment 1' in line:
                fragment_2_to_1_tot = parse_fragment_block(outputfile, fragment_2_to_1, 2)

    # Determine the actual orbital indices each COVP corresponds to.
    fragment_indices = determine_fragment_indices(fragment_1_to_2,
                                                  fragment_2_to_1,
                                                  covpenergies,
                                                  n_mo,
                                                  idx_homo)

    # Format strings: full rows, header, totals rows, and net rows.
    fheader = ' {:>5} {:>4} {:>4} {:>7} {:>5} {:>6} {:>5}'
    header = fheader.format('idx', 'occ', 'virt', 'de', 'de%', 'dq', 'dq%')
    fs = ' {:5d} {:4d} {:4d} {:6.4f} {:5.1f} {:6.3f} {:5.1f}'
    fst = ' {:5}           {:6.4f} {:5.1f} {:6.3f} {:5.1f}'
    format_string_net = ' {:5}           {:6.4f}     {:6.3f}'

    print('Fragment 1 -> 2:')
    print(header)
    for entry in fragment_1_to_2:
        if entry['de_alph_pct'] >= pct_cutoff:
            fragment_1_to_2_cutoff.append(entry)
            fragment_1_to_2_pairs.append((entry['orb_occ'], entry['orb_virt']))
            print(fs.format(entry['index'],
                            entry['orb_occ'],
                            entry['orb_virt'],
                            entry['de_alph'],
                            entry['de_alph_pct'],
                            entry['dq_alph'],
                            entry['dq_alph_pct']))
    # Print out the total for all COVPs within the cutoff.
    fragment_1_to_2_cutoff_totals = {
        'de_alph': 0.0, 'de_alph_pct': 0.0, 'dq_alph': 0.0, 'dq_alph_pct': 0.0
    }
    for entry in fragment_1_to_2_cutoff:
        fragment_1_to_2_cutoff_totals['de_alph'] += entry['de_alph']
        fragment_1_to_2_cutoff_totals['de_alph_pct'] += entry['de_alph_pct']
        fragment_1_to_2_cutoff_totals['dq_alph'] += entry['dq_alph']
        fragment_1_to_2_cutoff_totals['dq_alph_pct'] += entry['dq_alph_pct']
    print(fst.format('Tot1C',
                     fragment_1_to_2_cutoff_totals['de_alph'],
                     fragment_1_to_2_cutoff_totals['de_alph_pct'],
                     fragment_1_to_2_cutoff_totals['dq_alph'],
                     fragment_1_to_2_cutoff_totals['dq_alph_pct']))
    # Print out the total line at the end of the block, which is for
    # *all* COVPs.
    print(fst.format(fragment_1_to_2_tot['index'],
                     fragment_1_to_2_tot['de_alph'],
                     fragment_1_to_2_tot['de_alph_pct'],
                     fragment_1_to_2_tot['dq_alph'],
                     fragment_1_to_2_tot['dq_alph_pct']))
    print('Fragment 2 -> 1:')
    print(header)
    for entry in fragment_2_to_1:
        if entry['de_alph_pct'] >= pct_cutoff:
            fragment_2_to_1_cutoff.append(entry)
            fragment_2_to_1_pairs.append((entry['orb_occ'], entry['orb_virt']))
            print(fs.format(entry['index'],
                            entry['orb_occ'],
                            entry['orb_virt'],
                            entry['de_alph'],
                            entry['de_alph_pct'],
                            entry['dq_alph'],
                            entry['dq_alph_pct']))
    # Print out the total for all COVPs within the cutoff.
    fragment_2_to_1_cutoff_totals = {
        'de_alph': 0.0, 'de_alph_pct': 0.0, 'dq_alph': 0.0, 'dq_alph_pct': 0.0
    }
    for entry in fragment_2_to_1_cutoff:
        fragment_2_to_1_cutoff_totals['de_alph'] += entry['de_alph']
        fragment_2_to_1_cutoff_totals['de_alph_pct'] += entry['de_alph_pct']
        fragment_2_to_1_cutoff_totals['dq_alph'] += entry['dq_alph']
        fragment_2_to_1_cutoff_totals['dq_alph_pct'] += entry['dq_alph_pct']
    print(fst.format('Tot2C',
                     fragment_2_to_1_cutoff_totals['de_alph'],
                     fragment_2_to_1_cutoff_totals['de_alph_pct'],
                     fragment_2_to_1_cutoff_totals['dq_alph'],
                     fragment_2_to_1_cutoff_totals['dq_alph_pct']))
    # Print out the total line at the end of the block, which is for
    # *all* COVPs.
    print(fst.format(fragment_2_to_1_tot['index'],
                     fragment_2_to_1_tot['de_alph'],
                     fragment_2_to_1_tot['de_alph_pct'],
                     fragment_2_to_1_tot['dq_alph'],
                     fragment_2_to_1_tot['dq_alph_pct']))

    # Now that we've printed out the totals for each fragment, print
    # out the net values for each direction.
    fragment_1_to_2_net = {
        'de_alph': 0.0, 'de_alph_pct': 0.0, 'dq_alph': 0.0, 'dq_alph_pct': 0.0
    }
    fragment_2_to_1_net = {
        'de_alph': 0.0, 'de_alph_pct': 0.0, 'dq_alph': 0.0, 'dq_alph_pct': 0.0
    }
    # Net = forward total minus reverse total, per direction.
    fragment_1_to_2_net['de_alph'] = fragment_1_to_2_tot['de_alph'] - fragment_2_to_1_tot['de_alph']
    fragment_1_to_2_net['dq_alph'] = fragment_1_to_2_tot['dq_alph'] - fragment_2_to_1_tot['dq_alph']
    fragment_2_to_1_net['de_alph'] = fragment_2_to_1_tot['de_alph'] - fragment_1_to_2_tot['de_alph']
    fragment_2_to_1_net['dq_alph'] = fragment_2_to_1_tot['dq_alph'] - fragment_1_to_2_tot['dq_alph']
    print('Net CT into Fragment 1:')
    print(format_string_net.format('',
                                   fragment_2_to_1_net['de_alph'],
                                   fragment_2_to_1_net['dq_alph']))
    print('Net CT into Fragment 2:')
    print(format_string_net.format('',
                                   fragment_1_to_2_net['de_alph'],
                                   fragment_1_to_2_net['dq_alph']))

    if args['--plot-separate']:
        # Write VMD scripts for plotting.
        xyzfilename = args['--plot-separate']
        dump_vmd_separate(fragment_1_to_2_pairs, fragment_2_to_1_pairs, n_mo, xyzfilename)

    if args['--plot-combined']:
        # Write a VMD script for plotting.
        xyzfilename = args['--plot-combined']
        dump_vmd_combined(fragment_1_to_2_pairs, fragment_2_to_1_pairs, n_mo, xyzfilename)

    if args['--df']:
        import pandas as pd
        # Write results to JSON/Excel files using Pandas.
        dump_pandas(fragment_1_to_2_cutoff, fragment_2_to_1_cutoff, stub)

    if args['--del']:
        # Delete cube files for COVPs that fall below the cutoff.
        maxlen = 0
        # first, find the maximum length of the number field
        # (from Python objects, not the filesystem)
        fragment_entries = fragment_1_to_2 + fragment_2_to_1
        for entry in fragment_entries:
            newlen = max(len(str(entry['orb_occ'])),
                         len(str(entry['orb_virt'])))
            if newlen > maxlen:
                maxlen = newlen
        template = 'mo.{}.cube'
        for entry in fragment_entries:
            if entry['de_alph_pct'] < pct_cutoff:
                orb_occ = pad_left_zeros(entry['orb_occ'], maxlen)
                orb_virt = pad_left_zeros(entry['orb_virt'], maxlen)
                orb_occ_filename = template.format(orb_occ)
                orb_virt_filename = template.format(orb_virt)
                print('Deleting ' + orb_occ_filename)
                print('Deleting ' + orb_virt_filename)
                try:
                    os.remove(orb_occ_filename)
                except OSError:
                    print("Can't remove " + orb_occ_filename)
                try:
                    os.remove(orb_virt_filename)
                except OSError:
                    print("Can't remove " + orb_virt_filename)

    print('-' * 78)

    return (fragment_1_to_2_cutoff,
            fragment_2_to_1_cutoff,
            fragment_1_to_2_tot,
            fragment_2_to_1_tot)
Exemplo n.º 28
0
def get_job_data(filename):
    """Parse a computational chemistry logfile with cclib and return its data."""
    return ccopen(filename).parse()
Exemplo n.º 29
0
        density.data = density.data*2. # doubly-occupied
    
    return density


if __name__=="__main__":

    # psyco was an optional JIT accelerator for old 32-bit CPython;
    # silently proceed without it.
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass

    from cclib.io import ccopen
    import logging
    # Parse two reference Gaussian 03 outputs, quieting cclib's logger:
    # one supplies the basis set (gbasis), the other the MO coefficients.
    a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log")
    a.logger.setLevel(logging.ERROR)
    c = a.parse()

    b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out")
    b.logger.setLevel(logging.ERROR)
    d = b.parse()

    # Evaluate the HOMO wavefunction on a regular grid and sanity-check
    # its normalization.  Volume/wavefunction are defined elsewhere in
    # this file (presumably cclib's grid utilities — confirm).
    vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
    wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]],
                          c.gbasis, vol)
    assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns
    assert abs(wavefn.integrate_square() - 1.00)<1E-3 #   true for all wavefns
    print(wavefn.integrate(), wavefn.integrate_square())

    # Fresh grid for the next computation (continues past this chunk).
    vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
Exemplo n.º 30
0
def main():
    """Parse one or more computational chemistry logfiles with cclib and
    write each one out in the requested format (JSON/CJSON, CML, XYZ,
    Molden, or WFX) next to the current working directory.
    """

    parser = argparse.ArgumentParser()

    parser.add_argument('outputtype',
                        choices=('json', 'cjson', 'cml', 'xyz', 'molden', 'wfx'),
                        help='the output format to write (json/cjson are identical)')
    parser.add_argument('compchemlogfile',
                        nargs='+',
                        help='one or more computational chemistry output files to parse and convert')

    parser.add_argument('-v', '--verbose',
                        action='store_true',
                        help='more verbose parsing output (only errors by default)')

    parser.add_argument('-t', '--terse',
                        action='store_true',
                        help='CJSON by default is not indented for readability, saves space (indented for readability\'s sake)')

    parser.add_argument('-u', '--future',
                        action='store_true',
                        help='use experimental features (currently optdone_as_list)')

    parser.add_argument('-i', '--index',
                        type=int,
                        default=None,
                        help='optional zero-based index for which structure to extract')

    args = parser.parse_args()

    outputtype = args.outputtype
    filenames = args.compchemlogfile
    verbose = args.verbose
    terse = args.terse
    future = args.future
    index = args.index

    for filename in filenames:

        # We might want to use this option in the near future.
        ccopen_kwargs = dict()
        if future:
            ccopen_kwargs['future'] = True

        print("Attempting to parse {}".format(filename))
        log = ccopen(filename, **ccopen_kwargs)

        # Fix: compare with None by identity (was ``log == None``).
        if log is None:
            print("Cannot figure out what type of computational chemistry output file '{}' is.".format(filename))
            print("Report this to the cclib development team if you think this is an error.")
            sys.exit()

        if verbose:
            log.logger.setLevel(logging.INFO)
        else:
            log.logger.setLevel(logging.ERROR)
        data = log.parse()

        print("cclib can parse the following attributes from {}:".format(filename))
        hasattrs = ['  {}'.format(attr) for attr in ccData._attrlist if hasattr(data, attr)]
        print('\n'.join(hasattrs))

        # Write out to disk.
        outputdest = '.'.join([os.path.splitext(os.path.basename(filename))[0], outputtype])
        ccwrite_kwargs = dict()
        if future:
            ccwrite_kwargs['future'] = True
        # For XYZ files, write the last geometry unless otherwise
        # specified.
        # Fix: test against None explicitly — ``if not index`` also
        # clobbered a legitimate user-supplied index of 0.
        if index is None:
            index = -1
        ccwrite_kwargs['jobfilename'] = filename

        # The argument terse presently is only applicable to
        # CJSON/JSON formats
        ccwrite(data, outputtype, outputdest, terse=terse, indices=index,
                **ccwrite_kwargs)
Exemplo n.º 31
0
def get_job_data(filename):
    """Parse a computational chemistry logfile with cclib.

    Returns the parsed data object.
    """
    job = ccopen(filename)
    data = job.parse()
    return data


def getargs():
    """Build the command-line parser and return the parsed arguments.

    Restored as its own function: this argparse code originally sat
    unreachable after ``return data`` above, yet the ``__main__`` block
    later in the file calls ``getargs()``.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('outputfilename', nargs='+')

    args = parser.parse_args()

    return args


if __name__ == '__main__':

    args = getargs()

    # Collect one (program, filename, SCF energy in hartree) triple per
    # output file.  CFOUR outputs are grepped directly since cclib cannot
    # parse them.
    results = []
    for fname in args.outputfilename:
        if 'cfour' in fname.lower():
            prog = 'CFOUR'
            energy = get_energy_nocclib(fname, 'E(SCF)=', 1)
        else:
            parsed_job = ccopen(fname)
            prog = program_names[type(parsed_job)]
            parsed_data = parsed_job.parse()
            energy = convertor(parsed_data.scfenergies[0], 'eV', 'hartree')
        results.append((prog, fname, energy))

    # Report in order of increasing SCF energy.
    for prog, fname, energy in sorted(results, key=lambda triple: triple[2]):
        print(energy, prog, fname)
Exemplo n.º 33
0
        density.data = density.data*2. # doubly-occupied
    
    return density


if __name__=="__main__":

    # psyco was an optional JIT accelerator for old 32-bit CPython;
    # silently proceed without it.
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass

    from cclib.io import ccopen
    import logging
    # Parse two reference Gaussian 03 outputs, quieting cclib's logger:
    # one supplies the basis set (gbasis), the other the MO coefficients.
    a = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp_basis.log")
    a.logger.setLevel(logging.ERROR)
    c = a.parse()

    b = ccopen("../../../data/Gaussian/basicGaussian03/dvb_sp.out")
    b.logger.setLevel(logging.ERROR)
    d = b.parse()

    # Evaluate the HOMO wavefunction on a regular grid and sanity-check
    # its normalization.  Volume/wavefunction are defined elsewhere in
    # this file (presumably cclib's grid utilities — confirm).
    vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
    wavefn = wavefunction(d.atomcoords[0], d.mocoeffs[0][d.homos[0]],
                          c.gbasis, vol)
    assert abs(wavefn.integrate())<1E-6 # not necessarily true for all wavefns
    assert abs(wavefn.integrate_square() - 1.00)<1E-3 #   true for all wavefns
    print(wavefn.integrate(), wavefn.integrate_square())

    # Fresh grid for the next computation (continues past this chunk).
    vol = Volume( (-3.0,-6,-2.0), (3.0, 6, 2.0), spacing=(0.25,0.25,0.25) )
Exemplo n.º 34
0
 def quiet_ccopen(logfile):
     """Parse *logfile* with cclib while suppressing WARNING-level messages.

     Fix: logging is now re-enabled in a ``finally`` clause, so a parse
     failure no longer leaves WARNING logging disabled process-wide.
     """
     log = ccopen(logfile)
     logging.disable(logging.WARNING)
     try:
         data = log.parse()
     finally:
         logging.disable(logging.NOTSET)
     return data
def _expand_orbital_spec(spec):
    """Normalize an orbital specification into a zero-padded index list.

    *spec* is either a comma-separated string or a list.  A two-element
    spec is treated as an inclusive (start, stop) range; anything else is
    passed through ``arg_to_list``.  Both paths are zero-padded with
    ``pad_left_zeros_l``.
    """
    # We're always either a string or a list.
    parts = spec.split(',') if isinstance(spec, str) else spec
    if len(parts) == 2:
        return pad_left_zeros_l(range(int(parts[0]), int(parts[1]) + 1))
    return pad_left_zeros_l(arg_to_list(spec))


def generate_block(args):
    '''Create the %plots block based upon command-line arguments passed in
    through the args dictionary.
    '''
    # Handle the file prefix first.
    if args['--prefix'] is None:
        prefix = ''
    else:
        prefix = args['--prefix'] + '.'

    # The "block" will be formed by generating bunch of strings,
    # appending them to this list, then calling list.join once
    # everything's done.
    block_parts = ['%plots']

    block_parts.append(' format gaussian_cube')
    block_parts.append(' dim1 {}'.format(args['--dim']))
    block_parts.append(' dim2 {}'.format(args['--dim']))
    block_parts.append(' dim3 {}'.format(args['--dim']))

    # If we desire to use cclib and automate some things...
    if args['--cclib']:
        # pylint: disable=E1101
        from cclib.io import ccopen
        job = ccopen(args['--cclib'])
        data = job.parse()
        # Default to plotting twice the number of occupied alpha MOs.
        plot_range = data.homos[0] * 2
        if args['--cclib_all']:
            plot_range = data.nmo
        # cclib-discovered values take precedence
        if args['--canon']:
            args['--canon'] = list(range(plot_range))
        if args['--uno']:
            args['--uno'] = list(range(plot_range))

    if args['--eldens']:
        block_parts.append(' ' + eldens_string(prefix))
    if args['--spindens']:
        block_parts.append(' ' + spindens_string(prefix))

    # Limit the number of orbitals we're going to generate.
    if args['--max']:
        maxorb = int(args['--max'])
        if args['--canon']:
            args['--canon'] = [i for i in args['--canon'] if i <= maxorb]
        if args['--uno']:
            args['--uno'] = [i for i in args['--uno'] if i <= maxorb]

    # Plot the UNOs first due to an 'operator' bug in ORCA.
    if args['--uno']:
        args['--uno'] = _expand_orbital_spec(args['--uno'])
        for uno_num in args['--uno']:
            block_parts.append(' ' + uno_string(prefix, uno_num))

    if args['--canon']:
        args['--canon'] = _expand_orbital_spec(args['--canon'])
        for mo_num in args['--canon']:
            block_parts.append(' ' + mo_string(prefix, mo_num, 0))
            if args['--beta']:
                block_parts.append(' ' + mo_string(prefix, mo_num, 1))

    block_parts.append(' end')

    block = '\n'.join(block_parts)

    return block