Example #1
def get_g09_obj(file_name, parallel=True):
    '''
    This function will find the g09 executable and the corresponding openmpi
    executable.  It will handle errors accordingly.
    '''
    g09_path = which(file_name)
    assert g09_path is not None,\
        "Error - Unable to find %s in PATH variable." % file_name

    # If running in parallel, ensure we have the correct version of openmpi
    if parallel:
        # Find mpi
        ompi_path = which("mpiexec")
        assert ompi_path is not None,\
            "Error - Unable to find mpiexec in PATH variable."
        p = subprocess.Popen([ompi_path, "--version"],
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout = str(p.stdout.read().decode("utf-8").strip())
        # stderr = str(p.stderr.read().decode("utf-8").strip())

        # Simple check for openmpi
        assert "mpi" in stdout.lower(),\
            "Error - Unable to access mpi.  Please ensure it is in your \
PATH environment variable!"

    return g09_path
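
A minimal usage sketch (assuming the function above is in scope; the
executable name "g09" is whatever your cluster actually installs):

# Locate g09 and, since parallel=True, also verify mpiexec is reachable.
g09_path = get_g09_obj("g09", parallel=True)
print("Using Gaussian at:", g09_path)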
Example #2
def get_jdftx_obj(parallel=True):
    '''
    This function will find the jdftx executable and a corresponding mpi
    executable.  It will handle errors accordingly.

    **Parameters**

        parallel: *bool, optional*
            Whether to get corresponding mpiexec info or not.

    **Returns**

        jdftx_path: *str*
            Path to a jdftx executable.
        jdftx_path_scripts: *str*
            Path to the jdftx scripts directory (None until this
            function is fixed).
    '''

    raise Exception("ERROR - NEED TO FIX/UPDATE")

    # First, look for jdftx_X in order of common names
    jdftx_path = which("jdftx")
    jdftx_path_scripts = None
    assert jdftx_path is not None,\
        "Error - Unable to find jdftx executable.  Please ensure it is \
in your PATH environment variable!"
    return jdftx_path, jdftx_path_scripts
Example #3
def _get_vmd_path():
    vmd_path = which("vmd")
    assert vmd_path is not None,\
        "Error - Unable to access VMD.  Please ensure it is in your PATH \
environment variable!"

    return vmd_path
Example #4
def get_slurm_queues():
    '''
    Get a list of all available queues to submit a job to.

    **Returns**

        avail_queues: *list, str*
            A list of available queues by name.
    '''
    sinfo_path = which("sinfo")
    assert sinfo_path is not None,\
        "Error - Unable to find sinfo in PATH."

    p = subprocess.Popen([sinfo_path],
                         shell=False,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    all_queues = p.stdout.read().decode("utf-8").strip()
    if all_queues == '':
        close_pipes(p)
        return []
    all_queues = all_queues.split("\n")[1:]
    all_queues = [q.split()[0] for q in all_queues if q.split()[1] == 'up']
    all_queues = list(set(all_queues))
    close_pipes(p)
    return [q if "*" not in q else q.replace("*", "") for q in all_queues]
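
A short usage sketch (assumes a SLURM cluster with sinfo on the PATH;
the queue name checked below is hypothetical):

avail = get_slurm_queues()
if "shared" in avail:
    print("Queue 'shared' is up and accepting jobs.")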
Example #5
def get_queue_manager():
    '''
    This function will determine what the current queueing system is, and
    return relevant functionality.

    **Returns**

        queue_manager: *str*
            The name of the queue manager as either slurm, nbs, or None.
    '''
    # Determine the queuing system
    sbatch = which("sbatch")
    jsub = which("jsub")
    if sbatch is not None:
        return "slurm"
    elif jsub is not None:
        return "nbs"
    else:
        return None
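
A small dispatch sketch using only the three documented return values:

manager = get_queue_manager()
if manager == "slurm":
    print("sbatch detected - submitting through SLURM.")
elif manager == "nbs":
    print("jsub detected - submitting through NBS.")
else:
    print("No queue manager found - jobs will run locally.")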
Example #6
def get_packmol_obj():
    '''
    This function will find the packmol executable and handle errors
    accordingly.

    **Returns**

        packmol_path: *str*
            The path to packmol.
    '''
    packmol_path = which("packmol")

    assert packmol_path is not None and packmol_path != "",\
        "Error - Unable to access Packmol.  Please ensure it is in your PATH \
environment variable!"

    return packmol_path
Example #7
def get_nbs_queues():
    '''
    Get a list of all available queues to submit a job to.

    **Returns**

        avail_queues: *list, str*
            A list of available queues by name.
    '''
    qlist_path = which("qlist")
    assert qlist_path is not None,\
        "Error - Unable to find qlist in PATH."
    p = subprocess.Popen([qlist_path],
                         shell=False,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    all_queues = p.stdout.read().decode("utf-8").strip().split('\n')[:-1]
    all_queues = [a.split() for a in all_queues]
    all_queues = [a[0] for a in all_queues if len(a) > 1]
    close_pipes(p)
    return [
        a.lower() for a in all_queues
        if a.lower() not in ["queue", "name", ""]
    ]
Example #8
def get_ovito_obj(version="2.9.0"):
    '''
    This function returns the ovito object.  Note, currently the code below
    only works on version 2.9.0.
    '''
    ovito_path = which("ovitos")
    assert ovito_path is not None,\
        "Error - Unable to find ovitos in PATH."

    # Determine version
    ovito_pipe = subprocess.Popen([ovito_path, "-v"],
                                  shell=False,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    stdout = str(ovito_pipe.stdout.read().decode("utf-8").strip())

    assert "Ovito" in stdout,\
        "Error - Unable to access Ovito.  Please ensure it is in your PATH \
environment variable!"
    assert version in stdout,\
        "Error - Incorrect Ovito version!  It should be %s, but is %s."\
        % (version, stdout.strip().split()[-1])

    close_pipes(ovito_pipe)

    return ovito_path
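
Usage sketch (assumes ovitos 2.9.0 is installed and on the PATH, as the
function requires):

ovitos_path = get_ovito_obj(version="2.9.0")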
Example #9
def ovito_xyz_to_gif(frames,
                     scratch,
                     fname="image",
                     camera_pos=(10, 0, 0),
                     camera_dir=(-1, 0, 0),
                     size=(800, 600),
                     delay=10,
                     display_cell=False,
                     renderer="OpenGLRenderer",
                     renderer_settings={},
                     overwrite=False):
    '''
    This function will, using the ovito python api, generate either a single
    image or a gif of the input frames.  Note, a gif is only generated when
    more than one frame exists.

    **Parameters**

        frames: *str* or *list,* :class:`squid.structures.atom.Atom`
            A list of frames you wish to generate an image for, or a path to
            an xyz file.
        scratch: *str*
            A directory you want to have each image saved to.
        fname: *str, optional*
            The prefix for the image names.
        camera_pos: *tuple, float, optional*
            A tuple of x, y, and z coordinates for the camera to be positioned.
        camera_dir: *tuple, float, optional*
            The direction the camera is facing.
        size: *tuple, int, optional*
            Image size (width, height).
        delay: *int, optional*
            In the event of a gif, how long it should play for.
        display_cell: *bool, optional*
            Whether to display the box around the system or not.
        renderer: *str, optional*
            What kind of renderer you wish to use: OpenGL or Tachyon.
        renderer_settings: *dict, optional*
            Here you can change specific renderer settings.
        overwrite: *bool, optional*
            Whether to delete any files already existing in the scratch dir.

    **Returns**

        None
    '''

    convert_path = which("convert")
    assert convert_path is not None,\
        "Error - Cannot find convert in the PATH env var."

    if fname.endswith(".gif"):
        fname = fname.replace(".gif", "")

    # First ensure we have frames and things in the correct format
    if isinstance(frames, str):
        # Parse the xyz file into frames; open() alone would return a
        # file handle, not a list of atoms (read_xyz is assumed to be
        # the counterpart of the write_xyz helper used below).
        frames = read_xyz(frames)
    if not isinstance(frames[0], list):
        frames = [frames]
    if not scratch.endswith("/"):
        scratch += "/"

    # Next, ensure scratch exists
    if not os.path.exists(scratch):
        os.system("mkdir -p %s" % scratch)
    elif len(os.listdir(scratch)) > 0:
        if overwrite:
            os.system("rm %s/*.png" % scratch)
        else:
            raise Exception("Error - Scratch directory is not empty!")

    # For each frame, generate an image
    for i, frame in enumerate(frames):
        write_xyz(frame, "tmp.xyz")
        ovito_xyz_to_image("tmp.xyz",
                           scratch,
                           fname="%04d" % i,
                           camera_pos=camera_pos,
                           camera_dir=camera_dir,
                           size=size,
                           renderer=renderer,
                           renderer_settings=renderer_settings,
                           display_cell=display_cell)
        os.system("rm tmp.xyz")

    # If more than one frame exists, compile to gif
    if len(frames) > 1:
        cmd = "convert -delay $DELAY -loop 0 $(ls -v $PATH/*.png) output.gif"
        holders = [("$PATH", str(scratch)), ("$DELAY", str(delay))]
        for s_id, val in holders:
            cmd = cmd.replace(s_id, val)
        os.system(cmd)
        os.rename("output.gif", fname + ".gif")
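
A hypothetical call (the trajectory file and scratch directory are
placeholders; requires ovitos plus ImageMagick's convert):

ovito_xyz_to_gif("traj.xyz",        # hypothetical xyz trajectory
                 "scratch_imgs/",   # per-frame pngs are written here
                 fname="traj",
                 delay=10,
                 overwrite=True)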
Example #10
def get_job(s_flag, detail=0):
    '''
    Get a list of all jobs currently on your queue.  From this, only return
    the values that have s_flag in them.  The *detail* variable can be used
    to specify how much information you want returned.

    **Parameters**

        s_flag: *str*
            A string to parse out job information with.
        detail: *int, optional*
            The amount of information you want returned.

    **Returns**

        all_jobs: *list*
            Depending on *detail*, you get the following:

                - *detail* =0: *list, str*
                    List of the names of all jobs on the queue.

                - *detail* =1: *list, tuple, str*
                    List of all jobs on the queue as:
                        (job name, time run, job status, job id)

                - *detail* =2: *list, tuple, str*
                    List of all jobs on the queue as:
                        (job name,
                         time run,
                         job status,
                         job id,
                         queue,
                         number of processors)

                - *detail* =3: *list, str*
                    List of the job ids of all jobs on the queue.
    '''
    detail = int(detail)
    sacct_path = which("sacct")
    assert sacct_path is not None,\
        "Error - Unable to find sacct in PATH."
    sacct_format_string =\
        "--format=User%30,JobName%50,JobID,State,Partition,NCPUS,Elapsed"

    # Get a list of jobs that are pending or running
    # Note - instead of using JobIDRaw, we use JobID and parse out
    # the _ from any job arrays.  This was a potential issue when we wait
    # on a job array to finish and end up thinking the job was done
    # prematurely.
    SACCT_SLEEP_TIMER = 60.0
    read_successful = False
    output, output_error = "", ""
    for i in range(50):
        cmd = [sacct_path, sacct_format_string]
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        output = p.stdout.read().decode("utf-8").strip()
        output_error = p.stderr.read().decode("utf-8")
        # If we successfully called sacct, break the for loop
        if output_error is None or output_error.strip() == "":
            read_successful = True
            break
        else:
            time.sleep(SACCT_SLEEP_TIMER)
    if not read_successful:
        print("\nFailed to communicate with sacct!")
        print("--------------- JOB OUTPUT ---------------")
        print(output)
        print("---------------- JOB ERROR ---------------")
        print(output_error)
        print("---------------------------------")
        sys.stdout.flush()
        raise Exception()

    VALID_STR = ["pending", "running"]
    output = [
        line.strip() for line in output.split("\n")
        if any([s.lower() in line.lower() for s in VALID_STR])
    ]

    INDICES = {
        "user": 0,
        "jobname": 1,
        "jobid": 2,
        "state": 3,
        "queue": 4,
        "nprocs": 5,
        "time": 6
    }

    all_jobs = [
        job.strip().split() for job in output if getpass.getuser() in job
    ]

    # Clean up all_jobs in case of job arrays.
    for i, local_job in enumerate(all_jobs):
        jobID = local_job[INDICES["jobid"]]
        if "_" in jobID:
            all_jobs[i][INDICES["jobid"]] = jobID.split("_")[0]

    if detail == 3:
        all_jobs = [
            j[INDICES["jobid"]] for j in all_jobs
            if s_flag == j[INDICES["state"]].strip()
        ]
    elif detail == 2:
        all_jobs = [
            (j[INDICES["jobname"]], j[INDICES["time"]], j[INDICES["state"]],
             j[INDICES["jobid"]], j[INDICES["queue"]], j[INDICES["nprocs"]])
            for j in all_jobs if s_flag == j[INDICES["state"]].strip()
        ]
    elif detail == 1:
        all_jobs = [(j[INDICES["jobname"]], j[INDICES["time"]],
                     j[INDICES["state"]], j[INDICES["jobid"]])
                    for j in all_jobs if s_flag == j[INDICES["state"]].strip()]
    else:
        all_jobs = [
            j[INDICES["jobname"]] for j in all_jobs
            if s_flag == j[INDICES["state"]].strip()
        ]
    return all_jobs
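
Usage sketch (on a SLURM machine with sacct available; "RUNNING" is the
sacct state string being matched):

running_names = get_job("RUNNING")
running_info = get_job("RUNNING", detail=2)
# detail=2 tuples: (name, time, state, job id, queue, nprocs)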
Example #11
def pysub(name, **kwargs):
    '''
    Submission of python scripts to run on your queue.

    **Parameters**

        name: *str*
            Name of the python script (with or without the .py extension).
        ompi_threads: *int, optional*
            The number OMP_NUM_THREADS should be manually assigned to.
        preface_mpi: *bool, optional*
            Whether to run python via mpirun or not.
        path: *str, optional*
            What directory your python script resides in. Note, this does NOT
            have a trailing /.
        args: *list, str, optional*
            A list of arguments to pass to the python script on the queue.
        jobarray: *tuple, int, optional*
            Specifies a job array of this python script should be run.  In
            this case, the python script is submitted with a final argument
            corresponding to the index of the job array.  NOTE - This will
            only work on SLURM.
        modules: *list, str, optional*
            A list of modules to load prior to running this python script.
            Requires an installed version of lmod.
        kwargs: *...*
            Any other keywords necessary for a given job submission script
            (NBS/SLURM).  See the other submission sections for more details.

    **Returns**

        None
    '''
    queueing_system = get_queue_manager()
    # Assess if we need to do mpirun or not
    params = {
        "ntasks": 1,
        "nprocs": 1,
        "cpus_per_task": 1,
        "ompi_threads": None,
        "preface_mpi": False,
        "path": os.getcwd(),
        "jobarray": None,
        "queue": None,
        "args": None,
        "modules": None,
        "use_hyperthreads": False,
    }
    params.update(kwargs)

    # Ensure variables of correct types
    param_types = {
        "ntasks": int,
        "cpus_per_task": int,
        "nprocs": int,
        "preface_mpi": bool,
        "path": lambda s: s if not s.endswith("/") else s[:-1]
    }

    # Hyperthread support flag
    hyperthread_flag = "--use-hwthread-cpus"

    for k, f in param_types.items():
        params[k] = f(params[k])

    if name.endswith(".py"):
        name = '.py'.join(name.split(".py")[:-1])

    if isinstance(params["queue"], str) and params["queue"].lower() == "none":
        params["queue"] = None

    assert not all(["cpus_per_task" in kwargs, "nprocs" in kwargs]),\
        "Error - If specifying cpus_per_task, then specify ntasks, not nprocs."

    total_cores = params["ntasks"] * params["nprocs"] * params["cpus_per_task"]

    # Begin compiling together command
    cmd = ""

    if params["ompi_threads"] is not None:
        cmd += '''
export OMP_NUM_THREADS=%d
''' % int(params["ompi_threads"])

    if params["modules"] is not None:
        for module in params["modules"]:
            cmd += "module load %s\n" % module

    if total_cores > 1 and params["preface_mpi"]:
        mpirun_path = which("mpirun")
        assert mpirun_path is not None,\
            "Error - Unable to find mpirun path!"
        cmd += "%s -np %d " % (mpirun_path, total_cores)
        if params["use_hyperthreads"]:
            cmd += "%s " % hyperthread_flag

    python_path = which("python")
    assert python_path is not None,\
        "Error - Somehow not able to find python."

    cmd += "%s -u %s/%s.py" % (python_path, params["path"], name)

    if params["args"] is not None:
        assert isinstance(params["args"], list),\
            "Error - args for pysub must be a list of strings."
        assert all([isinstance(s, str) for s in params["args"]]),\
            "Error - args for pysub must be a list of strings."

        cmd += " " + " ".join(params["args"])

    local_ja = queueing_system is None and params["jobarray"] is not None

    if local_ja:
        cmd += " $JA1$"

    # Note - local jobs are launched through subprocess.Popen below, which
    # already runs asynchronously, so no "& disown" is appended to cmd.

    if params["queue"] == "debugger":
        print("\nWould have submitted job %s\n" % name)
        return Job(None)

    if queueing_system is None or params["queue"] is None:
        # RUN LOCALLY
        if params["jobarray"] is None:
            stdout_file = open("%s/%s.log" % (params["path"], name), "wb")
            stderr_file = open("%s/%s.err" % (params["path"], name), "wb")
            return Job(name,
                       process_handle=subprocess.Popen(cmd.strip().split(),
                                                       stdout=stdout_file,
                                                       stderr=stderr_file,
                                                       shell=False))
        else:
            # For a local job array, substitute the index into the command
            # and open one log/err file pair per index.
            all_jobs = []
            lower, upper = map(int, params["jobarray"])
            for i in range(lower, upper + 1):
                local_cmd = cmd.replace("$JA1$", str(i))
                stdout_file = open(
                    "%s/.%d%s.log" % (params["path"], i, name), "wb")
                stderr_file = open(
                    "%s/.%d%s.err" % (params["path"], i, name), "wb")
                all_jobs.append(
                    Job(name,
                        process_handle=subprocess.Popen(
                            local_cmd.strip().split(),
                            stdout=stdout_file,
                            stderr=stderr_file,
                            shell=False)))
            return all_jobs

    cmd += " > %s/%s%s.log 2>&1" % (params["path"],
                                    ".$JA2$" if local_ja else "", name)

    if queueing_system == "nbs":
        if all(["ntasks" in kwargs, "nprocs" in kwargs]):
            kwargs["nprocs"] = int(kwargs["nprocs"]) * int(kwargs["ntasks"])
        elif "ntasks" in kwargs:
            kwargs["nprocs"] = kwargs["ntasks"]
        # Strip from kwargs anything we don't need here
        del kwargs["ntasks"]
        return nbs.submit_job(name, cmd, **kwargs)
    elif queueing_system == "slurm":
        assert not all(["nprocs" in kwargs, "cpus_per_task" in kwargs]),\
            "Error - specify either nprocs or cpus_per_task, not both"
        if "nprocs" in kwargs.keys():
            kwargs["cpus_per_task"] = kwargs["nprocs"]
            del kwargs["nprocs"]
        if "ntasks" in kwargs:
            # Because on slurm tasks allocate 'more', we sort so that
            # We emphasize allocating tasks over cpus_per_task
            kwargs["cpus_per_task"], kwargs["ntasks"] = sorted(
                [int(kwargs["ntasks"]),
                 int(kwargs["cpus_per_task"])])
        else:
            kwargs["ntasks"] = kwargs["cpus_per_task"]
            del kwargs["cpus_per_task"]
        return slurm.submit_job(name, cmd, **kwargs)
    else:
        raise Exception("Unknown queueing system (%s) encountered." %
                        str(queueing_system))
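
A hypothetical submission (script name, queue, and argument are all
placeholders; extra kwargs flow through to nbs/slurm.submit_job):

job = pysub("my_script",        # runs my_script.py from the cwd
            queue="shared",     # hypothetical queue name
            ntasks=4,
            preface_mpi=True,
            args=["input.dat"])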
Example #12
def cubegen_analysis(old_job,
                     orbital=None,
                     path='gaussian/',
                     chk_conv=True,
                     skip_potential=False):
    '''
    Post process a g09 job using cubegen and vmd to display molecular orbitals
    and the potential surface.

    **Parameters**

        old_job: *str*
            Gaussian file name.  Only use the name, such as 'water' instead
            of 'water.chk'.  Note, do not pass a path as it is assumed you
            are in the parent directory of the job to analyze.  If not, use
            the path variable.
        orbital: *str, optional*
            The orbital to analyze (0, 1, 2, 3, ...). By default HOMO and
            LUMO will be analyzed, thus this only is useful if you wish to
            see other orbitals.
        path: *str, optional*
            Path to where the gaussian job was run.  By default, this is a
            gaussian subfolder.
        chk_conv: *bool, optional*
            Check if the simulation converged before proceeding.  If you only
            have a .chk file and you are certain it converged, this can be
            set to False.
        skip_potential: *bool, optional*
            If you only care about MO's, skip the expensive potential
            surface calculation.

    **Returns**

        None
    '''

    # Error handling
    if not path.endswith("/"):
        path += "/"

    if not os.path.exists('%s%s.chk' % (path, old_job)):
        raise Exception('Fatal error: file "%s%s.chk" does not exist.' %
                        (path, old_job))
    if chk_conv and not parse_atoms(path + old_job):
        raise Exception(
            'Fatal error: "%s" is not converged. gcube does not work on \
unconverged jobs.' % old_job)
    if orbital is not None:
        orbital = str(orbital)

    # Get the file to check
    if not os.path.exists('%s%s.fchk' % (path, old_job)):
        print('Making %s%s.fchk' % (path, old_job))
        Popen('%s %s%s.chk %s%s.fchk' %
              (get_g09_obj("g09_formchk"), path, old_job, path, old_job),
              shell=True).wait()

    # Make cube files
    a = ["_d", "_p", "_HOMO", "_LUMO"]
    b = ["density", "potential", "MO=Homo", "MO=Lumo"]

    if skip_potential:
        a.remove("_p")
        b.remove("potential")

    if orbital is not None:
        a.append("_MO%s" % orbital)
        b.append("MO=%s" % orbital)
    for append, orbit in zip(a, b):
        if not os.path.exists('%s%s.cube' % (path, old_job + append)):
            print('Making %s%s.cube' % (path, old_job + append))
            Popen('%s 0 %s %s%s.fchk %s%s.cube 0 h' %
                  (get_g09_obj("g09_cubegen"), orbit, path, old_job, path,
                   old_job + append),
                  shell=True).wait()

    # Error handling
    for tail in a:
        if not os.path.exists("%s/%s%s.cube" % (path, old_job, tail)):
            raise Exception('Fatal error: cube files not created')

    # Get cubefile ranges if needed
    def get_cube_range(fptr):
        raw = open(fptr, 'r').read().strip().split("\n")
        N_atoms = int(raw[2].strip().split()[0])
        raw = raw[6 + N_atoms:]
        low = float('inf')
        high = float('-inf')
        for line in raw:
            for val in [float(p) for p in line.strip().split()]:
                if val > high:
                    high = val
                if val < low:
                    low = val
        return low, high

    low1, high1 = get_cube_range("%s%s_LUMO.cube" % (path, old_job))
    low2, high2 = get_cube_range("%s%s_HOMO.cube" % (path, old_job))
    vmd_file = '''# Type logfile console into console to see all commands
    # Get data
    mol new $$PATH$$$$FPTR$$_d.cube
    mol addfile $$PATH$$$$FPTR$$_LUMO.cube
    mol addfile $$PATH$$$$FPTR$$_HOMO.cube
    $POTENTIAL$
    # Adjust first rep
    mol modcolor 0 0 element
    mol modstyle 0 0 CPK
    # Adjust LUMO and HOMO Positive Orbital
    mol addrep 0
    mol modcolor 1 0 Volume 1
    mol modstyle 1 0 Isosurface 0.04 1 0 0 1 1
    mol modmaterial 1 0 Transparent
    mol addrep 0
    mol modcolor 2 0 Volume 2
    mol modstyle 2 0 Isosurface 0.04 2 0 0 1 1
    mol modmaterial 2 0 Transparent
    # Adjust LUMO and HOMO Negative Orbital
    mol addrep 0
    mol modcolor 3 0 Volume 1
    mol modstyle 3 0 Isosurface -0.04 1 0 0 1 1
    mol modmaterial 3 0 Transparent
    mol addrep 0
    mol modcolor 4 0 Volume 2
    mol modstyle 4 0 Isosurface -0.04 2 0 0 1 1
    mol modmaterial 4 0 Transparent
    $POTENTIAL2$
    # Change the color scale
    mol scaleminmax 0 1 -100000 $$HIGH1$$
    mol scaleminmax 0 2 -100000 $$HIGH2$$
    mol scaleminmax 0 3 $$LOW1$$ 100000
    mol scaleminmax 0 4 $$LOW2$$ 100000
    $POTENTIAL3$'''.replace('$$FPTR$$', old_job)

    if skip_potential:
        vmd_file = vmd_file.replace("$POTENTIAL$", "")
        vmd_file = vmd_file.replace("$POTENTIAL2$", "")
        vmd_file = vmd_file.replace("$POTENTIAL3$", "")
    else:
        vmd_file = vmd_file.replace("$POTENTIAL$",
                                    "mol addfile $$PATH$$$$FPTR$$_p.cube")
        vmd_file = vmd_file.replace(
            "$POTENTIAL2$", '''
    # Adjust Potential Surface rep
    mol addrep 0
    mol modcolor 5 0 Volume 3
    mol modstyle 5 0 Isosurface 0.040000 0 0 0 1 1
    mol modmaterial 5 0 Transparent''')
        vmd_file = vmd_file.replace("$POTENTIAL3",
                                    "mol scaleminmax 0 5 -0.100000 0.050000")
    while "$$HIGH1$$" in vmd_file:
        vmd_file = vmd_file.replace("$$HIGH1$$", str(high1))
    while "$$LOW1$$" in vmd_file:
        vmd_file = vmd_file.replace("$$LOW1$$", str(low1))

    while "$$HIGH2$$" in vmd_file:
        vmd_file = vmd_file.replace("$$HIGH2$$", str(high2))
    while "$$LOW2$$" in vmd_file:
        vmd_file = vmd_file.replace("$$LOW2$$", str(low2))

    if orbital is not None:
        low3, high3 = get_cube_range("%s%s_MO%s.cube" %
                                     (path, old_job, orbital))
        print("%f %f" % (low3, high3))
        vmd_file += '''
    # Adding in extra MO
    mol addfile $$PATH$$$$FPTR$$_MO$$ORBITAL$$.cube
    # Adjust MO positive orbital
    mol addrep 0
    mol modcolor 6 0 Volume 4
    mol modstyle 6 0 Isosurface 0.04 4 0 0 1 1
    mol modmaterial 6 0 Transparent
    # Adjust MO Negative orbital
    mol addrep 0
    mol modcolor 7 0 Volume 4
    mol modstyle 7 0 Isosurface -0.04 4 0 0 1 1
    mol modmaterial 7 0 Transparent
    # Change color scale
    mol scaleminmax 0 6 -100000 $$HIGH3$$
    mol scaleminmax 0 7 $$LOW3$$ 100000'''

        while "$$HIGH3$$" in vmd_file:
            vmd_file = vmd_file.replace("$$HIGH3$$", str(high3))
        while "$$LOW3$$" in vmd_file:
            vmd_file = vmd_file.replace("$$LOW3$$", str(low3))
        while "$$ORBITAL$$" in vmd_file:
            vmd_file = vmd_file.replace("$$ORBITAL$$", str(orbital))
        while "$$FPTR$$" in vmd_file:
            vmd_file = vmd_file.replace('$$FPTR$$', old_job)

    while "$$PATH$$" in vmd_file:
        vmd_file = vmd_file.replace("$$PATH$$", path)

    f = open('tmp.vmd', 'w')
    f.write(vmd_file)
    f.close()

    disp = '''

    Representations are as follows:

        1 - CPK of atoms
        2 - LUMO Positive
        3 - HOMO Positive
        4 - LUMO Negative
        5 - HOMO Negative
        $POTENTIAL$
        $$MO$$

    '''

    if orbital is not None:
        disp = disp.replace("$$MO$$", "7 - MO %s" % orbital)
    else:
        disp = disp.replace("$$MO$$", "")

    if skip_potential:
        disp = disp.replace("$POTENTIAL$", "")
    else:
        disp = disp.replace("$POTENTIAL$", "6 - Potential Surface")

    disp = print_helper.color_set(disp, 'BLUE')
    disp = print_helper.color_set(disp, 'BOLD')

    print(disp)

    Popen('%s -e tmp.vmd' % which("vmd"), shell=True)
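
A hypothetical invocation (assumes a converged Gaussian job named
'water', i.e. gaussian/water.chk exists, with cubegen, formchk, and vmd
all on the PATH):

cubegen_analysis("water", path="gaussian/", skip_potential=True)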
Example #13
def get_job(s_flag, detail=0):
    '''
    Get a list of all jobs currently on your queue.  From this, only return
    the values that have s_flag in them.  The *detail* variable can be used
    to specify how much information you want returned.

    **Parameters**

        s_flag: *str*
            A string to parse out job information with.
        detail: *int, optional*
            The amount of information you want returned.

    **Returns**

        all_jobs: *list*
            Depending on *detail*, you get the following:

                - *detail* =0: *list, str*
                    List of the names of all jobs on the queue.

                - *detail* =1: *list, tuple, str*
                    List of all jobs on the queue as:
                        (job name, time run, job status, job id)

                - *detail* =2: *list, tuple, str*
                    List of all jobs on the queue as:
                        (job name,
                         time run,
                         job status,
                         job id,
                         queue,
                         number of processors)

                - *detail* =3: *list, str*
                    List of the job ids of all jobs on the queue.
    '''
    detail = int(detail)

    main_detail = detail
    if detail <= 0:
        detail = 1

    # Get input from jlist as a string
    jlist_path = which("jlist")
    qlist_path = which("qlist")
    jshow_path = which("jshow")

    assert jlist_path is not None,\
        "Error - Cannot find jlist."
    assert qlist_path is not None,\
        "Error - Cannot find qlist."
    assert jshow_path is not None,\
        "Error - Cannot find jshow."

    p = subprocess.Popen([jlist_path],
                         shell=False,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    output = p.stdout.read().decode("utf-8")

    # Get data from string
    pattern = getpass.getuser() +\
        r'[\s]+([\S]+)[\s]+([\S]+)[\s]+([\S]+)'
    info = re.findall(pattern, output)

    # Get a list of names
    names = []
    for a in info:
        names.append(a[0])

    if len(names) > 0:
        out_ids = output.split("\n")
        out_ids = [
            x.split()[0] for x in out_ids
            if len(x.split()) > 0 and is_numeric(x.split()[0])
        ]
        info = [tuple(list(i) + [j]) for i, j in zip(info, out_ids)]

    # If user wants more information
    all_jobs = None
    if detail == 3:
        close_pipes(p)
        all_jobs = [i[-1] for i in info]
    elif detail == 2:
        for i, a in enumerate(info):
            p = subprocess.Popen([jshow_path, a[0]], stdout=subprocess.PIPE)
            s = p.stdout.read().decode("utf-8")
            serv = s[s.find('Queue name:'):].split()[2].strip()
            threads = 1
            if "Slot Reservations" in s:
                threads = s[s.find('Slot Reservations'):].split()[4]
                threads = threads.strip()
            info[i] = info[i] + (
                serv,
                threads,
            )
        close_pipes(p)
        all_jobs = info

    if all_jobs is None:
        # Return appropriate information
        close_pipes(p)
        if detail == 1:
            all_jobs = info
        else:
            all_jobs = names

    job_indices = [
        i for i, j in enumerate(all_jobs)
        if s_flag in (j if isinstance(j, str) else " ".join(j))
    ]
    chosen_jobs = [all_jobs[i] for i in job_indices]
    if main_detail == 0:
        return [j[0] for j in chosen_jobs]
    else:
        return chosen_jobs
Example #14
def submit_job(name, job_to_submit, **kwargs):
    '''
    Code to submit a simulation to the specified queue and queueing system.

    **Parameters**

        name: *str*
            Name of the job to be submitted to the queue.
        job_to_submit: *str*
            String holding code you wish to submit.
        queue: *str, optional*
            What queue to run the simulation on (queueing system dependent).
        walltime: *str, optional*
            How long to post the job on the queue for in d-h:m:s where d are
            days, h are hours, m are minutes, and s are seconds.  Default is
            for 30 minutes (00:30:00).
        nprocs: *int, optional*
            How many processors to run the simulation on.  Note, the actual
            number of cores mpirun will use is procs * ntasks.
        sub_flag: *str, optional*
            Additional strings/flags/arguments to add at the end when we
            submit a job using jsub.  That is: jsub demo.nbs sub_flag.
        unique_name: *bool, optional*
            Whether to force the requirement of a unique name or not.  NOTE! If
            you submit simulations from the same folder, ensure that this is
            True lest you have a redundancy problem! To overcome said issue,
            you can set redundancy to True as well (but only if the simulation
            is truly redundant).
        outfile_name: *str, optional*
            Whether to give a unique output file name, or one based on the sim
            name.procs
        xhosts: *str* or *list, str, optional*
            Which cpu to submit the job to.
        email: *str, optional*
            An email address for sending job information to.
        priority: *int, optional*
            What priority to give the submitted job.
        sandbox: *tuple, optional*
            A tuple of two lists: files to sandbox in, and files to
            sandbox out.  None disables sandboxing.
        redundancy: *bool, optional*
            With redundancy on, if the job is submitted and unique_name is on,
            then if another job of the same name is running, a pointer to that
            job will instead be returned.

    **Returns**

        job_obj: :class:`squid.jobs.nbs.Job`
            A Job object.
    '''
    # Store the defaults
    params = {
        "queue": "shared",
        "nprocs": 1,
        "sub_flag": "",
        "unique_name": True,
        "redundancy": False,
        "outfile_name": None,
        "xhosts": None,
        "email": None,
        "priority": None,
        "sandbox": True,
        "walltime": None,
    }
    ## Ensure we are passing only the above
    #for key, value in kwargs.items():
    #    assert key in params,\
    #        "Error - Unknown variable (%s) passed to nbs.submit_job." % key
    params.update(kwargs)

    if params["walltime"] is not None:
        print("Warning - Walltime is not handled in NBS yet.  Your job will \
have the default time of the given queue.")

    # Ensure variables of correct types
    param_types = {
        "queue": lambda s: str(s).strip(),
        "nprocs": int,
        "sub_flag": lambda s: str(s).strip(),
        "unique_name": bool,
        "redundancy": bool,
        "sandbox": bool,
    }
    for k, f in param_types.items():
        params[k] = f(params[k])

    # Ensure default values make sense
    # Check Queue
    nbs_queues = get_nbs_queues()
    assert params["queue"] in nbs_queues,\
        "Error - Invalid queue (%s) requested.  Options: %s"\
        % (params["queue"], ", ".join(nbs_queues))

    jsub_path = which("jsub")
    assert jsub_path is not None,\
        "Error - Unable to find jsub path!"

    # Deal with variables accordingly
    if params["xhosts"] is not None:
        if isinstance(params["xhosts"], str):
            xhosts = "##NBS-xhost: \"%s\"" % params["xhosts"]
        elif is_array(params["xhosts"]):
            xhosts = "##NBS-xhost: " +\
                     ", ".join(map(lambda x: '"' + x + '"', params["xhosts"]))
        else:
            raise Exception("xhosts has been passed oddly!")
    else:
        xhosts = ""

    # Generate your script
    generic_script = '''#!/bin/sh
##NBS-name: ''' + name + '''
##NBS-nproc: ''' + str(params["nprocs"]) + '''
##NBS-queue: ''' + params["queue"] + '''
''' + ["", "##NBS-unique: yes"][int(params["unique_name"])] + '''
''' + xhosts

    # If emailing, set here
    if params["email"] is not None:
        generic_script += "##NBS-email: " + params["email"] + "\n"

    # If priority is set, add it
    if params["priority"] is not None:
        if int(params["priority"]) > 255:
            params["priority"] = 255
        if int(params["priority"]) < 1:
            params["priority"] = 1
        generic_script += "##NBS-priority: " + str(params["priority"]) + "\n"

    # Take care of sandboxing if needed
    if params["sandbox"] is not None:
        generic_script = generic_script + '''
##NBS-fdisk: 8192
##NBS-fswap: 8192
##NBS-sandbox: yes
##NBS-tmp-sandbox: yes
'''
        for sb_in in params["sandbox"][0]:
            generic_script = generic_script + '''
##NBS-input: ''' + sb_in
        for sb_out in params["sandbox"][1]:
            generic_script = generic_script + '''
##NBS-output: ''' + sb_out + ''' -overwrite'''
        generic_script = generic_script + "\n\n"

    # Add in your script now
    generic_script = generic_script + "\ndate\n" + job_to_submit + "\ndate\n\n"

    f = open(name + '.nbs', 'w')
    f.write(generic_script)
    f.close()

    # Submit job
    cmd = "%s %s.nbs %s" % (jsub_path, name, params["sub_flag"])
    job_pipe = subprocess.Popen(cmd.split(),
                                shell=False,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    job_err = job_pipe.stderr.read().decode("utf-8")

    if params["redundancy"] and "+notunique:" in job_err:
        try:
            job_info = get_job(name, detail=1)[0]
        except IndexError:
            # Job finished in process of submitting and redundancy call
            job_to_return = Job(name)
            # Attach the redundancy flag
            job_to_return.redundancy = True
            return job_to_return
        job_to_return = Job(job_info[0], job_id=job_info[-1])
        job_to_return.redundancy = True
        close_pipes(job_pipe)
        return job_to_return
    elif "+notunique:" in job_err:
        raise Exception("Job with name %s already exists in the queue!" % name)

    job_id_str = job_pipe.stdout.read().decode("utf-8")

    if "submitted to queue" not in job_id_str:
        print("\nFailed to submit the job!")
        print("--------------- JOB OUTPUT ---------------")
        print(job_id_str)
        print("---------------- JOB ERROR ---------------")
        print(job_err)
        print("---------------------------------")
        sys.stdout.flush()
        raise Exception()

    try:
        job_id = job_id_str.split("submitted to queue")[0].split()[-1][2:-1]
    except IndexError:
        print("ERROR - job_id_str is:")
        print(job_id_str)
        print("Defaulting to None, should still work... FIX!")
        job_id = None
    close_pipes(job_pipe)
    return Job(name, job_id=job_id)
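
A hypothetical NBS submission (the queue name and shell snippet are
placeholders; the queue must appear in get_nbs_queues()):

job = submit_job("demo_job",
                 "echo Hello from NBS",
                 queue="shared",
                 nprocs=2)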
Example #15
def get_lmp_obj(parallel=True):
    '''
    This function will find the lmp executable and a corresponding mpi
    executable.  It will handle errors accordingly.

    **Parameters**

        parallel: *bool, optional*
            Whether to get corresponding mpiexec info or not.

    **Returns**

        lmp_path: *str*
            Path to a lammps executable.
        mpi_path: *str*
            Path to an mpi executable.
    '''

    # If running in parallel, ensure we have mpi
    mpi_path = None
    if parallel:
        mpi_path = which("mpiexec")
        p = subprocess.Popen([mpi_path, "-h"],
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout = str(p.stdout.read().decode("utf-8").strip())
        close_pipes(p)

        # Simple check for openmpi
        assert "mpi" in stdout.lower(),\
            "Error - Unable to access mpiexec.  Please ensure it is in your \
PATH environment variable!"

    # First, look for lmp_X in order of common names
    lmp_path = None
    common_names = ["lmp_mpi", "lmp_serial", "lmp_smrff"]
    lmp_string_id = "Large-scale Atomic/Molecular Massively Parallel Simulator"
    for name in common_names:
        if which(name) is not None:
            # Check stuff
            if mpi_path is not None:
                cmd = [mpi_path, "-n", "1", which(name), "-h"]
            else:
                cmd = [which(name), "-h"]
            p = subprocess.Popen(cmd,
                                 shell=False,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout = str(p.stdout.read().decode("utf-8").strip())

            if lmp_string_id not in stdout:
                close_pipes(p)
                continue
            else:
                # If it works, then save it
                close_pipes(p)
                lmp_path = which(name)
                break
    assert lmp_path is not None,\
        "Error - Unable to find lmp executable.  Please ensure it is \
in your PATH environment variable!"

    return lmp_path, mpi_path
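
Usage sketch (assumes one of lmp_mpi, lmp_serial, or lmp_smrff is on the
PATH; the input script name below is hypothetical):

lmp_path, mpi_path = get_lmp_obj(parallel=True)
cmd = "%s -n 2 %s -in run.lmp" % (mpi_path, lmp_path)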
Example #16
def get_pdf(frames,
            start=0.0,
            stop=5.0,
            step=0.1,
            cutoff=10.0,
            rho=1.0,
            quanta=0.001,
            output=None,
            persist=False):
    '''
    Obtain the pair distribution function of a list of atoms using
    the Debyer code.

    **Parameters**

        frames: *str or list,* :class:`squid.structures.atom.Atom`
            An xyz file name (with or without the .xyz extension) or
            an input frame to calculate the pdf for.
        start: *float, optional*
            The starting radial distance in Angstroms for the
            calculated pattern.
        stop: *float, optional*
            The ending radial distance in Angstroms for the calculated pattern.
        step: *float, optional*
            Step in Angstroms for the calculated pattern.
        cutoff: *float, optional*
            Cutoff distance in Angstroms for Interatomic Distance
            (ID) calculations.
        rho: *float, optional*
            Numeric density of the system.
        quanta: *float, optional*
            Interatomic Distance (ID) discretization quanta.
        output: *str, optional*
            Output file name, given with NO extension.
        persist: *bool, optional*
            Whether to persist made .g and .xyz files (True), or remove
            them (False)

    **Returns**

        pdf: *list, tuple, float*
            A list of tuples holding the pdf data (distance in
            Angstroms and Intensity).

    **References**

        * https://debyer.readthedocs.io/en/latest/
    '''

    debyer_path = which("debyer")
    assert debyer_path is not None,\
        "Error - Cannot find debyer in the PATH env variable."

    # If passed frames and not an xyz file name, write to xyz
    append = str(int(random.random() * 1E12))
    if type(frames) is not str:
        write_xyz(frames, "tmp_for_pdf_%s" % append)
        file_name = "tmp_for_pdf_%s" % append
    else:
        file_name = frames

    # Else, we want to ensure file_name is correct
    if file_name.endswith(".xyz"):
        file_name = file_name.split(".xyz")[0]
    if output is None:
        output = file_name

    if stop > cutoff:
        raise Exception("Stopping position should be less than or equal \
to the cutoff.")

    # Make command for debyer
    cmd = debyer_path
    cmd += " --cutoff=%.2f" % cutoff
    cmd += " --quanta=%.2f" % quanta
    cmd += " -g"
    cmd += " -f%.2f" % start
    cmd += " -t%.2f" % stop
    cmd += " -s%.2f" % step
    cmd += " --ro=%.2f" % rho
    cmd += " -o %s.g" % output
    cmd += " %s.xyz" % file_name

    # Run debyer and read in the pdf
    os.system(cmd)
    fptr_pdf = open("%s.g" % output, 'r').read().split("\n")
    i = 0
    while fptr_pdf[i].strip().startswith("#"):
        i += 1
    j = len(fptr_pdf) - 1
    while fptr_pdf[j].strip() == "":
        j -= 1
    fptr_pdf = fptr_pdf[i:j + 1]

    pdf = [(float(a.split()[0]), float(a.split()[1])) for a in fptr_pdf]

    if not persist:
        os.system("rm %s.g" % output)
        os.system("rm %s.xyz" % file_name)

    return pdf
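
A hypothetical call (assumes debyer is installed and water.xyz exists in
the working directory):

pdf = get_pdf("water.xyz", start=0.5, stop=5.0, step=0.1, cutoff=10.0)
for r, g in pdf[:5]:
    print(r, g)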
Example #17
def get_orca_obj(parallel=True):
    '''
    This function will find the orca executable and the corresponding openmpi
    executable.  It will handle errors accordingly.

    **Parameters**

        parallel: *bool, optional*
            Whether we guarantee the relevant parallel openmpi is setup (True)
            or not (False).

    **Returns**

        orca_path: *str*
            The path to the orca executable.
    '''
    # This is to ensure we read in ORCA correctly
    orca_string_id = "An Ab Initio, DFT and Semiempirical electronic structure"
    # This is to find the version
    version_string_id = "Program Version"

    orca_path = which("orca")
    # Determine orca version
    orca_pipe = subprocess.Popen(
        [orca_path, "FAKE_FILE"], shell=False,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = str(orca_pipe.stdout.read().decode("utf-8").strip())
    # stderr = str(p.stderr.read().decode("utf-8").strip())

    assert orca_string_id in stdout,\
        "Error - Unable to access Orca.  Please ensure it is in your PATH \
environment variable!"
    assert version_string_id in stdout,\
        "Error - Unable to assess Orca version!"

    orca_version = stdout.split(version_string_id)[1].strip().split()[0]

    # If running in parallel, ensure we have the correct version of openmpi
    ompi_pipe = None
    if parallel:
        ompi_version_should_be = {
            "4.1.2": "3.1",
            "4.2.0": "3.1"
        }
        assert orca_version in ompi_version_should_be,\
            "Error - Please contact squid dev. We do not have stored the \
required openmpi version for Orca %s" % orca_version

        # Find openmpi
        ompi_path = which("mpiexec")
        ompi_pipe = subprocess.Popen(
            [ompi_path, "--V"], shell=False,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout = str(ompi_pipe.stdout.read().decode("utf-8").strip())
        # stderr = str(p.stderr.read().decode("utf-8").strip())

        # Simple check for openmpi
        assert "open" in stdout.lower(),\
            "Error - Unable to access openmpi.  Please ensure it is in your \
PATH environment variable!"

        ompi_version = stdout.strip().split("\n")[0].split()[-1]
        ompi_version_held = ompi_version_should_be[orca_version]

        assert ompi_version.startswith(ompi_version_held),\
            "Error - Incorrect openmpi version for the loaded orca version. \
Should be openmpi %s (found %s) for orca %s."\
        % (ompi_version_held, ompi_version, orca_version)

    close_pipes(orca_pipe)
    close_pipes(ompi_pipe)

    return orca_path
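
Usage sketch (assumes an orca build whose version appears in the
ompi_version_should_be table above, with the matching openmpi loaded):

orca_path = get_orca_obj(parallel=True)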
Example #18
def chkDFT():
    USERNAME = getuser()

    # One can easily change defaults here (if they do, probably change the
    # help text below accordingly).
    dft, u1, u2, out_name, vmd, ovito, me =\
        'orca', 'Ha', 'Ha', 'out', False, False, False
    dft_list = ['g09', 'orca', 'jdftx']

    if '-h' in sys.argv or '-help' in sys.argv or len(sys.argv) < 2:
        print('''
chkDFT
---------
A command to quickly get a glimpse of a DFT simulation.
chkDFT [Sim_Name] [Options]

    Flag        Default     Description
-help, -h     :        :  Print this help menu
-dft          :  orca  :  Specify what type of dft simulation you want to
                          parse. By default it is 'orca', but can also be
                          'g09' or 'jdftx'.
-units, -u    :  Ha    :  Specify the units you want the output to be in.
                          By default this is Hartree.
-scale        :  1.0   :  Scale all energies by this value
-out, -o      :  out   :  Make an output file with this name holding all
                          xyz coordinates. If no xyz data is available
                          this will not run. Default output name is
                          'out.xyz' but user can choose their own using
                          this command.
-vmd, -v      :        :  Opens output xyz file in vmd. Flag turns on.
-ovito, -ov   :        :  Opens output xyz file in ovito. Flag turns on.
-me           :        :  Forces the .xyz file to be saved to ~/out.xyz

ex. chkDFT water -dft orca -u kT_300
''')
        sys.exit()

    # Get simulation name
    run_name = sys.argv[1]
    # Get the dft type
    if '-dft' in sys.argv:
        dft = sys.argv[sys.argv.index('-dft') + 1].lower()
        if dft not in dft_list:
            print("Error - %s not recognized for dft." % dft)
            sys.exit()
    # Get units
    if '-u' in sys.argv or '-units' in sys.argv:
        s = '-u' if '-u' in sys.argv else '-units'
        u2 = sys.argv[sys.argv.index(s) + 1]
        if u2 not in constants.ENERGY:
            print("Error - Energy unit not available. Consider using -scale.")
            sys.exit()
    # Get output name
    if '-out' in sys.argv or '-o' in sys.argv:
        s = '-o' if '-o' in sys.argv else '-out'
        out_name = sys.argv[sys.argv.index(s) + 1].replace(' ', '_')
    if len(out_name) < 5 or out_name[-4:] != '.xyz':
        out_name += '.xyz'
    # Get VMD display status
    if '-vmd' in sys.argv or '-v' in sys.argv:
        vmd = True
    # Get ovito display status
    if '-ovito' in sys.argv or '-ov' in sys.argv:
        ovito = True
    # Check if me is forced
    if '-me' in sys.argv:
        me = True

    # Read in data
    if dft == 'g09':
        try:
            data = g09.read(run_name)
        except IOError:
            print("Error - g09 simulation %s does not exist. \
Are you sure -dft g09 is correct?" % run_name)
            sys.exit()
    elif dft == 'orca':
        try:
            data = orca.read(run_name)
        except IOError:
            print("Error - orca simulation %s does not exist. \
Are you sure -dft orca is correct?" % run_name)
            sys.exit()
    elif dft == "jdftx":
        try:
            data = jdftx.read(run_name)
        except IOError:
            print("Error - jdftx simulation %s does not exist. \
Are you sure -dft jdftx is correct?" % run_name)
            sys.exit()
    else:
        print("DFT type %s not available..." % dft)
        sys.exit()

    # Get the header information
    head = 'Job Name: %s\n' % run_name
    head += 'DFT calculation via %s\n' % dft
    head += 'Energy Data Points: %d\n' % len(data.energies)
    if len(data.energies) > 2:
        Ener = str(
            units.convert_energy(u1, u2,
                                 data.energies[-2] - data.energies[-3]))
        head += 'dE 2nd last = %s %s\n' % (Ener, u2)
    if len(data.energies) > 1:
        Ener = str(
            units.convert_energy(u1, u2,
                                 data.energies[-1] - data.energies[-2]))
        head += 'dE last = %s %s\n' % (Ener, u2)
    if len(data.energies) > 0:
        Ener = str(units.convert_energy(u1, u2, data.energies[-1]))
        head += 'Last Energy = %s %s' % (Ener, u2)
    body, tail = '', ''

    if data.convergence is not None:
        for line in data.convergence:
            body += '\t'.join([str(ss) for ss in line]) + '\n'
        body = print_helper.spaced_print(body, delim='\t')

    finished = ""
    if data.finished:
        finished = "(finished) "

    if data.converged:
        tail = 'Job %sconverged in %.2e seconds' % (finished, data.time)
    else:
        tail = 'Job %shas not converged.' % finished

    length = max([len(tmp) for tmp in head.split('\n')] +
                 [len(tmp) for tmp in body.split('\n')] +
                 [len(tmp) for tmp in tail.split('\n')])
    dash = '\n' + ''.join(['-'] * length) + '\n'

    try:
        head2 = data.route.strip() + "\n" + data.extra_section.strip()
        head2 = head2.strip()
    except AttributeError:
        head2 = ""

    if body != '':
        print(dash + head + dash + head2 + dash + body + dash + tail + dash)
    else:
        print(dash + head + dash + head2 + dash + tail + dash)

    try:
        if len(data.frames) > 0:
            if me:
                me = '/fs/home/%s/' % USERNAME
            else:
                me = ''

            files.write_xyz(data.frames, me + out_name[:-4])
            if vmd:
                vmd_path = which("vmd")
                assert vmd_path is not None,\
                    "Error - Cannot find VMD in PATH env var."
                os.system('"' + vmd_path + '" ' + me + out_name)
            elif ovito:
                ovito_path = which("ovito")
                assert ovito_path is not None,\
                    "Error - Cannot find ovito in PATH env var."
                os.system('"' + ovito_path + '" ' + me + out_name)
    except TypeError:
        print("No atomic coordinates available yet...")
    except Exception:
        print("An unexpected error has occurred.")
        sys.exit()