Пример #1
0
    def merge(self, workdir, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0):
        """
        Merge GGK files, return the absolute path of the new database.

        Args:
            gswfk_file: Ground-state WFK filename
            dfpt_files: List of 1WFK files to merge.
            gkk_files: List of GKK files to merge.
            out_gkk: Name of the output GKK file
            binascii: Integer flat. 0 --> binary output, 1 --> ascii formatted output
        """
        raise NotImplementedError("This method should be tested")
        #out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)

        # We work with absolute paths.
        gswfk_file = os.path.absath(gswfk_file)
        dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
        gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]

        print("Will merge %d 1WF files, %d GKK file in output %s" %
              (len(dfpt_files), len(gkk_files), out_gkk))

        if self.verbose:
            for i, f in enumerate(dfpt_files): print(" [%d] 1WF %s" % (i, f))
            for i, f in enumerate(gkk_files): print(" [%d] GKK %s" % (i, f))

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [workdir], ["mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr"])

        inp = cStringIO()
        inp.write(out_gkk + "\n")        # Name of the output file
        inp.write(str(binascii) + "\n")  # Integer flag: 0 --> binary output, 1 --> ascii formatted output
        inp.write(gswfk_file + "\n")     # Name of the groud state wavefunction file WF

        #dims = len(dfpt_files, gkk_files, ?)
        dims = " ".join([str(d) for d in dims])
        inp.write(dims + "\n")             # Number of 1WF, of GKK files, and number of 1WF files in all the GKK files

        # Names of the 1WF files...
        for fname in dfpt_files:
            inp.write(fname + "\n")

        # Names of the GKK files...
        for fname in gkk_files:
            inp.write(fname + "\n")

        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "w") as fh:
            fh.writelines(self.stdin_data)
            # Force OS to write data to disk.
            fh.flush()
            os.fsync(fh.fileno())

        self.execute(workdir)

        return out_gkk
Пример #2
0
    def merge(self, workdir, gswfk_file, dfpt_files, gkk_files, out_gkk, binascii=0):
        """
        Merge GGK files, return the absolute path of the new database.

        Args:
            gswfk_file: Ground-state WFK filename
            dfpt_files: List of 1WFK files to merge.
            gkk_files: List of GKK files to merge.
            out_gkk: Name of the output GKK file
            binascii: Integer flat. 0 --> binary output, 1 --> ascii formatted output
        """
        raise NotImplementedError("This method should be tested")
        #out_gkk = out_gkk if cwd is None else os.path.join(os.path.abspath(cwd), out_gkk)

        # We work with absolute paths.
        gswfk_file = os.path.absath(gswfk_file)
        dfpt_files = [os.path.abspath(s) for s in list_strings(dfpt_files)]
        gkk_files = [os.path.abspath(s) for s in list_strings(gkk_files)]

        print("Will merge %d 1WF files, %d GKK file in output %s" %
              (len(dfpt_files), len(gkk_files), out_gkk))

        if self.verbose:
            for i, f in enumerate(dfpt_files): print(" [%d] 1WF %s" % (i, f))
            for i, f in enumerate(gkk_files): print(" [%d] GKK %s" % (i, f))

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [workdir], ["mrggkk.stdin", "mrggkk.stdout", "mrggkk.stderr"])

        inp = StringIO()
        inp.write(out_gkk + "\n")        # Name of the output file
        inp.write(str(binascii) + "\n")  # Integer flag: 0 --> binary output, 1 --> ascii formatted output
        inp.write(gswfk_file + "\n")     # Name of the groud state wavefunction file WF

        #dims = len(dfpt_files, gkk_files, ?)
        dims = " ".join([str(d) for d in dims])
        inp.write(dims + "\n")             # Number of 1WF, of GKK files, and number of 1WF files in all the GKK files

        # Names of the 1WF files...
        for fname in dfpt_files:
            inp.write(fname + "\n")

        # Names of the GKK files...
        for fname in gkk_files:
            inp.write(fname + "\n")

        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "w") as fh:
            fh.writelines(self.stdin_data)
            # Force OS to write data to disk.
            fh.flush()
            os.fsync(fh.fileno())

        self.execute(workdir)

        return out_gkk
Пример #3
0
    def __init__(self,
                 parent,
                 dirpaths=None,
                 filepaths=None,
                 walk=True,
                 wildcard="",
                 **kwargs):
        """
        Args:
            parent:
                parent window
            dirpaths:
                List of directories to scan.
            filepaths
                List of filepaths (absolute paths).
            walk:
                True if we have to browse all files and directories starting from filepaths.
            wildcard
                Regular expressions for selecting files (tokens are separated by |).
        """
        super(FileListFrame, self).__init__(parent, -1, **kwargs)

        if dirpaths is not None:
            dirpaths = map(os.path.abspath, list_strings(dirpaths))
        else:
            dirpaths = []

        if filepaths is not None:
            filepaths = map(os.path.abspath, list_strings(filepaths))
        else:
            filepaths = []

        wildcard = WildCard(wildcard)

        self.all_filepaths = filepaths

        if walk:
            for dirpath in dirpaths:
                for root, dirnames, filenames in os.walk(dirpath):
                    fnames = [os.path.join(root, f) for f in filenames]
                    self.all_filepaths += wildcard.filter(fnames)
        else:
            # Select only the files in dirpaths.
            for dirpath in dirpaths:
                fnames = [os.path.join(dirpat, f) for f in os.listdir(dirpath)]
                fnames = filter(os.path.isfile, fnames)
                self.all_filepaths += wildcard.filter(fnames)

        self.BuildUi()
Пример #4
0
def find_file(files, ext, prefix=None, dataset=None, image=None):
    """
  Given a list of file names, return the file with extension "_" + ext, None if not found.

  The prefix, the dataset index and the image index can be specified

  .. warning::

     There are some border cases that will confuse the algorithm
     since the order of dataset and image is not tested.
     Solving this problem requires the knowledge of ndtset and nimages
     This code, however should work in 99.9% of the cases.
  """
    separator = "_"

    for filename in list_strings(files):
        # Remove Netcdf extension (if any)
        f = filename[:-3] if filename.endswith(".nc") else filename
        if separator not in f: continue
        tokens = f.split(separator)
        if tokens[-1] == ext:
            found = True
            if prefix is not None:
                found = found and filename.startswith(prefix)
            if dataset is not None:
                found = found and "DS" + str(dataset) in tokens
            if image is not None:
                found = found and "IMG" + str(image) in tokens
            if found: return filename
    else:
        return None
Пример #5
0
def find_file(files, ext, prefix=None, dataset=None, image=None):
  """
  Given a list of file names, return the file with extension "_" + ext, None if not found.

  The prefix, the dataset index and the image index can be specified

  .. warning::

     There are some border cases that will confuse the algorithm
     since the order of dataset and image is not tested.
     Solving this problem requires the knowledge of ndtset and nimages
     This code, however should work in 99.9% of the cases.
  """
  separator = "_"

  for filename in list_strings(files):
      # Remove Netcdf extension (if any)
      f = filename[:-3] if filename.endswith(".nc") else filename
      if separator not in f: continue
      tokens = f.split(separator)
      if tokens[-1] == ext:
        found = True
        if prefix is not None:  found = found and filename.startswith(prefix)
        if dataset is not None: found = found and "DS" +  str(dataset) in tokens
        if image is not None:   found = found and "IMG" + str(image)   in tokens
        if found: return filename
  else:
      return None
Пример #6
0
    def merge_qpoints(self, workdir, files_to_merge, out_prefix):
        """
        Execute mrgscr inside directory `workdir` to merge `files_to_merge`.
        Produce new file with prefix `out_prefix`
        """
        # We work with absolute paths.
        files_to_merge = [os.path.abspath(s) for s in list_strings(files_to_merge)]
        nfiles = len(files_to_merge)

        if self.verbose:
            print("Will merge %d files with output_prefix %s" % (nfiles, out_prefix))
            for (i, f) in enumerate(files_to_merge):
                print(" [%d] %s" % (i, f))

        if nfiles == 1:
            raise self.Error("merge_qpoints does not support nfiles == 1")

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [workdir], ["mrgscr.stdin", "mrgscr.stdout", "mrgscr.stderr"])

        inp = cStringIO()
        inp.write(str(nfiles) + "\n")     # Number of files to merge.
        inp.write(out_prefix + "\n")      # Prefix for the final output file:

        for filename in files_to_merge:
            inp.write(filename + "\n")   # List with the files to merge.

        inp.write("1\n")                 # Option for merging q-points.

        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "w") as fh:
            fh.writelines(self.stdin_data)

        self.execute(workdir)
Пример #7
0
    def name_tensor_list(self, tensor_names=None, tensor_type="all", tol=None):
        """
        List of (name, tensor) tuples. Only tensors stored in the object are returned.

        Args:
            tensor_names: List of tensor names to select. None means all.
            tensor_type: Select tensors by type. Must be in ["all", "elastic", "piezoelectric"].
            tol: Set to zero all all entries below this threshold
        """
        l = []
        if tensor_names is None:
            for name in self.TYPE2NAMES[tensor_type]:
                tensor = self.get_tensor(name, tol=tol)
                if tensor is not None:
                    l.append((name, tensor))
        else:
            for name in list_strings(tensor_names):
                if name not in self.TYPE2NAMES[tensor_type]:
                    raise ValueError(
                        "tensor name %s does not belong to type: `%s`" %
                        (name, tensor_type))
                tensor = self.get_tensor(name, tol=tol)
                if tensor is not None:
                    l.append((name, tensor))

        return l
Пример #8
0
    def parse(self, filenames):
        """
        Read and parse a filename or a list of filenames.

        Files that cannot be opened are ignored. A single filename may also be given.
        Return list of successfully read files.
        """
        filenames = list_strings(filenames)

        read_ok = []
        for fname in filenames:
            try:
                fh = open(fname)
            except IOError:
                logger.warning("Cannot open file %s" % fname)
                continue

            try:
                self._read(fh, fname)
                read_ok.append(fname)

            except self.Error as e:
                logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
                continue

            finally:
                fh.close()

        # Add read_ok to the list of files that have been parsed.
        self._filenames.extend(read_ok)
        return read_ok
Пример #9
0
    def dojo_compare(self, what="all", **kwargs):
        """
        Compare ecut convergence and Deltafactor, GBRV results
        """
        import matplotlib.pyplot as plt
        show = kwargs.pop("show", True)
        what = list_strings(what)
        figs = []

        if all(p.dojo_report.has_trial("deltafactor") for p in self) and \
           any(k in what for k in ("all", "ecut")):

            fig_etotal, ax_list = plt.subplots(nrows=len(self),
                                               ncols=1,
                                               sharex=True,
                                               squeeze=True)
            figs.append(fig_etotal)

            for ax, pseudo in zip(ax_list, self):
                pseudo.dojo_report.plot_etotal_vs_ecut(ax=ax,
                                                       show=False,
                                                       label=pseudo.basename)
            if show: plt.show()

        if all(p.dojo_report.has_trial("deltafactor") for p in self) and \
           any(k in what for k in ("all", "df", "deltafactor")):

            fig_deltafactor, ax_grid = plt.subplots(nrows=5,
                                                    ncols=len(self),
                                                    sharex=True,
                                                    sharey="row",
                                                    squeeze=False)
            figs.append(fig_deltafactor)

            for ax_list, pseudo in zip(ax_grid.T, self):
                pseudo.dojo_report.plot_deltafactor_convergence(
                    xc=pseudo.xc, ax_list=ax_list, show=False)

            fig_deltafactor.suptitle(" vs ".join(p.basename for p in self))
            if show: plt.show()

        # Compare GBRV results
        if all(p.dojo_report.has_trial("gbrv_bcc") for p in self) and \
           any(k in what for k in ("all", "gbrv")):

            fig_gbrv, ax_grid = plt.subplots(nrows=2,
                                             ncols=len(self),
                                             sharex=True,
                                             sharey="row",
                                             squeeze=False)
            figs.append(fig_gbrv)

            for ax_list, pseudo in zip(ax_grid.T, self):
                pseudo.dojo_report.plot_gbrv_convergence(ax_list=ax_list,
                                                         show=False)

            fig_gbrv.suptitle(" vs ".join(p.basename for p in self))
            if show: plt.show()

        return figs
Пример #10
0
    def fix_paths(self, paths):
        """
        Fix the filenames in the iterable paths

        Returns:
            old2new: Mapping old_path --> new_path
        """
        old2new, fixed_exts = {}, []

        for path in list_strings(paths):
            newpath, ext = self._fix_path(path)

            if newpath is not None:
                #if ext not in fixed_exts:
                #    if ext == "1WF": continue
                #    raise ValueError("Unknown extension %s" % ext)
                #print(ext, path, fixed_exts)
                #if ext != '1WF':
                #    assert ext not in fixed_exts
                if ext not in fixed_exts:
                    if ext == "1WF": continue
                    raise ValueError("Unknown extension %s" % ext)
                fixed_exts.append(ext)
                old2new[path] = newpath

        return old2new
Пример #11
0
    def from_flow(cls, flow, outdirs="all", nids=None, ext=None, task_class=None):
        """
        Build a robot from a |Flow| object.

        Args:
            flow: |Flow| object
            outdirs: String used to select/ignore the files in the output directory of flow, works and tasks
                outdirs="work" selects only the outdir of the Works,
                outdirs="flow+task" selects the outdir of the Flow and the outdirs of the tasks
                outdirs="-work" excludes the outdir of the Works.
                Cannot use ``+`` and ``-`` flags in the same string.
                Default: `all` that is equivalent to "flow+work+task"
            nids: List of node identifiers used to select particular nodes. Not used if None
            ext: File extension associated to the robot. Mainly used if method is invoked with the BaseClass
            task_class: Task class or string with the class name used to select the tasks in the flow.
                None implies no filtering.

        Usage example:

        .. code-block:: python

            with abilab.GsrRobot.from_flow(flow) as robot:
                print(robot)

            # That is equivalent to:
            with Robot.from_flow(flow, ext="GSR") as robot:
                print(robot)

        Returns:
            ``Robot`` subclass.
        """
        robot = cls() if ext is None else cls.class_for_ext(ext)()
        all_opts = ("flow", "work", "task")

        if outdirs == "all":
            tokens = all_opts
        elif "+" in outdirs:
            assert "-" not in outdirs
            tokens = outdirs.split("+")
        elif "-" in outdirs:
            assert "+" not in outdirs
            tokens = [s for s in all if s not in outdirs.split("-")]
        else:
            tokens = list_strings(outdirs)

        if not all(t in all_opts for t in tokens):
            raise ValueError("Wrong outdirs string %s" % outdirs)

        if "flow" in tokens:
            robot.add_extfile_of_node(flow, nids=nids, task_class=task_class)

        if "work" in tokens:
            for work in flow:
                robot.add_extfile_of_node(work, nids=nids, task_class=task_class)

        if "task" in tokens:
            for task in flow.iflat_tasks():
                robot.add_extfile_of_node(task, nids=nids, task_class=task_class)

        return robot
Пример #12
0
    def read_tensor3_terms(self, key, components, itemp=0):
        """
        Args:
            key: Name of the netcdf variable to read.
            components: List of cartesian tensor components to plot e.g. ["xxx", "xyz"].
                "all" if all components available on file should be plotted on the same ax.
            itemp: Temperature index.

        Return:
            :class:`OrderedDict` mapping cartesian components e.g. "xyz" to data dictionary.
            Individual entries are listed in ALL_CHIS[key]["terms"]
        """
        # arrays have Fortran shape [two, nomega, num_comp, ntemp]
        if components == "all": components = self.computed_components[key]
        components = list_strings(components)
        if not (self.ntemp > itemp >= 0):
            raise ValueError("Invalid itemp: %s, ntemp: %s" % (itemp, self.ntemp))

        od = OrderedDict([(comp, OrderedDict()) for comp in components])
        for chiname in ALL_CHIS[key]["terms"]:
            #print("About to read:", chiname)
            var = self.read_variable(chiname)
            for comp in components:
                try:
                    ijkp = self.computed_components[key].index(comp)
                except ValueError:
                    raise ValueError("%s component %s was not computed" % (key, comp))
                values = var[itemp, ijkp]
                od[comp][chiname] = values[:, 0] + 1j * values[:, 1]
        return od
Пример #13
0
    def from_files(cls, filenames, labels=None, abspath=False):
        """
        Build a Robot from a list of `filenames`.
        if labels is None, labels are automatically generated from absolute paths.

        Args:
            abspath: True if paths in index should be absolute. Default: Relative to `top`.
        """
        filenames = list_strings(filenames)
        from abipy.abilab import abiopen
        filenames = [f for f in filenames if cls.class_handles_filename(f)]
        items = []
        for i, f in enumerate(filenames):
            try:
                abifile = abiopen(f)
            except Exception as exc:
                cprint("Exception while opening file: `%s`" % str(f), "red")
                cprint(exc, "red")
                abifile = None

            if abifile is not None:
                label = abifile.filepath if labels is None else labels[i]
                items.append((label, abifile))

        new = cls(*items)
        if labels is None and not abspath: new.trim_paths(start=None)
        return new
Пример #14
0
    def merge_qpoints(self, workdir, files_to_merge, out_prefix):
        """
        Execute mrgscr inside directory `workdir` to merge `files_to_merge`.
        Produce new file with prefix `out_prefix`
        """
        # We work with absolute paths.
        files_to_merge = [os.path.abspath(s) for s in list_strings(files_to_merge)]
        nfiles = len(files_to_merge)

        if self.verbose:
            print("Will merge %d files with output_prefix %s" % (nfiles, out_prefix))
            for (i, f) in enumerate(files_to_merge):
                print(" [%d] %s" % (i, f))

        if nfiles == 1:
            raise self.Error("merge_qpoints does not support nfiles == 1")

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [workdir], ["mrgscr.stdin", "mrgscr.stdout", "mrgscr.stderr"])

        inp = cStringIO()
        inp.write(str(nfiles) + "\n")     # Number of files to merge.
        inp.write(out_prefix + "\n")      # Prefix for the final output file:

        for filename in files_to_merge:
            inp.write(filename + "\n")   # List with the files to merge.

        inp.write("1\n")                 # Option for merging q-points.

        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "w") as fh:
            fh.writelines(self.stdin_data)

        self.execute(workdir)
Пример #15
0
    def fix_paths(self, paths):
        """
        Fix the filenames in the iterable paths

        Returns:
            old2new: Mapping old_path --> new_path
        """
        old2new, fixed_exts = {}, []

        for path in list_strings(paths):
            newpath, ext = self._fix_path(path)

            if newpath is not None:
                #if ext not in fixed_exts:
                #    if ext == "1WF": continue
                #    raise ValueError("Unknown extension %s" % ext)
                #print(ext, path, fixed_exts)
                #if ext != '1WF':
                #    assert ext not in fixed_exts
                if ext not in fixed_exts:
                    if ext == "1WF": continue
                    raise ValueError("Unknown extension %s" % ext)
                fixed_exts.append(ext)
                old2new[path] = newpath

        return old2new
Пример #16
0
    def read_lineps(self, components, itemp=0):
        """
        Args:
            components: List of cartesian tensor components to plot e.g. ["xx", "xy"].
                "all" if all components available on file should be plotted on the same ax.
            itemp: Temperature index.
        """
        # linopt_epsilon has *Fortran* shape [two, nomega, num_comp, ntemp]
        key = "linopt"
        if components == "all": components = self.computed_components[key]
        if not (self.ntemp > itemp >= 0):
            raise ValueError("Invalid itemp: %s, ntemp: %s" %
                             (itemp, self.ntemp))

        var = self.read_variable("linopt_epsilon")
        od = OrderedDict()
        for comp in list_strings(components):
            try:
                ijp = self.computed_components[key].index(comp)
            except ValueError:
                raise ValueError("epsilon_component %s was not computed" %
                                 comp)

            values = var[itemp, ijp]
            od[comp] = values[:, 0] + 1j * values[:, 1]
        return od
Пример #17
0
    def read_tensor3_terms(self, key, components, itemp=0):
        """
        Args:
            key: Name of the netcdf variable to read.
            components: List of cartesian tensor components to plot e.g. ["xxx", "xyz"].
                "all" if all components available on file should be plotted on the same ax.
            itemp: Temperature index.

        Return:
            :class:`OrderedDict` mapping cartesian components e.g. "xyz" to data dictionary.
            Individual entries are listed in ALL_CHIS[key]["terms"]
        """
        # arrays have Fortran shape [two, nomega, num_comp, ntemp]
        if components == "all": components = self.computed_components[key]
        components = list_strings(components)
        if not (self.ntemp > itemp >= 0):
            raise ValueError("Invalid itemp: %s, ntemp: %s" %
                             (itemp, self.ntemp))

        od = OrderedDict([(comp, OrderedDict()) for comp in components])
        for chiname in ALL_CHIS[key]["terms"]:
            #print("About to read:", chiname)
            var = self.read_variable(chiname)
            for comp in components:
                try:
                    ijkp = self.computed_components[key].index(comp)
                except ValueError:
                    raise ValueError("%s component %s was not computed" %
                                     (key, comp))
                values = var[itemp, ijkp]
                od[comp][chiname] = values[:, 0] + 1j * values[:, 1]
        return od
Пример #18
0
 def remove_vars(self, keys):
     """Remove the variables listed in keys."""
     for key in list_strings(keys):
         if key not in self:
             raise KeyError("key: %s not in self:\n %s" %
                            (key, list(self.keys())))
         self.pop(key)
Пример #19
0
    def plot_linopt(self, select="all", itemp=0, xlims=None, **kwargs):
        """
        Subplots with all linear optic quantities selected by ``select`` at temperature ``itemp``.

        Args:
            select:
            itemp: Temperature index.
            xlims: Set the data limits for the x-axis. Accept tuple e.g. ``(left, right)``
                or scalar e.g. ``left``. If left (right) is None, default values are used.

        Returns: |matplotlib-Figure|
        """
        key = "linopt"
        if not self.reader.computed_components[key]: return None
        if select == "all": select = list(LINEPS_WHAT2EFUNC.keys())
        select = list_strings(select)

        nrows, ncols = len(select), 1
        ax_mat, fig, plt = get_axarray_fig_plt(None,
                                               nrows=nrows,
                                               ncols=ncols,
                                               sharex=True,
                                               sharey=False,
                                               squeeze=True)

        components = self.reader.computed_components[key]
        for i, (what, ax) in enumerate(zip(select, ax_mat)):
            self.plot_linear_epsilon(what=what,
                                     itemp=itemp,
                                     components=components,
                                     ax=ax,
                                     xlims=xlims,
                                     with_xlabel=(i == len(select) - 1),
                                     show=False)
        return fig
Пример #20
0
    def from_flow(cls, flow, outdirs="all", nids=None, ext=None, task_class=None):
        """
        Build a robot from a |Flow| object.

        Args:
            flow: |Flow| object
            outdirs: String used to select/ignore the files in the output directory of flow, works and tasks
                outdirs="work" selects only the outdir of the Works,
                outdirs="flow+task" selects the outdir of the Flow and the outdirs of the tasks
                outdirs="-work" excludes the outdir of the Works.
                Cannot use ``+`` and ``-`` flags in the same string.
                Default: `all` that is equivalent to "flow+work+task"
            nids: List of node identifiers used to select particular nodes. Not used if None
            ext: File extension associated to the robot. Mainly used if method is invoked with the BaseClass
            task_class: Task class or string with the class name used to select the tasks in the flow.
                None implies no filtering.

        Usage example:

        .. code-block:: python

            with abilab.GsrRobot.from_flow(flow) as robot:
                print(robot)

            # That is equivalent to:
            with Robot.from_flow(flow, ext="GSR") as robot:
                print(robot)

        Returns:
            ``Robot`` subclass.
        """
        robot = cls() if ext is None else cls.class_for_ext(ext)()
        all_opts = ("flow", "work", "task")

        if outdirs == "all":
            tokens = all_opts
        elif "+" in outdirs:
            assert "-" not in outdirs
            tokens = outdirs.split("+")
        elif "-" in outdirs:
            assert "+" not in outdirs
            tokens = [s for s in all if s not in outdirs.split("-")]
        else:
            tokens = list_strings(outdirs)

        if not all(t in all_opts for t in tokens):
            raise ValueError("Wrong outdirs string %s" % outdirs)

        if "flow" in tokens:
            robot.add_extfile_of_node(flow, nids=nids, task_class=task_class)

        if "work" in tokens:
            for work in flow:
                robot.add_extfile_of_node(work, nids=nids, task_class=task_class)

        if "task" in tokens:
            for task in flow.iflat_tasks():
                robot.add_extfile_of_node(task, nids=nids, task_class=task_class)

        return robot
Пример #21
0
    def plot_linopt(self, select="all", itemp=0, xlims=None, **kwargs):
        """
        Subplots with all linear optic quantities selected by ``select`` at temperature ``itemp``.

        Args:
            select:
            itemp: Temperature index.
            xlims: Set the data limits for the x-axis. Accept tuple e.g. ``(left, right)``
                or scalar e.g. ``left``. If left (right) is None, default values are used.

        Returns: |matplotlib-Figure|
        """
        key = "linopt"
        if not self.reader.computed_components[key]: return None
        if select == "all": select = list(LINEPS_WHAT2EFUNC.keys())
        select = list_strings(select)

        nrows, ncols = len(select), 1
        ax_mat, fig, plt = get_axarray_fig_plt(None, nrows=nrows, ncols=ncols,
                                               sharex=True, sharey=False, squeeze=True)

        components = self.reader.computed_components[key]
        for i, (what, ax) in enumerate(zip(select, ax_mat)):
            self.plot_linear_epsilon(what=what, itemp=itemp, components=components,
                                     ax=ax, xlims=xlims, with_xlabel=(i == len(select) - 1),
                                     show=False)
        return fig
Пример #22
0
    def from_files(cls, filenames, labels=None, abspath=False):
        """
        Build a Robot from a list of `filenames`.
        if labels is None, labels are automatically generated from absolute paths.

        Args:
            abspath: True if paths in index should be absolute. Default: Relative to `top`.
        """
        filenames = list_strings(filenames)
        from abipy.abilab import abiopen
        filenames = [f for f in filenames if cls.class_handles_filename(f)]
        items = []
        for i, f in enumerate(filenames):
            try:
                abifile = abiopen(f)
            except Exception as exc:
                cprint("Exception while opening file: `%s`" % str(f), "red")
                cprint(exc, "red")
                abifile = None

            if abifile is not None:
                label = abifile.filepath if labels is None else labels[i]
                items.append((label, abifile))

        new = cls(*items)
        if labels is None and not abspath: new.trim_paths(start=None)
        return new
Пример #23
0
 def remove_variables(self, keys):
     """Remove the variables listed in keys."""
     for key in list_strings(keys):
         if key not in self:
             raise KeyError("key: %s not in self:\n %s" % (key, list(self.keys())))
         #self.pop(key, None)
         self.pop(key)
Пример #24
0
    def from_dojodir(cls, top, exclude_basenames=None):
        """
        Initialize the table of pseudos for one of the top level directories
        located in the pseudo_dojo.pseudos directory.

        Args:
            exclude_basenames: Optional string or list of strings with the 
                pseudo basenames to be excluded.

        .. warning::
            
            The table may contain multiple pseudos for a given chemical element.
            Don't use this method unless you need this feature and you know what 
            you are doing.
        """
        # Read metadata from the __init__.py file
        import imp
        module_name = os.path.join(top, "__init__.py")
        meta = imp.load_source(module_name, os.path.join(top, "__init__.py") )

        # Gather all pseudos starting from the current working directory 
        all_symbols = set(element.symbol for element in PeriodicTable().all_elements)
        dirs = [os.path.join(top, d) for d in os.listdir(top) if d in all_symbols]

        exclude = set(list_strings(exclude_basenames)) if exclude_basenames is not None else set()

        paths = []
        for dir in dirs:
            paths.extend(os.path.join(dir, f) for f in os.listdir(dir) 
                         if f.endswith(meta.pseudo_ext)
                         and f not in exclude #!= "Sr-sp.psp8"
                         )

        new = cls(paths).sort_by_z()
        return new
Пример #25
0
    def parse(self, filenames):
        """
        Read and parse a filename or a list of filenames.
        Files that cannot be opened are ignored. A single filename may also be given.

        Return: list of successfully read files.
        """
        filenames = list_strings(filenames)

        read_ok = []
        for fname in filenames:
            try:
                fh = open(fname)
            except IOError:
                logger.warning("Cannot open file %s" % fname)
                continue

            try:
                self._read(fh, fname)
                read_ok.append(fname)

            except self.Error as e:
                logger.warning("exception while parsing file %s:\n%s" %
                               (fname, str(e)))
                continue

            finally:
                fh.close()

        # Add read_ok to the list of files that have been parsed.
        self._filenames.extend(read_ok)
        return read_ok
Пример #26
0
    def get_eos_fits_dataframe(self, eos_names="murnaghan"):
        """
        Fit energy as function of volume to get the equation of state,
        equilibrium volume, bulk modulus and its derivative wrt to pressure.

        Args:
            eos_names: String or list of strings with EOS names.
                For the list of available models, see pymatgen.analysis.eos.

        Return:
            (fits, dataframe) namedtuple.
                fits is a list of ``EOSFit object``
                dataframe is a |pandas-DataFrame| with the final results.
        """
        # Read volumes and energies from the GSR files.
        energies, volumes = [], []
        for label, gsr in self.items():
            energies.append(float(gsr.energy))
            volumes.append(float(gsr.structure.volume))

        # Order data by volumes if needed.
        if np.any(np.diff(volumes) < 0):
            ves = sorted(zip(volumes, energies), key=lambda t: t[0])
            volumes = [t[0] for t in ves]
            energies = [t[1] for t in ves]

        # Note that eos.fit expects lengths in Angstrom, and energies in eV.
        # I'm also monkey-patching the plot method.
        from pymatgen.analysis.eos import EOS
        if eos_names == "all":
            # Use all the available models.
            eos_names = [
                n for n in EOS.MODELS
                if n not in ("deltafactor", "numerical_eos")
            ]
        else:
            eos_names = list_strings(eos_names)

        fits, index, rows = [], [], []
        for eos_name in eos_names:
            try:
                fit = EOS(eos_name=eos_name).fit(volumes, energies)
            except Exception as exc:
                cprint("EOS %s raised exception:\n%s" % (eos_name, str(exc)))
                continue

            # Replace plot with plot_ax method
            fit.plot = fit.plot_ax
            fits.append(fit)
            index.append(eos_name)
            rows.append(
                OrderedDict([(aname, getattr(fit, aname))
                             for aname in ("v0", "e0", "b0_GPa", "b1")]))

        dataframe = pd.DataFrame(
            rows, index=index, columns=list(rows[0].keys()) if rows else None)
        return dict2namedtuple(fits=fits, dataframe=dataframe)
Пример #27
0
    def get_msq_tmesh(self, tmesh, iatom_list=None, what_list=("displ", "vel")):
        """
        Compute mean square displacement for each atom in `iatom_list` as a function of T.
        in Cartesian coordinates and atomic-units.

        Args:
            tmesh: array-like with temperatures in Kelvin.
            iatom_list: List of atom sites to compute. None if all aomts are wanted.
            what_list: "displ" for displacement, "vel" for velocity tensor.

        Return:
            namedtuple with (tmesh=tmesh, displ=msq_d, vel=msq_v)

            msq_d = np.empty((natom, 3, 3, nt))
        """
        tmesh = np.array(tmesh)
        nt = len(tmesh)

        # Frequency mesh starts at iomin to avoid 1/0 and ignore eventual negative frequencies.
        for iomin, w in enumerate(self.wmesh):
            if w > 1e-12: break
        else:
            raise ValueError("Cannot find index such that wmesh[i] > 1e-12 !!!")
        wvals = self.wmesh[iomin:]
        nw = len(wvals)

        # We will compute: Ucart(T, k, ij) = 1/M_k \int dw (n(w) + 1/2) g_ij(w) / w for the k-atom in a.u.
        # Calculate Bose-Einstein occupation factors only once for each T (instead of for each atom).
        npht = np.zeros((nt, nw))
        for it, temp in enumerate(tmesh):
            npht[it] = abu.occ_be(wvals, temp * abu.kb_HaK) + 0.5

        natom = len(self.structure)
        msq_d = np.empty((natom, 3, 3, nt))
        msq_v = np.empty((natom, 3, 3, nt))
        what_list = list_strings(what_list)

        # Perform frequency integration to get tensor(T)
        from scipy.integrate import simps
        if iatom_list is not None: iatom_list = set(iatom_list)
        for iatom in range(natom):
            if iatom_list is not None and iatom not in iatom_list: continue
            symbol = self.structure[iatom].specie.symbol
            for it in range(nt):
                fn = self.values[iatom, :, :, iomin:] * npht[it]
                if "displ" in what_list:
                    # Mean square displacement for each atom as a function of T (bohr^2).
                    ys = fn / wvals
                    fact = 1.0 / (self.amu_symbol[symbol] * abu.amu_emass)
                    msq_d[iatom, :, :, it] = simps(ys, x=wvals) * fact * abu.Bohr_Ang ** 2
                if "vel" in what_list:
                    # Mean square velocity for each atom as a function of T (bohr^2/atomic time unit^2)"
                    ys = fn * wvals
                    fact = 1.0 / (self.amu_symbol[symbol] * abu.amu_emass)
                    msq_v[iatom, :, :, it] = simps(ys, x=wvals) * fact # * abu.velocity_at_to_si ** 2

        return dict2namedtuple(tmesh=tmesh, displ=msq_d, vel=msq_v)
Пример #28
0
    def merge(self,
              workdir,
              ddb_files,
              out_ddb,
              description,
              delete_source_ddbs=True):
        """Merge DDB file, return the absolute path of the new database in workdir."""
        # We work with absolute paths.
        ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
        if not os.path.isabs(out_ddb):
            out_ddb = os.path.join(os.path.abspath(workdir),
                                   os.path.basename(out_ddb))

        if self.verbose:
            print("Will merge %d files into output DDB %s" %
                  (len(ddb_files), out_ddb))
            for i, f in enumerate(ddb_files):
                print(" [%d] %s" % (i, f))

        # Handle the case of a single file since mrgddb uses 1 to denote GS files!
        if len(ddb_files) == 1:
            with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
                for line in inh:
                    out.write(line)
            return out_ddb

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [os.path.abspath(workdir)], ["mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr"])

        inp = StringIO()
        inp.write(out_ddb + "\n")  # Name of the output file.
        inp.write(str(description) + "\n")  # Description.
        inp.write(str(len(ddb_files)) + "\n")  # Number of input DDBs.

        # Names of the DDB files.
        for fname in ddb_files:
            inp.write(fname + "\n")

        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "wt") as fh:
            fh.writelines(self.stdin_data)
            # Force OS to write data to disk.
            fh.flush()
            os.fsync(fh.fileno())

        retcode = self.execute(workdir, exec_args=['--nostrict'])
        if retcode == 0 and delete_source_ddbs:
            # Remove ddb files.
            for f in ddb_files:
                try:
                    os.remove(f)
                except IOError:
                    pass

        return out_ddb
Пример #29
0
    def merge(self, workdir, pot_files, out_dvdb, delete_source=True):
        """
        Merge POT files containing 1st order DFPT potential
        return the absolute path of the new database in workdir.

        Args:
            delete_source: True if POT1 files should be removed after (successful) merge.
        """
        # We work with absolute paths.
        pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]
        if not os.path.isabs(out_dvdb):
            out_dvdb = os.path.join(os.path.abspath(workdir),
                                    os.path.basename(out_dvdb))

        if self.verbose:
            print("Will merge %d files into output DVDB %s" %
                  (len(pot_files), out_dvdb))
            for i, f in enumerate(pot_files):
                print(" [%d] %s" % (i, f))

        # Handle the case of a single file since mrgddb uses 1 to denote GS files!
        if len(pot_files) == 1:
            with open(pot_files[0], "r") as inh, open(out_dvdb, "w") as out:
                for line in inh:
                    out.write(line)
            return out_dvdb

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [os.path.abspath(workdir)], ["mrgdvdb.stdin", "mrgdvdb.stdout", "mrgdvdb.stderr"])

        inp = StringIO()
        inp.write(out_dvdb + "\n")  # Name of the output file.
        inp.write(str(len(pot_files)) + "\n")  # Number of input POT files.

        # Names of the POT files.
        for fname in pot_files:
            inp.write(fname + "\n")

        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "wt") as fh:
            fh.writelines(self.stdin_data)
            # Force OS to write data to disk.
            fh.flush()
            os.fsync(fh.fileno())

        retcode = self.execute(workdir)
        if retcode == 0 and delete_source:
            # Remove pot files.
            for f in pot_files:
                try:
                    os.remove(f)
                except IOError:
                    pass

        return out_dvdb
Пример #30
0
    def __init__(self, parent, dirpaths=None, filepaths=None, walk=True, wildcard="", **kwargs):
        """
        Args:
            parent:
                parent window
            dirpaths:
                List of directories to scan.
            filepaths
                List of filepaths (absolute paths).
            walk:
                True if we have to browse all files and directories starting from filepaths.
            wildcard
                Regular expressions for selecting files (tokens are separated by |).
        """
        super(FileListFrame, self).__init__(parent, -1, **kwargs)

        if dirpaths is not None:
            dirpaths = map(os.path.abspath, list_strings(dirpaths))
        else:
            dirpaths = []

        if filepaths is not None:
            filepaths = map(os.path.abspath, list_strings(filepaths))
        else:
            filepaths = []

        wildcard = WildCard(wildcard)

        self.all_filepaths = filepaths

        if walk:
            for dirpath in dirpaths:
                for root, dirnames, filenames in os.walk(dirpath):
                    fnames = [os.path.join(root, f) for f in filenames]
                    self.all_filepaths += wildcard.filter(fnames)
        else:
            # Select only the files in dirpaths.
            for dirpath in dirpaths:
                fnames = [os.path.join(dirpat, f) for f in os.listdir(dirpath)]
                fnames = filter(os.path.isfile, fnames)
                self.all_filepaths += wildcard.filter(fnames)

        self.BuildUi()
Пример #31
0
    def from_dojodir(cls, top, exclude_wildcard=None, exclude_basenames=None):
        """
        Initialize the table from one of the top level directories located
        in the pseudo_dojo.pseudos directory.

        Args:
            top: top level directory
            exclude_basenames: Optional string or list of strings with the
                pseudo basenames to be excluded.
            exclude_wildcard: String of tokens separated by "|". Each token represents a pattern.
                to be exluded
                Example:
                  wildcard="*_r.psp8|*.xml" selects only those files that do not end with _r.psp8 or .xml

        .. warning::

            The table may contain multiple pseudos for a given chemical element.
            Don't use this method unless you need this feature and you know what you are doing.
        """
        # Read metadata from the __init__.py file
        import imp
        module_name = os.path.join(top, "__init__.py")
        if not os.path.isfile(module_name):
            raise RuntimeError("__init_.py file is missing in dir: %s" % top)

        meta = imp.load_source(module_name, os.path.join(top, "__init__.py"))

        # Gather all pseudos starting from the current working directory
        all_symbols = set(e.symbol for e in Element)
        dirs = [
            os.path.join(top, d) for d in os.listdir(top) if d in all_symbols
        ]

        exclude = set(list_strings(
            exclude_basenames)) if exclude_basenames is not None else set()

        paths = []
        for dr in dirs:
            paths.extend(
                os.path.join(dr, f) for f in os.listdir(dr)
                if f.endswith(meta.pseudo_ext) and f not in exclude)

        if exclude_wildcard is not None:
            wild = WildCard(exclude_wildcard)
            paths = [p for p in paths if not wild.match(os.path.basename(p))]

        pseudos = []
        for p in paths:
            pseudo = dojopseudo_from_file(p)
            if pseudo is None:
                print("Error while parsing:", p)
                continue
            pseudos.append(pseudo)

        return cls(pseudos).sort_by_z()
Пример #32
0
def makeAboutBox(codename,
                 version,
                 description,
                 developers,
                 website=None,
                 icon_path=None):

    licence = """%(codename)s is free software; you can redistribute 
it and/or modify it under the terms of the GNU General Public License as 
published by the Free Software Foundation; either version 2 of the License, 
or (at your option) any later version.

%(codename)s is distributed in the hope that it will be useful, 
but WITHOUT ANY WARRANTY; without even the implied warranty of 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for more details. You should have 
received a copy of the GNU General Public License along with the code; 
if not, write to the Free Software Foundation, Inc., 59 Temple Place, 
Suite 330, Boston, MA  02111-1307  USA""" % {
        "codename": codename
    }

    # Make a template for the description
    #desc = "\n".join(["\nwxPython Cookbook Chapter 5\n",
    #                  "Platform Info: (%s,%s)",
    #                  "License: Public Domain"])

    ## Get the platform information
    #py_version = [sys.platform, ", python ", sys.version.split()[0]]
    #platform = list(wx.PlatformInfo[1:])
    #platform[0] += (" " + wx.VERSION_STRING)
    #wx_info = ", ".join(platform)

    #info.SetDescription(desc % (py_version, wx_info))

    info = wx.AboutDialogInfo()

    if icon_path is not None:
        info.SetIcon(wx.Icon(icon_path, wx.BITMAP_TYPE_PNG))

    info.SetName(codename)
    info.SetVersion(version)
    info.SetDescription(description)
    info.SetCopyright('(C) Abipy group')

    if website is not None:
        info.SetWebSite(website)

    info.SetLicence(licence)

    for dev in list_strings(developers):
        info.AddDeveloper(dev)

    wx.AboutBox(info)
Пример #33
0
 def vars_with_section(self, sections):
     """
     List of :class:`Variable` associated to the given sections.
     sections can be a string or a list of strings.
     """
     sections = set(list_strings(sections))
     varlist = []
     for v in self.values():
         if v.section in sections:
             varlist.append(v)
     return varlist
Пример #34
0
    def __init__(self,
                 pseudos,
                 pseudo_dir="",
                 structure=None,
                 ndtset=1,
                 comment="",
                 decorators=None):
        """
        Args:
            pseudos: String or list of string with the name of the pseudopotential files.
            pseudo_dir: Name of the directory where the pseudopotential files are located.
            structure: file with the structure, :class:`Structure` object or dictionary with ABINIT geo variable
            ndtset: Number of datasets.
            comment: Optional string with a comment that will be placed at the beginning of the file.
            decorators: List of `AbinitInputDecorator` objects.
        """
        # Dataset[0] contains the global variables common to the different datasets
        # Dataset[1:ndtset+1] stores the variables specific to the different datasets.
        self._ndtset = ndtset

        self._datasets = []
        for i in range(ndtset + 1):
            dt0 = None
            if i > 0: dt0 = self._datasets[0]
            self._datasets.append(Dataset(index=i, dt0=dt0))

        self._datasets[0]["ndtset"] = ndtset

        # Setup of the pseudopotential files.
        if isinstance(pseudos, PseudoTable):
            self._pseudos = pseudos

        elif all(isinstance(p, Pseudo) for p in pseudos):
            self._pseudos = PseudoTable(pseudos)

        else:
            # String(s)
            pseudo_dir = os.path.abspath(pseudo_dir)
            pseudo_paths = [
                os.path.join(pseudo_dir, p) for p in list_strings(pseudos)
            ]

            missing = [p for p in pseudo_paths if not os.path.exists(p)]
            if missing:
                raise self.Error(
                    "Cannot find the following pseudopotential files:\n%s" %
                    str(missing))

            self._pseudos = PseudoTable(pseudo_paths)

        if structure is not None: self.set_structure(structure)
        if comment is not None: self.set_comment(comment)

        self._decorators = [] if not decorators else decorators
Пример #35
0
    def merge(self, workdir, pot_files, out_dvdb, delete_source=True):
        """
        Merge POT files containing 1st order DFPT potential
        return the absolute path of the new database in workdir.

        Args:
            delete_source: True if POT1 files should be removed after (successful) merge.
        """
        # We work with absolute paths.
        pot_files = [os.path.abspath(s) for s in list_strings(pot_files)]
        if not os.path.isabs(out_dvdb):
            out_dvdb = os.path.join(os.path.abspath(workdir), os.path.basename(out_dvdb))

        if self.verbose:
            print("Will merge %d files into output DVDB %s" % (len(pot_files), out_dvdb))
            for i, f in enumerate(pot_files):
                print(" [%d] %s" % (i, f))

        # Handle the case of a single file since mrgddb uses 1 to denote GS files!
        if len(pot_files) == 1:
            with open(pot_files[0], "r") as inh, open(out_dvdb, "w") as out:
                for line in inh:
                    out.write(line)
            return out_dvdb

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [os.path.abspath(workdir)], ["mrgdvdb.stdin", "mrgdvdb.stdout", "mrgdvdb.stderr"])

        inp = StringIO()
        inp.write(out_dvdb + "\n")             # Name of the output file.
        inp.write(str(len(pot_files)) + "\n")  # Number of input POT files.

        # Names of the POT files.
        for fname in pot_files:
            inp.write(fname + "\n")

        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "wt") as fh:
            fh.writelines(self.stdin_data)
            # Force OS to write data to disk.
            fh.flush()
            os.fsync(fh.fileno())

        retcode = self.execute(workdir)
        if retcode == 0 and delete_source:
            # Remove pot files.
            for f in pot_files:
                try:
                    os.remove(f)
                except IOError:
                    pass

        return out_dvdb
Пример #36
0
    def edit_files(self, fnames, ask_for_exit=True):
        for (idx, fname) in enumerate(list_strings(fnames)):
            exit_status = self.edit_file(fname)

            if exit_status != 0:
                return exit_status

            if ask_for_exit and idx != len(fnames) - 1 and _user_wants_to_exit():
                break

        return 0
Пример #37
0
    def edit_files(self, fnames, ask_for_exit=True):
        for (idx, fname) in enumerate(list_strings(fnames)):
            exit_status = self.edit_file(fname)

            if exit_status != 0:
                return exit_status

            if ask_for_exit and idx != len(fnames) - 1 and _user_wants_to_exit():
                break

        return 0
Пример #38
0
 def vars_with_section(self, sections):
     """
     List of :class:`Variable` associated to the given sections.
     sections can be a string or a list of strings.
     """
     sections = set(list_strings(sections))
     vars = []
     for v in self.values():
         if v.section in sections:
             vars.append(v)
     return vars
Пример #39
0
    def __init__(
        self,
        structure,
        pseudos,
        pseudo_dir=None,
        comment=None,
        abi_args=None,
        abi_kwargs=None,
    ):
        """
        Args:
            structure: Parameters defining the crystalline structure. Accepts |Structure| object
            file with structure (CIF, netcdf file, ...) or dictionary with ABINIT geo variables.
            pseudos: Pseudopotentials to be used for the calculation. Accepts: string or list of strings
                with the name of the pseudopotential files, list of |Pseudo| objects
                or |PseudoTable| object.
            pseudo_dir: Name of the directory where the pseudopotential files are located.
            ndtset: Number of datasets.
            comment: Optional string with a comment that will be placed at the beginning of the file.
            abi_args: list of tuples (key, value) with the initial set of variables. Default: Empty
            abi_kwargs: Dictionary with the initial set of variables. Default: Empty
        """
        # Internal dict with variables. we use an ordered dict so that
        # variables will be likely grouped by `topics` when we fill the input.
        abi_args = [] if abi_args is None else abi_args
        for key, value in abi_args:
            self._check_varname(key)

        abi_kwargs = {} if abi_kwargs is None else abi_kwargs
        for key in abi_kwargs:
            self._check_varname(key)

        args = list(abi_args)
        args.extend(list(abi_kwargs.items()))

        self._vars = dict(args)
        self.set_structure(structure)

        if pseudo_dir is not None:
            pseudo_dir = os.path.abspath(pseudo_dir)
            if not os.path.exists(pseudo_dir):
                raise self.Error(f"Directory {pseudo_dir} does not exist")
            pseudos = [
                os.path.join(pseudo_dir, p) for p in list_strings(pseudos)
            ]

        try:
            self._pseudos = PseudoTable.as_table(
                pseudos).get_pseudos_for_structure(self.structure)
        except ValueError as exc:
            raise self.Error(str(exc))

        if comment is not None:
            self.set_comment(comment)
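A hedged construction sketch (the class name AbinitInput and all file names and values below are illustrative, not taken from the snippet):

inp = AbinitInput(structure="si.cif", pseudos="14si.psp8",
                  pseudo_dir="/path/to/pseudos",
                  abi_kwargs={"ecut": 8, "nband": 12})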
Example #40
    def get_elastic_properties_dataframe(self,
                                         tensor_names="all",
                                         properties_as_index=False,
                                         include_base_props=True,
                                         ignore_errors=False,
                                         fit_to_structure=False,
                                         symprec=0.1):
        """
        Return a |pandas-DataFrame| with properties derived from the elastic tensor
        and the associated structure.

        Args:
            tensor_names: "all", or a string/list of strings selecting the tensors to consider.
                Possible names: "elastic_relaxed", "elastic_clamped", "elastic_stress_corr", "elastic_relaxed_fixed_D".
            properties_as_index (bool): if True, transpose the dataframe so that each row
                corresponds to a property instead of a tensor.
            include_base_props (bool): whether to include base properties, like k_vrh, etc.
            ignore_errors (bool): if set to True, properties that depend on a problematic
                physical tensor are set to None. Defaults to False.
            fit_to_structure (bool): If True, properties are computed with both the original tensors
                and the symmetrized tensors. An additional column `fit_to_structure` is added to the dataframe.
            symprec (float): symmetry tolerance for the SpacegroupAnalyzer
                used to generate the symmetry operations if `fit_to_structure` is True.
        """
        tensor_names = self.ALL_ELASTIC_TENSOR_NAMES if tensor_names == "all" else list_strings(
            tensor_names)
        do_fits = [False] if not fit_to_structure else [True, False]
        rows = []
        for name, tensor in self.name_tensor_list(tensor_names=tensor_names):
            for do_fit in do_fits:
                if do_fit:
                    tensor = tensor.fit_to_structure(self.structure,
                                                     symprec=symprec)
                d = tensor.get_structure_property_dict(
                    self.structure,
                    include_base_props=include_base_props,
                    ignore_errors=ignore_errors)
                d.pop("structure")
                # Add column telling whether fit has been performed
                if len(do_fits) > 1: d["fit_to_structure"] = do_fit
                d["tensor_name"] = name
                rows.append(d)

        df = pd.DataFrame(rows, columns=list(rows[0].keys()) if rows else None)

        if properties_as_index:
            # TODO
            # Return transpose to have (i,j) as index and tensor names as columns
            #columns = df.columns
            df = df.drop(columns="tensor_name").T
            df.index.name = "property"
            #df.columns = columns
            return df.reset_index()
        else:
            return df
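A hedged usage sketch (`edata` stands for the object exposing this method; the tensor name is one of those listed in the docstring):

df = edata.get_elastic_properties_dataframe(tensor_names="elastic_relaxed",
                                            fit_to_structure=True)
print(df[["tensor_name", "fit_to_structure"]])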
Example #41
    def vars_with_char(self, chars):
        """
        List of :class:`Variable` with the specified characteristic.
        chars can be a string or a list of strings.
        """
        chars = set(list_strings(chars))
        vars = []
        for v in self.values():
            #if v.characteristic: print(v.characteristic)
            if v.characteristic in chars:
                vars.append(v)
        return vars
Example #42
    def vars_with_char(self, chars):
        """
        List of :class:`Variable` with the specified characteristic.
        chars can be a string or a list of strings.
        """
        chars = set(list_strings(chars))
        varlist = []
        for v in self.values():
            #if v.characteristic: print(v.characteristic)
            if v.characteristic in chars:
                varlist.append(v)
        return varlist
Example #43
    def get_eos_fits_dataframe(self, eos_names="murnaghan"):
        """
        Fit energy as a function of volume to get the equation of state,
        the equilibrium volume, the bulk modulus and its derivative with respect to pressure.

        Args:
            eos_names: String or list of strings with EOS names.
                For the list of available models, see pymatgen.analysis.eos.

        Return:
            (fits, dataframe) namedtuple.
                fits is a list of ``EOSFit object``
                dataframe is a |pandas-DataFrame| with the final results.
        """
        # Read volumes and energies from the GSR files.
        energies, volumes = [], []
        for label, gsr in self.items():
            energies.append(float(gsr.energy))
            volumes.append(float(gsr.structure.volume))

        # Order data by volumes if needed.
        if np.any(np.diff(volumes) < 0):
            ves = sorted(zip(volumes, energies), key=lambda t: t[0])
            volumes = [t[0] for t in ves]
            energies = [t[1] for t in ves]

        # Note that eos.fit expects volumes in Angstrom**3 and energies in eV.
        # I'm also monkey-patching the plot method.
        from pymatgen.analysis.eos import EOS
        if eos_names == "all":
            # Use all the available models.
            eos_names = [n for n in EOS.MODELS if n not in ("deltafactor", "numerical_eos")]
        else:
            eos_names = list_strings(eos_names)

        fits, index, rows = [], [], []
        for eos_name in eos_names:
            try:
                fit = EOS(eos_name=eos_name).fit(volumes, energies)
            except Exception as exc:
                cprint("EOS %s raised exception:\n%s" % (eos_name, str(exc)))
                continue

            # Replace plot with plot_ax method
            fit.plot = fit.plot_ax
            fits.append(fit)
            index.append(eos_name)
            rows.append(OrderedDict([(aname, getattr(fit, aname)) for aname in
                ("v0", "e0", "b0_GPa", "b1")]))

        dataframe = pd.DataFrame(rows, index=index, columns=list(rows[0].keys()) if rows else None)
        return dict2namedtuple(fits=fits, dataframe=dataframe)
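A hedged usage sketch (`robot` stands for the GSR robot this method belongs to; both model names are available in pymatgen.analysis.eos):

r = robot.get_eos_fits_dataframe(eos_names=["murnaghan", "birch_murnaghan"])
print(r.dataframe)   # one row per EOS model with columns v0, e0, b0_GPa, b1
r.fits[0].plot()     # actually plot_ax, thanks to the monkey-patch above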
Example #44
    def __init__(self, parent, filepaths=(), **kwargs):
        """
        Args:
            parent:
                parent window.
            filepaths:
                String or list of strings with the paths of the nc files to open.
                Empty tuple if no file should be opened during the initialization of the frame.
        """
        if "size" not in kwargs:
            kwargs["size"] = (1200, 800)

        super(NcViewerFrame, self).__init__(parent, id=-1, **kwargs)

        # This combination of options for config seems to work on my Mac.
        self.config = wx.FileConfig(appName=self.codename,
                                    localFilename=self.codename + ".ini",
                                    style=wx.CONFIG_USE_LOCAL_FILE)

        # Build menu, toolbar and status bar.
        self.makeMenu()
        self.makeToolBar()
        self.statusbar = self.CreateStatusBar()

        # Open netcdf files.
        filepaths, datasets = list_strings(filepaths), []
        # Materialize the list: a lazy map object would be exhausted by the first
        # loop below, leaving zip(filepaths, datasets) empty in Python 3.
        filepaths = [os.path.abspath(p) for p in filepaths]

        for path in filepaths:
            datasets.append(netCDF4.Dataset(path, mode="r"))
            self.AddFileToHistory(path)

        # Create the notebook (each file will have its own tab).
        panel = wx.Panel(self, -1)
        try:
            self.notebook = fnb.FlatNotebook(
                panel, -1, style=fnb.FNB_NAV_BUTTONS_WHEN_NEEDED)
        except AttributeError:
            self.notebook = fnb.FlatNotebook(panel, -1)

        for path, dataset in zip(filepaths, datasets):
            tab = NcFileTab(self.notebook, dataset)
            self.notebook.AddPage(tab, os.path.basename(path))

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.notebook, 1, wx.EXPAND, 5)
        panel.SetSizerAndFit(sizer)

        self.Bind(wx.EVT_CLOSE, self.OnExit)

        # Intercept the command event associated to variable/dimension comparison.
        self.Bind(EVT_COMPARE, self.OnCompare)
Example #45
    def __init__(self, parent, filepaths, **kwargs):
        """
        Args:
            parent:
                Parent window.
            filepaths:
                String or List of strings with filepaths.
        """
        super(FileCheckBoxPanel, self).__init__(parent, -1, **kwargs)

        self.all_filepaths = list_strings(filepaths)

        self.BuildUi()
Example #46
def x2_grid(what_list):
    """
    Build an (x, 2) grid of plots, or just (1, 1), depending on the length of what_list.

    Return: (num_plots, ncols, nrows, what_list)
    """
    what_list = list_strings(what_list)
    num_plots, ncols, nrows = len(what_list), 1, 1
    if num_plots > 1:
        ncols = 2
        nrows = (num_plots // ncols) + (num_plots % ncols)

    return num_plots, ncols, nrows, what_list
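Since x2_grid is a pure function, its behavior can be checked directly:

assert x2_grid("dos") == (1, 1, 1, ["dos"])
assert x2_grid(["dos", "bands", "gaps"]) == (3, 2, 2, ["dos", "bands", "gaps"])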
Example #47
    def filter(self, names):
        """
        Return the subset of names matching at least one of the patterns.
        """
        names = list_strings(names)

        fnames = []
        for f in names:
            for pat in self.pats:
                if fnmatch.fnmatch(f, pat):
                    fnames.append(f)
                    break  # Don't append the same name twice if several patterns match.

        return fnames
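A hedged usage sketch, assuming self.pats is built from a "|"-separated pattern string as in the WildCard class of Example #50:

wild = WildCard("*.nc|*.log")
wild.filter(["gsr.nc", "run.log", "run.abi"])   # -> ["gsr.nc", "run.log"]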
Example #50
    def from_dojodir(cls, top, exclude_wildcard=None, exclude_basenames=None):
        """
        Initialize the table from one of the top level directories located
        in the pseudo_dojo.pseudos directory.

        Args:
            top: top level directory
            exclude_basenames: Optional string or list of strings with the
                pseudo basenames to be excluded.
            exclude_wildcard: String of tokens separated by "|". Each token represents a pattern.
                to be exluded
                Example:
                  wildcard="*_r.psp8|*.xml" selects only those files that do not end with _r.psp8 or .xml

        .. warning::

            The table may contain multiple pseudos for a given chemical element.
            Don't use this method unless you need this feature and you know what you are doing.
        """
        # Read metadata from the __init__.py file
        import imp
        module_name = os.path.join(top, "__init__.py")
        if not os.path.isfile(module_name):
            raise RuntimeError("__init_.py file is missing in dir: %s" % top)

        meta = imp.load_source(module_name, os.path.join(top, "__init__.py") )

        # Gather all pseudos starting from the current working directory
        all_symbols = set(e.symbol for e in Element)
        dirs = [os.path.join(top, d) for d in os.listdir(top) if d in all_symbols]

        exclude = set(list_strings(exclude_basenames)) if exclude_basenames is not None else set()

        paths = []
        for dr in dirs:
            paths.extend(os.path.join(dr, f) for f in os.listdir(dr)
                         if f.endswith(meta.pseudo_ext) and f not in exclude)

        if exclude_wildcard is not None:
            wild = WildCard(exclude_wildcard)
            paths = [p for p in paths if not wild.match(os.path.basename(p))]

        pseudos = []
        for p in paths:
            pseudo = dojopseudo_from_file(p)
            if pseudo is None:
                print("Error while parsing:", p)
                continue
            pseudos.append(pseudo)

        return cls(pseudos).sort_by_z()
Example #51
    def from_dirs(cls, dirpaths, walk=True, abspath=False):
        """
        Similar to `from_dir` but accepts a list of directories instead of a single directory.

        Args:
            walk: if True, directories inside `top` are included as well.
            abspath: True if paths in index should be absolute. Default: relative to `top`.
        """
        items = []
        for top in list_strings(dirpaths):
            items.extend(cls._open_files_in_dir(top, walk))
        new = cls(*items)
        if not abspath: new.trim_paths(start=os.getcwd())
        return new
Example #53
    def __init__(self, pseudos, pseudo_dir="", ndtset=1, comment=""):
        """
        Args:
            pseudos:
                String or list of strings with the names of the pseudopotential files.
            pseudo_dir:
                Name of the directory where the pseudopotential files are located.
            ndtset:
                Number of datasets.
            comment:
                Optional string with a comment that will be placed at the beginning of the file.
        """
        # Dataset[0] contains the global variables common to the different datasets
        # Dataset[1:ndtset+1] stores the variables specific to the different datasets.
        self._ndtset = ndtset

        self._datasets = []
        for i in range(ndtset+1):
            dt0 = None
            if i > 0: dt0 = self._datasets[0]
            self._datasets.append(Dataset(index=i, dt0=dt0))

        self._datasets[0]["ndtset"] = ndtset

        # Setup of the pseudopotential files.
        if isinstance(pseudos, PseudoTable):
            self._pseudos = pseudos

        elif all(isinstance(p, Pseudo) for p in pseudos):
            self._pseudos = PseudoTable(pseudos)

        else:
            # String(s)
            pseudo_dir = os.path.abspath(pseudo_dir)
            pseudo_paths = [os.path.join(pseudo_dir, p) for p in list_strings(pseudos)]

            missing = [p for p in pseudo_paths if not os.path.exists(p)]
            if missing:
                raise self.Error("Cannot find the following pseudopotential files:\n%s" % str(missing)) 

            #try:
            self._pseudos = PseudoTable(pseudo_paths)
            #except Exception as exc:
            #    msg = "\nIgnoring error raised while parsing pseudopotential files:\n Backtrace:" + straceback()
            #    warnings.warn(msg)
            #    self._pseudos = []

        if comment:
            self.set_comment(comment)
Example #54
    def merge(self, workdir, ddb_files, out_ddb, description, delete_source_ddbs=True):
        """Merge DDB file, return the absolute path of the new database in workdir."""
        # We work with absolute paths.
        ddb_files = [os.path.abspath(s) for s in list_strings(ddb_files)]
        if not os.path.isabs(out_ddb):
            out_ddb = os.path.join(os.path.abspath(workdir), os.path.basename(out_ddb))

        if self.verbose:
            print("Will merge %d files into output DDB %s" % (len(ddb_files), out_ddb))
            for i, f in enumerate(ddb_files):
                print(" [%d] %s" % (i, f))

        # Handle the case of a single file since mrgddb uses 1 to denote GS files!
        if len(ddb_files) == 1:
            with open(ddb_files[0], "r") as inh, open(out_ddb, "w") as out:
                for line in inh:
                    out.write(line)
            return out_ddb

        self.stdin_fname, self.stdout_fname, self.stderr_fname = \
            map(os.path.join, 3 * [os.path.abspath(workdir)], ["mrgddb.stdin", "mrgddb.stdout", "mrgddb.stderr"])

        inp = StringIO()
        inp.write(out_ddb + "\n")              # Name of the output file.
        inp.write(str(description) + "\n")     # Description.
        inp.write(str(len(ddb_files)) + "\n")  # Number of input DDBs.

        # Names of the DDB files.
        for fname in ddb_files:
            inp.write(fname + "\n")

        # NB: getvalue() is split into single characters here; writelines() simply
        # concatenates them, so the file content is unchanged.
        self.stdin_data = [s for s in inp.getvalue()]

        with open(self.stdin_fname, "wt") as fh:
            fh.writelines(self.stdin_data)
            # Force OS to write data to disk.
            fh.flush()
            os.fsync(fh.fileno())

        retcode = self.execute(workdir, exec_args=['--nostrict'])
        if retcode == 0 and delete_source_ddbs:
            # Remove ddb files.
            for f in ddb_files:
                try:
                    os.remove(f)
                except IOError:
                    pass

        return out_ddb
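For reference, the mrgddb.stdin file written above contains the output path, the description string, the number of DDB files, and one path per line; a sketch with hypothetical paths:

/abs/workdir/out_DDB
DDB files merged by abipy
2
/abs/path/out1_DDB
/abs/path/out2_DDB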
Example #55
    def reset(self, status_list="failed", write=True):
        """
        Reset all the records whose status is in `status_list` so that we can resubmit them.
        Return the number of records that have been reset.
        """
        status_list = list_strings(status_list)
        count = 0
        for struct_type, formula, data in self.iter_struct_formula_data():
            if data in status_list:
                self[struct_type][formula] = None
                count += 1

        # Update the database.
        if count and write: self.json_write()
        return count
Example #56
def makeAboutBox(codename, version, description, developers, website=None, icon_path=None):

    licence = """%(codename)s is free software; you can redistribute 
it and/or modify it under the terms of the GNU General Public License as 
published by the Free Software Foundation; either version 2 of the License, 
or (at your option) any later version.

%(codename)s is distributed in the hope that it will be useful, 
but WITHOUT ANY WARRANTY; without even the implied warranty of 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU General Public License for more details. You should have 
received a copy of the GNU General Public License along with the code; 
if not, write to the Free Software Foundation, Inc., 59 Temple Place, 
Suite 330, Boston, MA  02111-1307  USA""" % {"codename": codename}

    # Make a template for the description 
    #desc = "\n".join(["\nwxPython Cookbook Chapter 5\n",
    #                  "Platform Info: (%s,%s)",
    #                  "License: Public Domain"])

    ## Get the platform information 
    #py_version = [sys.platform, ", python ", sys.version.split()[0]] 
    #platform = list(wx.PlatformInfo[1:])
    #platform[0] += (" " + wx.VERSION_STRING) 
    #wx_info = ", ".join(platform)

    #info.SetDescription(desc % (py_version, wx_info))

    info = wx.AboutDialogInfo()

    if icon_path is not None:
        info.SetIcon(wx.Icon(icon_path, wx.BITMAP_TYPE_PNG))

    info.SetName(codename)
    info.SetVersion(version)
    info.SetDescription(description)
    info.SetCopyright('(C) Abipy group')

    if website is not None:
        info.SetWebSite(website)

    info.SetLicence(licence)

    for dev in list_strings(developers):
        info.AddDeveloper(dev)

    wx.AboutBox(info)
Example #57
    def from_flow(cls, flow, outdirs="all", nids=None):
        """
        Build a robot from a Flow.

        Args:
            flow: :class:`Flow` object
            outdirs: String used to select/ignore the files in the output directory of flow, works and tasks
                outdirs="work" selects only the outdir of the Works,
                outdirs="flow+task" selects the outdir of the Flow and the outdirs of the tasks
                outdirs="-work" excludes the outdir of the Works.
                Cannot use `+` and `-` flags in the same string.
                Default: `all` that is equivalent to "flow+work+task"
            nids: List of node identifiers used to select particular nodes. Not used if None

        Returns:
            `Robot` subclass.
        """
        robot = cls()
        all_opts = ["flow", "work", "task"]

        if outdirs == "all":
            tokens = all_opts
        elif "+" in outdirs:
            assert "-" not in outdirs
            tokens = outdirs.split("+")
        elif "-" in outdirs:
            assert "+" not in outdirs
            tokens = [s for s in all_opts if s not in outdirs.split("-")]
        else:
            tokens = list_strings(outdirs)

        if not all(t in all_opts for t in tokens):
            raise ValueError("Wrong outdirs string %s" % outdirs)

        if "flow" in tokens:
            robot.add_extfile_of_node(flow, nids=nids)

        if "work" in tokens:
            for work in flow:
                robot.add_extfile_of_node(work, nids=nids)

        if "task" in tokens:
            for task in flow.iflat_tasks():
                #print("task %s, nids %s" %  (task, nids))
                robot.add_extfile_of_node(task, nids=nids)

        return robot
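A hedged usage sketch (GsrRobot is an assumed Robot subclass; the outdirs strings follow the grammar described in the docstring):

robot = GsrRobot.from_flow(flow, outdirs="flow+task")  # outdir of the Flow plus the tasks' outdirs
robot = GsrRobot.from_flow(flow, outdirs="-work")      # everything except the Works' outdirs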
Example #58
    def __init__(self, parent, filenames, num_dirs=2, **kwargs):
        """
        Args:
            parent:
                Parent Widget.
            filenames:
                List of filenames.
            num_dirs:
                Maximum number of directories that will be shown in the tab.
        """
        if "title" not in kwargs:
            kwargs["title"] = "Abinit Events"

        super(AbinitEventsNotebookFrame, self).__init__(parent, **kwargs)

        filenames = list_strings(filenames)

        # Remove non-existent files. Materialize the list: in Python 3, filter()
        # returns a lazy iterator and the emptiness test below would always be False.
        filenames = list(filter(os.path.exists, filenames))

        if not filenames:
            return

        # Here we create a panel and a notebook on the panel
        panel = awx.Panel(self)

        nb = fnb.FlatNotebook(panel)

        for fname in filenames:
            page = AbinitEventsPanel(nb, fname)
            page_name = fname

            if num_dirs > 0:
                tokens = page_name.split(os.path.sep)
                page_name = os.path.join(*tokens[-num_dirs:])

            # Add only files for which we have events.
            #if page.has_events:

            # Add the pages to the notebook with the name to show on the tab
            nb.AddPage(page, text=page_name)

        # Finally, put the notebook in a sizer for the panel to manage the layout
        sizer = wx.BoxSizer()
        sizer.Add(nb, 1, wx.EXPAND)

        panel.SetSizerAndFit(sizer)
Example #59
    def dojo_compare(self, what="all", **kwargs):
        """Compare ecut convergence and Deltafactor, GBRV results"""
        import matplotlib.pyplot as plt
        show = kwargs.pop("show", True)
        what = list_strings(what)
        figs = []

        if all(p.dojo_report.has_trial("deltafactor") for p in self) and \
               any(k in what for k in ("all", "ecut")):

            fig_etotal, ax_list = plt.subplots(nrows=len(self), ncols=1, sharex=True, squeeze=True)
            #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=len(self), ncols=1, sharex=True, squeeze=True)
            figs.append(fig_etotal)

            for ax, pseudo in zip(ax_list, self):
                pseudo.dojo_report.plot_etotal_vs_ecut(ax=ax, show=False, label=pseudo.basename)
            if show: plt.show()

        if all(p.dojo_report.has_trial("deltafactor") for p in self) and \
               any(k in what for k in ("all", "df", "deltafactor")):

            fig_deltafactor, ax_grid = plt.subplots(nrows=5, ncols=len(self), sharex=True, sharey="row", squeeze=False)
            #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=5, ncols=len(self), sharex=True, sharey="row", squeeze=False))
            figs.append(fig_deltafactor)

            for ax_list, pseudo in zip(ax_grid.T, self):
                #print("pseudo.xc:", pseudo.xc)
                pseudo.dojo_report.plot_deltafactor_convergence(xc=pseudo.xc, ax_list=ax_list, show=False)

            fig_deltafactor.suptitle(" vs ".join(p.basename for p in self))
            if show: plt.show()

        # Compare GBRV results
        if all(p.dojo_report.has_trial("gbrv_bcc") for p in self) and \
           any(k in what for k in ("all", "gbrv")):

            fig_gbrv, ax_grid = plt.subplots(nrows=2, ncols=len(self), sharex=True, sharey="row", squeeze=False)
            figs.append(fig_gbrv)
            #ax_list, fig, plt = get_axarray_fig_plt(ax_list, ncols=len(self), sharex=True, sharey="row", squeeze=False))

            for ax_list, pseudo in zip(ax_grid.T, self):
                pseudo.dojo_report.plot_gbrv_convergence(ax_list=ax_list, show=False)

            fig_gbrv.suptitle(" vs ".join(p.basename for p in self))
            if show: plt.show()

        return figs
Example #60
    def remove_exts(self, exts):
        """
        Remove the files with the given extensions. Unlike rmtree, this function preserves the directory path.
        Return list with the absolute paths of the files that have been removed.
        """
        paths = []

        for ext in list_strings(exts):
            path = self.has_abiext(ext)
            if not path: continue
            try:
                os.remove(path)
                paths.append(path)
            except IOError:
                logger.warning("Exception while trying to remove file %s" % path)

        return paths
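A hedged usage sketch (task.outdir stands for whatever directory object exposes this method; the extensions are hypothetical):

removed = task.outdir.remove_exts(["WFK", "1WF"])
print("Removed files:", removed)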