Beispiel #1
0
    def _classify(self, NP, PID, uni, eps, used):
        """Runs through the collection a second time to reclassify each
        environment according to the most-similar unique LAE identified in
        :meth:`_uniquify`.

        Args:
            NP (numpy.ndarray): matrix of SOAP vectors; one row per
              environment.
            PID: identifier of the structure the rows of `NP` belong to.
            uni (dict): unique LAEs; keys are environment keys, values are
              the corresponding SOAP vectors.
            eps (float): similarity threshold below which two vectors are
              considered equivalent.
            used (dict): flags (keyed like `uni`) marking which unique
              environments were actually matched; updated in place.

        Returns:
            dict: keys are unique-environment keys; each value is a list
            starting with the key itself followed by `(PID, i)` tuples of
            the rows classified as equivalent to it.
        """
        from gblearn.soap import S
        result = {}

        for i in range(len(NP)):
            Pv = NP[i, :]
            #Track the lowest similarity kernel among all unique vectors
            #(K0) and the key of the environment it belongs to (U0).
            K0 = 1.0
            U0 = None

            for u, uP in uni.items():
                #Make sure every unique environment has an entry, even if
                #nothing ends up classified against it.
                if u not in result:
                    result[u] = [u]
                K = S(Pv, uP)

                #Vectors below `eps` are considered equivalent; keep only
                #the best (lowest-kernel) match seen so far.
                if K < eps and K < K0:
                    K0 = K
                    U0 = u

            if K0 < eps:
                result[U0].append((PID, i))
                used[U0] = True
            else:# pragma: no cover
                #This is just a catch warning; it should never happen in
                #practice.
                wmsg = "There was an unclassifiable SOAP vector: {}"
                msg.warn(wmsg.format((PID, i)))

        return result
Beispiel #2
0
    def __init__(self, filepath, index=0, openf=None, stepfilter=None):
        """Parses a LAMMPS-style dump file into typed attribute arrays.

        Args:
            filepath (str): path to the dump file to parse.
            index (int): index of this timestep within the overall dump.
            openf: optional already-open file object passed through to
              :meth:`_read`.
            stepfilter: optional filter passed through to :meth:`_read` to
              select timesteps.
        """
        self.filepath = filepath
        self.index = index

        raw = self._read(openf, stepfilter)
        #We should at least have time, type, id, xyz, box, periodic; otherwise
        #this is an incomplete dump file. Leave the instance in an empty but
        #consistent state in that case.
        if len(raw) < 6:
            self.types = []
            self.ids = []
            self.xyz = []
            self.box = None
            self.periodic = None
            self.extras = None
            return

        self.types = np.array(raw["type"], int)
        self.ids = np.array(raw["id"], int)
        self.xyz = np.array(raw["xyz"])
        self.extras = ["ids"]
        #Any "atom:<quantity>" keys are per-atom extras; expose each as an
        #attribute. "ids" overwrites the default ids array instead of being
        #listed twice in `extras`.
        for key in raw:
            if "atom:" not in key:
                continue
            quant = key.split(':')[1]
            if quant == "ids":
                setattr(self, quant, np.array(raw[key]))
                continue
            self.extras.append(quant)
            setattr(self, quant, np.array(raw[key]))

        #"pp" in the dump header marks a periodic boundary along that axis.
        self.periodic = tuple(map(lambda p: p == "pp", raw["periodic"]))
        self.box = np.array(raw["box"])
        if len(self.xyz) != raw["natoms"]:  # pragma: no cover
            #BUGFIX: all three values must be passed to `format`, not as
            #extra positional arguments to `msg.warn`.
            wmsg = "File {} did not have as many atoms ({}/{}) as specified."
            msg.warn(wmsg.format(self.filepath, len(self.xyz), raw["natoms"]))
Beispiel #3
0
    def __init__(self, xyz, types, box, Z, extras=None, selectargs=None,
                 makelat=True, params=None, **soapargs):
        from gblearn.soap import SOAPCalculator
        from gblearn.lammps import make_lattice
        self.xyz = xyz.copy()
        self.types = types
        self.params = params.copy() if params is not None else {}
        
        if makelat:
            self.box = box
            self.lattice = make_lattice(box)
        else:
            self.box = None
            self.lattice = box.copy()
            
        self.calculator = SOAPCalculator(**soapargs)
        self.Z = Z
        self.LAEs = None

        #For the selection, if padding is present in the dictionary, reduce the
        #padding by half so that all the atoms at the GB get a full SOAP
        #environment.
        self.selectargs = selectargs
        if "padding" in self.selectargs:
            self.selectargs["padding"] /= 2

        if extras is not None:
            self.extras = extras.keys()
            for k, v in extras.items():
                if not hasattr(self, k):
                    target = v.copy()
                    if isinstance(target, FortranArray):
                        setattr(self, k, v.copy().T)
                    else:
                        setattr(self, k, v.copy())
                else:
                    msg.warn("Cannot set extra attribute `{}`; "
                             "already exists.".format(k))
        else:
            self.extras = []    
        
        self.P = None
        self._atoms = None
        """quippy.atoms.Atoms: representation of the atoms at the boundary that
        is interoperable with QUIP.
        """
        self._NP = None
        """numpy.ndarray: normalized P matrix, where each row is normalized by
        its L2 norm.
        """
        self._K = None
        """numpy.ndarray: matrix of the dot product of every row in :attr:`NP`
Beispiel #4
0
    def __init__(self,
                 xyz,
                 types,
                 box,
                 Z,
                 extras=None,
                 selectargs=None,
                 makelat=True,
                 params=None,
                 padding=10.0):

        from gblearn.lammps import make_lattice
        self.xyz = xyz.copy()
        self.types = types
        self.params = params.copy() if params is not None else {}
        self.rep_params = {"soap": {}, "scatter": {}}

        if makelat:
            self.box = box
            self.lattice = make_lattice(box)
        else:
            self.box = None
            self.lattice = box.copy()

        self.Z = None
        if isinstance(Z, int):
            self.Z = np.full(len(self), Z)
        else:  # pragma: no cover
            self.Z = np.asarray(Z)
        self.LAEs = None
        self.LER = None
        self.ASR = None

        #For the selection, if padding is present in the dictionary, reduce the
        #padding by half so that all the atoms at the GB get a full SOAP
        #environment.
        self.selectargs = selectargs
        self.padding = padding / 2.
        if extras is not None:
            self.extras = extras.keys()
            for k, v in extras.items():
                if not hasattr(self, k):
                    setattr(self, k, v)
                else:  # pragma: no cover
                    msg.warn("Cannot set extra attribute `{}`; "
                             "already exists.".format(k))
        else:  # pragma: no cover
            self.extras = []

        self.P = None
        self.Scatter = None
        self._atoms = None
        """quippy.atoms.Atoms: representation of the atoms at the boundary that
        is interoperable with QUIP.
        """
        self._NP = None
        """numpy.ndarray: normalized P matrix, where each row is normalized by
        its L2 norm.
        """
        self._K = None
        """numpy.ndarray: matrix of the dot product of every row in :attr:`NP`
Beispiel #5
0
    def scatter(self,
                density=0.5,
                Layers=2,
                SPH_L=6,
                n_trans=8,
                n_angle1=8,
                n_angle2=8,
                threads=0,
                multires=None,
                **scatterargs):
        """Calculates the Scatter vectors for each grain boundary.

        Args:
            density (float): Scatter sampling density parameter (see
              :module:`SNET`) — semantics defined by the Scatter library.
            Layers (int): number of layers for the Scatter descriptor.
            SPH_L (int): spherical-harmonic band limit.
            n_trans (int): number of translational samples.
            n_angle1 (int): number of samples for the first angle.
            n_angle2 (int): number of samples for the second angle.
            threads (int): the number of threads to use. If 0, the number of cores
              will try to be determined from multiprocessing.cpu_count(). If this fails
              1 thread will be used.
            multires: optional multi-resolution parameter set; when given it
              replaces the flat `scatterargs` for configuration and dispatch.
            scatterargs (dict): key-value pairs of the Scatter parameters (see :module: `SNET`)
        """
        #Explicit keyword parameters take precedence over anything passed in
        #via **scatterargs (update overwrites duplicate keys).
        defargs = {
            "density": density,
            "Layers": Layers,
            "SPH_L": SPH_L,
            "n_trans": n_trans,
            "n_angle1": n_angle1,
            "n_angle2": n_angle2
        }
        scatterargs.update(defargs)
        #Record the parameters used and configure the backing store so it can
        #key cached results on them.
        if multires is not None:
            self.repargs["scatter"] = multires
            self.store.configure("scatter", multires)
        else:
            self.repargs["scatter"] = scatterargs
            self.store.configure("scatter", **scatterargs)
        Scatter = self.store.Scatter

        if len(Scatter) == len(self):
            for gbid, gb in self.items():
                self[gbid].rep_params["scatter"] = scatterargs
            #No need to recompute if the store has the result.
            return Scatter

        if threads == 0:  # pragma: no cover
            try:
                threads = mp.cpu_count()
            except NotImplementedError:
                msg.warn("Unable able to determine number of available CPU's, "
                         "resorting to 1 thread")
                threads = 1

        pbar = tqdm(total=len(self))

        def _update(res):
            """Updates the tqdm bar and
                adds the completed scatter vector to the store.
            """
            #`res` is a (gbid, vector) pair returned by the worker.
            pbar.update()
            Scatter[res[0]] = res[1]

        pool = mp.Pool(processes=threads)
        result = {}

        #Fan the per-boundary computations out over the pool; `_update` runs
        #in the parent process as each async result completes.
        if multires is not None:
            for gbid, gb in self.items():
                pool.apply_async(_mutlires_scatter_mp,
                                 args=(gbid, gb, multires),
                                 callback=_update)
        else:
            for gbid, gb in self.items():
                pool.apply_async(_scatter_mp,
                                 args=(gbid, gb, scatterargs),
                                 callback=_update)
        #Block until all workers have finished before returning control.
        #NOTE(review): nothing is returned after the join here and `result`
        #is never populated — confirm whether the method continues beyond
        #this chunk (e.g. `return Scatter`).
        pool.close()
        pool.join()