Example #1
 def transform_coordinates(self, coordinates, argument=None):
     """
     Translate coordinates.
     
     :Parameters:
         #. coordinates (np.ndarray): The coordinates on which to apply the translation.
         
     :Returns:
         #. coordinates (np.ndarray): The new coordinates after applying the translation.
         #. argument (object): Any python object. Not used in this generator.
     """
     if coordinates.shape[0] <= 1:
          # atoms were removed, fall back to random translation
         return coordinates + generate_random_vector(
             minAmp=self.__amplitude[0], maxAmp=self.__amplitude[1])
     else:
         # get translation amplitude
         maxAmp = self.amplitude[1] - self.amplitude[0]
         if self.direction is None:
             amplitude = (1 - 2 * generate_random_float()) * maxAmp
         elif self.direction:
             amplitude = generate_random_float() * maxAmp
         else:
             amplitude = -generate_random_float() * maxAmp
         # get axis of translation
         _, _, _, _, X, Y, Z = get_principal_axis(coordinates)
         translationAxis = [X, Y, Z][self.__axis]
         # compute baseVector
         baseVector = FLOAT_TYPE(
             np.sign(amplitude) * translationAxis * self.amplitude[0])
         # compute translation vector
         vector = baseVector + translationAxis * FLOAT_TYPE(amplitude)
         # translate and return
         return coordinates + vector
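The branching above draws a signed amplitude within the configured bounds and offsets it by the minimum amplitude along the selected principal axis, so the final translation magnitude always lands between amplitude[0] and amplitude[1]. Below is a minimal standalone sketch of the same sampling scheme in plain numpy; generate_random_float and the principal-axis machinery are replaced by numpy stand-ins, and the function name is an illustrative assumption rather than fullrmc API.

 import numpy as np

 def sample_axis_translation(axis, min_amp, max_amp, direction=None):
     # axis is assumed to be a normalized 3-vector
     span = max_amp - min_amp
     if direction is None:          # move either way along the axis
         amplitude = (1 - 2 * np.random.random()) * span
     elif direction:                # move only along +axis
         amplitude = np.random.random() * span
     else:                          # move only along -axis
         amplitude = -np.random.random() * span
     # offset by the minimum amplitude in the chosen direction
     base = np.sign(amplitude) * axis * min_amp
     return base + axis * amplitude

 # usage: translation between 0.5 and 2.0 Angstroms along z, both directions allowed
 vec = sample_axis_translation(np.array([0., 0., 1.]), 0.5, 2.0)
 print(np.linalg.norm(vec))   # falls in [0.5, 2.0] (barring the degenerate amplitude == 0 draw)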
Example #2
 def compute_data(self):
     """ Compute constraint's data. """
     self.__coordNumData = np.array(
         [FLOAT_TYPE(0) for _ in self.__coordNumData], dtype=FLOAT_TYPE)
     all_atoms_coord_number_coords(boxCoords=self.engine.boxCoordinates,
                                   basis=self.engine.basisVectors,
                                   isPBC=self.engine.isPBC,
                                   coresIndexes=self.__coresIndexes,
                                   shellsIndexes=self.__shellsIndexes,
                                   lowerShells=self.__lowerShells,
                                   upperShells=self.__upperShells,
                                   asCoreDefIdxs=self.__asCoreDefIdxs,
                                   inShellDefIdxs=self.__inShellDefIdxs,
                                   coordNumData=self.__coordNumData,
                                   ncores=self.engine._runtime_ncores)
     self.__coordNumData /= FLOAT_TYPE(2.)
     # update data
     self.set_data(self.__coordNumData)
     self.set_active_atoms_data_before_move(None)
     self.set_active_atoms_data_after_move(None)
     # set standardError
     stdErr = self.compute_standard_error(data=self.__coordNumData)
     self.set_standard_error(stdErr)
     # set original data
     if self.originalData is None:
         self._set_original_data(self.data)
Example #3
 def transform_coordinates(self, coordinates, argument=None):
     """
     Translate coordinates.
     
     :Parameters:
         #. coordinates (np.ndarray): The coordinates on which to apply the translation.
         
     :Returns:
         #. coordinates (np.ndarray): The new coordinates after applying the translation.
         #. argument (object): Any python object. Not used in this generator.
     """
     # get center
     center = self.__get_center()
     # compute coordinates center
     coordsCenter = np.mean(coordinates, axis=0)
     direction = center - coordsCenter
     # translation vector
     translationAxis = self.__get_translation_axis(direction)
     # get amplitude
     amplitude = self.__get_amplitude()
     # compute baseVector
     baseVector = FLOAT_TYPE(
         np.sign(amplitude) * translationAxis * self.amplitude[0])
     # compute translation vector
     vector = baseVector + translationAxis * FLOAT_TYPE(amplitude)
     # translate and return
     return coordinates + vector
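__get_center, __get_translation_axis and __get_amplitude are private helpers of this generator and are not shown in this example. The sketch below only illustrates the general idea of moving a group toward a target point by a bounded amplitude, using plain numpy and hypothetical names; it is not the generator's actual implementation.

 import numpy as np

 def translate_towards(coordinates, target, min_amp, max_amp):
     # unit vector from the group's geometric center to the target point
     direction = target - coordinates.mean(axis=0)
     axis = direction / np.linalg.norm(direction)
     # random amplitude within the [min_amp, max_amp] bounds
     amplitude = min_amp + np.random.random() * (max_amp - min_amp)
     return coordinates + axis * amplitude

 coords = np.array([[1., 0., 0.], [2., 0., 0.]])
 print(translate_towards(coords, target=np.zeros(3), min_amp=0.1, max_amp=0.5))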
Example #4
 def transform_coordinates(self, coordinates, argument):
     """
     Translate coordinates.
     
     :Parameters:
         #. coordinates (np.ndarray): The coordinates on which to apply the translation.
         
     :Returns:
         #. coordinates (np.ndarray): The new coordinates after applying the translation.
         #. argument (float): The move distance.
     """
     if coordinates.shape[0] <= 1:
          # atoms were removed, fall back to random translation
         return coordinates + generate_random_vector(
             minAmp=self.__amplitude[0], maxAmp=self.__amplitude[1])
     else:
         # get translation amplitude
         amplitude = FLOAT_TYPE(argument)
         # get vector of translation
         _, _, _, _, X, Y, Z = get_principal_axis(coordinates)
         vector = [X, Y, Z][self.__axis]
         # amplify vector
         vector *= FLOAT_TYPE(amplitude)
         # translate and return
         return coordinates + vector
Example #5
 def set_adjust_scale_factor(self, adjustScaleFactor):
     """
     Sets adjust scale factor.
     
     :Parameters:
          #. adjustScaleFactor (list, tuple): Used to adjust or fit the best scale factor during engine runtime.
            It must be a list of exactly three entries.\n
            1. The frequency in number of accepted moves of finding the best scale factor. 
               If 0 frequency is given, it means that the scale factor is fixed.
            2. The minimum allowed scale factor value.
            3. The maximum allowed scale factor value.
     """
     assert isinstance(adjustScaleFactor, (list, tuple)), LOGGER.error('adjustScaleFactor must be a list.')
     assert len(adjustScaleFactor) == 3, LOGGER.error('adjustScaleFactor must be a list of exactly three items.')
     freq  = adjustScaleFactor[0]
     minSF = adjustScaleFactor[1]
     maxSF = adjustScaleFactor[2]
     assert is_integer(freq), LOGGER.error("adjustScaleFactor first item (frequency) must be an integer.")
     freq = INT_TYPE(freq)
      assert freq>=0, LOGGER.error("adjustScaleFactor first item (frequency) must be greater than or equal to 0.")
     assert is_number(minSF), LOGGER.error("adjustScaleFactor second item (minimum) must be a number.")
     minSF = FLOAT_TYPE(minSF)
     assert is_number(maxSF), LOGGER.error("adjustScaleFactor third item (maximum) must be a number.")
     maxSF = FLOAT_TYPE(maxSF)
      assert minSF<=maxSF, LOGGER.error("adjustScaleFactor second item (minimum) must be smaller than or equal to the third item (maximum).")
     # set values
     self.__adjustScaleFactorFrequency = freq
     self.__adjustScaleFactorMinimum   = minSF
     self.__adjustScaleFactorMaximum   = maxSF
     # dump to repository
     self._dump_to_repository({'_ExperimentalConstraint__adjustScaleFactorFrequency': self.__adjustScaleFactorFrequency,
                               '_ExperimentalConstraint__adjustScaleFactorMinimum'  : self.__adjustScaleFactorMinimum,
                               '_ExperimentalConstraint__adjustScaleFactorMaximum'  : self.__adjustScaleFactorMaximum})
     # reset constraint
     self.reset_constraint()
Example #6
 def _update_shape_array(self):
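     """ Compute the shape function array evaluated at the constraint's shell centers and store it in self._shapeArray. """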
     rmin = self._shapeFuncParams['rmin']
     rmax = self._shapeFuncParams['rmax']
     dr = self._shapeFuncParams['dr']
     qmin = self._shapeFuncParams['qmin']
     qmax = self._shapeFuncParams['qmax']
     dq = self._shapeFuncParams['dq']
     if rmax is None:
         if self.engine.isPBC:
             a = self.engine.boundaryConditions.get_a()
             b = self.engine.boundaryConditions.get_b()
             c = self.engine.boundaryConditions.get_c()
             rmax = FLOAT_TYPE(np.max([a, b, c]) + 10)
         else:
             coordsCenter = np.sum(
                 self.engine.realCoordinates,
                 axis=0) / self.engine.realCoordinates.shape[0]
             coordinates = self.engine.realCoordinates - coordsCenter
             distances = np.sqrt(np.sum(coordinates**2, axis=1))
             maxDistance = 2. * np.max(distances)
             rmax = FLOAT_TYPE(maxDistance + 10)
              LOGGER.warn(
                  "Shape function rmax is better set explicitly with infinite boundary conditions. Value automatically set to %s"
                  % rmax)
     shapeFunc = ShapeFunction(engine=self.engine,
                               weighting=self.weighting,
                               qmin=qmin,
                               qmax=qmax,
                               dq=dq,
                               rmin=rmin,
                               rmax=rmax,
                               dr=dr)
     self._shapeArray = shapeFunc.get_gr_shape_function(self.shellCenters)
     del shapeFunc
Example #7
 def set_data_weights(self, dataWeights):
     """
     Set experimental data points weight.
     
     :Parameters: 
          #. dataWeights (None, numpy.ndarray): A weights array with the same number of points as experimentalData, used in the constraint's standard error computation.
             Particular fitting emphasis can therefore be put on data points that are considered more or less
             important in order to get a reasonable and plausible model.\n
             If None is given, all data points are considered of the same importance in the computation of the constraint's standard error.\n
             If a numpy.ndarray is given, all weights must be positive and zero-weighted data points won't contribute to the
             total constraint's standard error. At least one weight must be non-zero, and the weights
             array is automatically rescaled upon setting such that the sum of all weights equals the number of data points.
     """
     if dataWeights is not None:
         assert isinstance(dataWeights, (list, tuple, np.ndarray)), LOGGER.error("dataWeights must be None or a numpy array of weights")
         try:
             dataWeights = np.array(dataWeights, dtype=FLOAT_TYPE)
         except Exception as e:
             raise Exception(LOGGER.error("unable to cast dataWeights as a numpy array (%s)"%(e)))
         assert len(dataWeights.shape) == 1, LOGGER.error("dataWeights must be a vector")
          assert len(dataWeights) == self.__experimentalData.shape[0], LOGGER.error("dataWeights must be of the same length as the experimental data")
          assert np.min(dataWeights) >=0, LOGGER.error("negative dataWeights values are not allowed")
         assert np.sum(dataWeights), LOGGER.error("dataWeights must be a non-zero array")
         dataWeights /= FLOAT_TYPE( np.sum(dataWeights) )
         dataWeights *= FLOAT_TYPE( len(dataWeights) )                      
     self.__dataWeights = dataWeights
     # dump to repository
     self._dump_to_repository({'_ExperimentalConstraint__dataWeights': self.__dataWeights})
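The two rescaling lines above normalize the weights so that they sum to the number of data points, which keeps the magnitude of a weighted standard error comparable to the unweighted case while leaving zero-weighted points excluded. A standalone numpy illustration (not fullrmc code):

 import numpy as np

 weights = np.array([0.0, 1.0, 3.0, 1.0])
 weights /= np.sum(weights)   # weights now sum to 1
 weights *= len(weights)      # weights now sum to the number of points (4)
 print(weights)               # [0.  0.8 2.4 0.8]; the zero-weighted point still contributes nothing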
Example #8
    def fit_scale_factor(self, experimentalData, modelData, dataWeights):
        """
        The best scale factor value is computed by minimizing the squared difference between :math:`E` and :math:`sM`.\n
        
        Where:
            #. :math:`E` is the experimental data.
            #. :math:`s` is the scale factor.
            #. :math:`M` is the model constraint data.

        :Parameters:
            #. experimentalData (numpy.ndarray): the experimental data.
            #. modelData (numpy.ndarray): the constraint model data.
            #. dataWeights (None, numpy.ndarray): the data points weights to compute the scale factor.
               If None, all data points will be considered as having the same weight.
            
        :Returns:
            #. scaleFactor (number): The new scale factor fit value.
        
        **NB**: This method won't update the internal scale factor value of the constraint.
        It always computes the best scale factor for the given experimental and model data.
        """
        if dataWeights is None:
            SF = FLOAT_TYPE( np.sum(modelData*experimentalData)/np.sum(modelData**2) )
        else:
            SF = FLOAT_TYPE( np.sum(dataWeights*modelData*experimentalData)/np.sum(modelData**2) )
        SF = max(SF, self.__adjustScaleFactorMinimum)
        SF = min(SF, self.__adjustScaleFactorMaximum)
        return SF
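In the unweighted case, setting the derivative of sum_i (E_i - s*M_i)**2 with respect to s to zero gives s = sum(M*E)/sum(M**2), which is exactly the expression returned above before clamping to the allowed [minimum, maximum] range. A minimal standalone check with synthetic data:

 import numpy as np

 M = np.linspace(0.0, 1.0, 50)     # model data
 E = 2.5 * M                       # synthetic "experimental" data, an exactly scaled copy of M
 s = np.sum(M * E) / np.sum(M ** 2)
 print(s)                          # -> 2.5, recovering the scale used to build E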
Example #9
 def transform_coordinates(self, coordinates, argument=None):
     """
     Translate coordinates.
     
     :Parameters:
         #. coordinates (np.ndarray): The coordinates on which to apply the translation.
         
     :Returns:
         #. coordinates (np.ndarray): The new coordinates after applying the translation.
         #. argument (object): Any python object. Not used in this generator.
     """
     # generate translation axis
     translationAxis = generate_vectors_in_solid_angle(
         direction=self.axis, maxAngle=self.__angle, numberOfVectors=1)[0]
     # get translation amplitude
     maxAmp = self.amplitude[1] - self.amplitude[0]
     if self.direction is None:
         amplitude = (1 - 2 * generate_random_float()) * maxAmp
     elif self.direction:
         amplitude = generate_random_float() * maxAmp
     else:
         amplitude = -generate_random_float() * maxAmp
     # compute baseVector
     baseVector = FLOAT_TYPE(
         np.sign(amplitude) * translationAxis * self.amplitude[0])
     # compute translation vector
     vector = baseVector + translationAxis * FLOAT_TYPE(amplitude)
     # translate and return
     return coordinates + vector
Example #10
 def set_amplitude(self, amplitude):
     """
     Sets maximum translation vector allowed amplitude.
     
     :Parameters:
         #. amplitude (number, tuple): The translation amplitude in Angstroms.
            If number is given, it is the maximum translation amplitude in Angstroms and must be bigger than 0.
            If tuple is given, it is the limits of translation boundaries as [min,max] where min>=0 and max>min.
     """
     if isinstance(amplitude, (list, tuple, set)):
         assert len(amplitude) == 2, LOGGER.error(
             "Translation amplitude tuple must have exactly two items")
         assert is_number(amplitude[0]), LOGGER.error(
             "Translation amplitude first item must be a number")
         assert is_number(amplitude[1]), LOGGER.error(
             "Translation amplitude second item must be a number")
          minAmp = FLOAT_TYPE(amplitude[0])
          maxAmp = FLOAT_TYPE(amplitude[1])
          assert minAmp >= 0, LOGGER.error(
              "Translation amplitude first item must be greater than or equal to 0")
          assert maxAmp > minAmp, LOGGER.error(
              "Translation amplitude second item must be bigger than the first item"
          )
          amplitude = (minAmp, maxAmp)
     else:
         assert is_number(amplitude), LOGGER.error(
             "Translation amplitude must be a number")
         amplitude = float(amplitude)
         assert amplitude > 0, LOGGER.error(
             "Translation amplitude must be bigger than 0")
         amplitude = (FLOAT_TYPE(0), FLOAT_TYPE(amplitude))
     self.__amplitude = amplitude
Example #11
 def __init__(self, engine, weighting="atomicNumber",
                    qmin=0.001, qmax=1, dq=0.005,
                    rmin=0.00, rmax=100, dr=1):
     # get qmin
     assert is_number(qmin), LOGGER.error("qmin must be a number")
     qmin = FLOAT_TYPE(qmin)
     assert qmin>0, LOGGER.error("qmin '%s' must be bigger than 0"%qmin)
      # get qmax
     assert is_number(qmax), LOGGER.error("qmax must be a number")
     qmax = FLOAT_TYPE(qmax)
      assert qmax>qmin, LOGGER.error("qmax '%s' must be bigger than qmin '%s'"%(qmax,qmin))
     # get dq
     assert is_number(dq), LOGGER.error("dq must be a number")
     dq = FLOAT_TYPE(dq)
     assert dq>0, LOGGER.error("dq '%s' must be bigger than 0"%dq)
     # import StructureFactorConstraint
     from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
     # create StructureFactorConstraint
     Q = np.arange(qmin, qmax, dq)
     D = np.transpose([Q, np.zeros(len(Q))]).astype(FLOAT_TYPE)
     self._SFC = StructureFactorConstraint(rmin=rmin, rmax=rmax, dr=dr, experimentalData=D, weighting="atomicNumber")
     self._SFC._set_engine(engine)
     self._SFC.listen(message="engine set")
     # set parameters
     self._rmin = FLOAT_TYPE(rmin)
     self._rmax = FLOAT_TYPE(rmax)
     self._dr   = FLOAT_TYPE(dr)
     self._qmin = FLOAT_TYPE(qmin)
     self._qmax = FLOAT_TYPE(qmax)
     self._dq   = FLOAT_TYPE(dq)
     self._weighting = weighting
Example #12
 def set_angle(self, angle):
     """
     Sets the tolerance maximum angle.
     
     :Parameters:
         #. angle (number): The maximum tolerance angle in degrees between a generated translation vector and the pre-defined axis.        
     """
      assert is_number(angle), LOGGER.error("angle must be a number")
      assert angle > 0, LOGGER.error("angle must be positive")
      assert angle <= 360, LOGGER.error("angle must be smaller than or equal to 360")
     self.__angle = FLOAT_TYPE(angle) * PI / FLOAT_TYPE(180.)
Example #13
    def normalize_path(self, path):
        """
        Transform all path angles to radian.

        :Parameters:
            #. path (list): The list of moves in degrees.

        :Returns:
            #. path (list): The list of moves in rad.
        """
        path = [FLOAT_TYPE(angle) * PI / FLOAT_TYPE(180.) for angle in path]
        return list(path)
Example #14
    def set_group_axis(self, groupAxis):
        """
        Sets group axis value.

        :Parameters:
           #. groupAxis (dict): The group axis. Only one key is allowed.
              If the key is 'fixed', the value must be a list, tuple or a
              numpy.array of a vector such as [X,Y,Z]. If the key is
              'symmetry', the group axis is computed as one of the three
              symmetry axes of the group atoms; the value must be either 0, 1
              or 2 for the first, second and third symmetry axis respectively.
        """
        assert isinstance(groupAxis,
                          dict), LOGGER.error("groupAxis must be a dictionary")
        assert len(groupAxis) == 1, LOGGER.error(
            "groupAxis must have a single key")
        key = list(groupAxis.keys())[0]
        val = groupAxis[key]
        if key == "fixed":
            self.__mustComputeGroupAxis = False
            assert isinstance(
                val,
                (list, set, tuple,
                 np.ndarray)), LOGGER.error("groupAxis value must be a list")
            if isinstance(val, np.ndarray):
                assert len(val.shape) == 1, LOGGER.error(
                    "groupAxis value must have a single dimension")
            val = list(val)
            assert len(val) == 3, LOGGER.error(
                "groupAxis fixed value must be a vector")
            for v in val:
                assert is_number(v), LOGGER.error(
                    "groupAxis value item must be numbers")
            val = np.array([FLOAT_TYPE(v) for v in val], dtype=FLOAT_TYPE)
            norm = FLOAT_TYPE(np.sqrt(np.sum(val**2)))
            val /= norm
        elif key == "symmetry":
            self.__mustComputeGroupAxis = True
            assert is_integer(val), LOGGER.error(
                "groupAxis symmetry value must be an integer")
            val = INT_TYPE(val)
            assert val >= 0 and val < 3, LOGGER.error(
                "groupAxis symmetry value must be positive and smaller than 3")
        else:
            self.__mustComputeGroupAxis = None
            raise Exception(
                LOGGER.error(
                    "groupAxis key must be either 'fixed' or 'symmetry'"))
        # set groupAxis
        self.__groupAxis = {key: val}
Example #15
    def set_axis(self, axis):
        """
        Set the axis along which the translation will be performed.

        :Parameters:
            #. axis (list,set,tuple,numpy.ndarray): Translation axis vector.
        """
        assert isinstance(axis, (list,set,tuple,np.ndarray)), LOGGER.error("axis must be a list")
        axis = list(axis)
        assert len(axis)==3, LOGGER.error("axis list must have 3 items")
        for pos in axis:
            assert is_number(pos), LOGGER.error( "axis items must be numbers")
        axis = [FLOAT_TYPE(pos) for pos in axis]
        axis =  np.array(axis, dtype=FLOAT_TYPE)
        self.__axis = axis/FLOAT_TYPE( np.linalg.norm(axis) )
Example #16
    def compute_standard_error(self, data):
        """
        Compute the standard error (StdErr) of data not satisfying constraint
        conditions.

        .. math::
            StdErr = \\sum \\limits_{i}^{C}
            ( \\beta_{i} - \\beta_{i}^{min} ) ^{2}
            \\int_{0}^{\\beta_{i}^{min}} \\delta(\\beta-\\beta_{i}) d \\beta
            +
            ( \\beta_{i} - \\beta_{i}^{max} ) ^{2}
            \\int_{\\beta_{i}^{max}}^{\\infty} \\delta(\\beta-\\beta_{i}) d \\beta

        Where:\n
        :math:`C` is the total number of defined bonds constraints. \n
        :math:`\\beta_{i}^{min}` is the bond constraint lower limit set for constraint i. \n
        :math:`\\beta_{i}^{max}` is the bond constraint upper limit set for constraint i. \n
        :math:`\\beta_{i}` is the bond length computed for constraint i. \n
        :math:`\\delta` is the Dirac delta function. \n
        :math:`\\int_{0}^{\\beta_{i}^{min}} \\delta(\\beta-\\beta_{i}) d \\beta`
        is equal to 1 if :math:`0 \\leqslant \\beta_{i} \\leqslant \\beta_{i}^{min}` and 0 elsewhere.\n
        :math:`\\int_{\\beta_{i}^{max}}^{\\infty} \\delta(\\beta-\\beta_{i}) d \\beta`
        is equal to 1 if :math:`\\beta_{i}^{max} \\leqslant \\beta_{i} \\leqslant \\infty` and 0 elsewhere.\n

        :Parameters:
            #. data (object): Data to compute standardError.

        :Returns:
            #. standardError (number): The calculated standardError of the
               given data.
        """
        return FLOAT_TYPE(np.sum(data["reducedLengths"]**2))
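Assuming data['reducedLengths'] holds, for every bond, the amount by which it falls outside its allowed [min, max] window (zero when the bond is satisfied), the returned value is simply the sum of squares of those violations, matching the formula in the docstring. A standalone numpy sketch of that interpretation, with illustrative values rather than fullrmc data structures:

 import numpy as np

 lengths = np.array([0.9, 1.1, 1.6])     # computed bond lengths
 lower, upper = 1.0, 1.5                 # allowed [min, max] window
 reduced = np.where(lengths < lower, lower - lengths,
                    np.where(lengths > upper, lengths - upper, 0.0))
 print(reduced, np.sum(reduced ** 2))    # [0.1 0.  0.1]  0.02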
Example #17
 def _runtime_initialize(self):
     """
     Automatically sets the selector order at the engine runtime.
     """
     diffs = np.array([(np.sum(self.engine.realCoordinates[g.indexes], axis=0)/len(g))-self.__center for g in self.engine.groups], dtype=FLOAT_TYPE)
     dists = np.array([np.sqrt(np.add.reduce(diff**2)) for diff in diffs])
     order = np.argsort(dists).astype(INT_TYPE)
     if self.__expand:
         order = [o for o in reversed(order)]
     # set order
     self.set_order(order)
     # set groups move generators
     if self.__adjustMoveGenerators:
         from fullrmc.Core.MoveGenerator import MoveGeneratorCollector
         from fullrmc.Generators.Rotations import RotationGenerator
         from fullrmc.Generators.Translations import TranslationTowardsCenterGenerator
         TG_amp  = self.__generatorsParams['TG']['amplitude']
         TG_ang  = self.__generatorsParams['TG']['angle']
         TG_dam  = self.__generatorsParams['TG']['damping']
         RG_ang  = self.__generatorsParams['RG']['amplitude']
         maxDist = FLOAT_TYPE(np.max(dists))
         TG_ampInterval = TG_amp-TG_amp*TG_dam
         for idx in range(len(self.engine.groups)):
             g = self.engine.groups[idx]
             damping = ((maxDist-dists[idx])/maxDist)*TG_ampInterval
             coll = [TranslationTowardsCenterGenerator(center={"fixed":self.__center}, amplitude=TG_amp-damping, angle=TG_ang, direction=not self.__expand)]
             if len(g) > 1:
                 coll.append(RotationGenerator(amplitude=RG_ang))
             mg = MoveGeneratorCollector(collection=coll, randomize=True)
             g.set_move_generator( mg )
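The damping term above leaves groups at the maximum distance from the center with the full TG_amp translation amplitude, while groups sitting at the center are reduced to TG_amp*TG_dam. A small standalone example of that arithmetic using the default parameters:

 import numpy as np

 TG_amp, TG_dam = 0.1, 0.1               # default amplitude and damping
 dists = np.array([0.0, 5.0, 10.0])      # group distances from the center
 maxDist = dists.max()
 interval = TG_amp - TG_amp * TG_dam
 amplitudes = TG_amp - ((maxDist - dists) / maxDist) * interval
 print(amplitudes)                        # [0.01 0.055 0.1]; groups closer to the center move less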
Example #18
 def set_generators_parameters(self, generatorsParams):
     """
     Set move generators parameters.
     
     #. generatorsParams (None, dict): The automatically created moves generators parameters.
        If None is given, default parameters are used. If a dictionary is given, only two keys are allowed.
        'TG' key is for TranslationTowardsCenterGenerator parameters and 'RG' key is
        for RotationGenerator parameters. TranslationTowardsCenterGenerator amplitude parameter
        is not the same for all groups; it intelligently allows certain groups to move more than
        others according to the damping parameter.
        
        **Parameters are the following:**\n
        * TG_amp = generatorsParams['TG']['amplitude']: Used for TranslationTowardsCenterGenerator amplitude parameters.
        * TG_ang = generatorsParams['TG']['angle']: Used as TranslationTowardsCenterGenerator angle parameters.
        * TG_dam = generatorsParams['TG']['damping']: Also used for TranslationTowardsCenterGenerator amplitude parameters.
        * RG_ang = generatorsParams['RG']['amplitude']: Used as RotationGenerator angle parameters.
        
        **Parameters are used as the following:**\n
        * TG = TranslationTowardsCenterGenerator(center={"fixed":center}, amplitude=AMPLITUDE, angle=TG_ang)\n
          Where TG_amp*TG_dam <= AMPLITUDE <= TG_amp
        * RG = RotationGenerator(amplitude=RG_ang)         
        * MoveGeneratorCollector(collection=[TG,RG], randomize=True)
        
        **NB: The parameters are not checked for errors until engine runtime.** 
     """
     if generatorsParams is None:
         generatorsParams = {}
     assert isinstance(
         generatorsParams,
         dict), LOGGER.error("generatorsParams must be a python dictionary")
     newGenParams = {
         "TG": {
             "amplitude": 0.1,
             "damping": 0.1,
             "angle": 90
         },
         "RG": {
             "amplitude": 10
         }
     }
     # update  TranslationTowardsCenterGenerator values
     for gkey in newGenParams.keys():
          if gkey not in generatorsParams:
             continue
         assert isinstance(generatorsParams[gkey], dict), LOGGER.error(
             "generatorsParams value must be a python dictionary")
         for key in newGenParams[gkey].keys():
             newGenParams[gkey][key] = generatorsParams[gkey].get(
                 key, newGenParams[gkey][key])
     # check generatorsParams damping parameters
      assert is_number(newGenParams["TG"]["damping"]), LOGGER.error(
          "generatorsParams['TG']['damping'] must be a number")
      newGenParams["TG"]["damping"] = FLOAT_TYPE(
          newGenParams["TG"]["damping"])
      assert newGenParams["TG"]["damping"] >= 0, LOGGER.error(
          "generatorsParams['TG']['damping'] must be greater than or equal to 0")
      assert newGenParams["TG"]["damping"] <= 1, LOGGER.error(
          "generatorsParams['TG']['damping'] must be less than or equal to 1")
     # set generatorsParams
     self.__generatorsParams = newGenParams
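As an illustration, a dictionary of the expected shape (the values here are arbitrary; any omitted key falls back to the defaults shown above):

 generatorsParams = {"TG": {"amplitude": 0.25, "damping": 0.2, "angle": 60},
                     "RG": {"amplitude": 15}}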
Example #19
 def set_angle(self, angle):
     """
     Sets the tolerance maximum angle.
     
     :Parameters:
         #. angle (None, number): The maximum tolerance angle in degrees between a generated translation vector and the computed direction. 
            If None is given, all generated translation vectors will be along the direction to center.        
     """
     if angle is not None:
          assert is_number(angle), LOGGER.error("angle must be a number")
          assert angle >= 0, LOGGER.error("angle must be positive")
          assert angle <= 360, LOGGER.error("angle must be smaller than or equal to 360")
         if FLOAT_TYPE(angle) == FLOAT_TYPE(0.0):
             angle = None
         else:
             angle = FLOAT_TYPE(angle) * PI / FLOAT_TYPE(180.)
     self.__angle = angle
Example #20
    def transform_coordinates(self, coordinates, argument=None):
        """
        Rotate coordinates.

        :Parameters:
            #. coordinates (np.ndarray): The coordinates on which to apply
               the rotation.
            #. argument (object): Not used here.

        :Returns:
            #. coordinates (np.ndarray): The new coordinates after applying
               the rotation.
        """
        if coordinates.shape[0] <= 1:
            # atoms were removed, fall back to random translation
            return coordinates + generate_random_vector(
                minAmp=self.__amplitude[0], maxAmp=self.__amplitude[1])
        else:
            # create flip flag
            if self.__flip is None:
                flip = FLOAT_TYPE(np.sign(1 - 2 * generate_random_float()))
            elif self.__flip:
                flip = FLOAT_TYPE(-1)
            else:
                flip = FLOAT_TYPE(1)
            # get group axis
            groupAxis = self.__get_group_axis__(coordinates)
            # get align axis within offset angle
            orientationAxis = flip * self.__get_orientation_axis__()
            orientationAxis = generate_vectors_in_solid_angle(
                direction=orientationAxis,
                maxAngle=self.__maximumOffsetAngle,
                numberOfVectors=1)[0]
            # get coordinates center
            center = np.array(np.sum(coordinates, 0) / coordinates.shape[0],
                              dtype=FLOAT_TYPE)
            # translate to origin
            rotatedCoordinates = coordinates - center
            # align coordinates
            rotatedCoordinates = orient(rotatedCoordinates, groupAxis,
                                        orientationAxis)
            # translate back to center and return rotated coordinates
            return np.array(rotatedCoordinates + center, dtype=FLOAT_TYPE)
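orient() and the double-underscore helpers used above belong to fullrmc and are not reproduced here. The sketch below only illustrates the generic translate-to-origin / align-axis / translate-back pattern with a plain numpy Rodrigues rotation; it is an assumption about the overall idea, not fullrmc's actual orient implementation.

 import numpy as np

 def align(coords, from_axis, to_axis):
     # rotate centered coords (N x 3) so that from_axis maps onto to_axis
     a = from_axis / np.linalg.norm(from_axis)
     b = to_axis / np.linalg.norm(to_axis)
     v = np.cross(a, b)
     s = np.linalg.norm(v)
     c = np.dot(a, b)
     if s < 1e-12:
         # already parallel; the anti-parallel case (a 180 degree rotation
         # about any perpendicular axis) is left out of this sketch
         return coords.copy()
     K = np.array([[0., -v[2], v[1]],
                   [v[2], 0., -v[0]],
                   [-v[1], v[0], 0.]])
     R = np.eye(3) + K + K @ K * ((1 - c) / s ** 2)
     return coords @ R.T

 def rotate_group(coordinates, group_axis, orientation_axis):
     # translate to origin, align the group axis, translate back
     center = coordinates.mean(axis=0)
     return align(coordinates - center, group_axis, orientation_axis) + center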
Example #21
    def transform_coordinates(self, coordinates, argument=None):
        """
        Translate coordinates.

        :Parameters:
            #. coordinates (np.ndarray): The coordinates on which to apply
               the translation.

        :Returns:
            #. coordinates (np.ndarray): The new coordinates after applying
               the translation.
            #. argument (object): Any python object. Not used in this
               generator.
        """
        if coordinates.shape[0] != 2:
            # atoms were removed, fall back to random translation
            return coordinates + generate_random_vector(
                minAmp=self.__amplitude[0], maxAmp=self.__amplitude[1])
        else:
            # get normalized direction vector
            vector = FLOAT_TYPE(coordinates[0, :] - coordinates[1, :])
            vector /= FLOAT_TYPE(np.linalg.norm(vector))
            # create amplitudes
            if self.__symmetric:
                amp0 = amp1 = FLOAT_TYPE(generate_random_float() *
                                         self.__amplitude)
            else:
                amp0 = FLOAT_TYPE(generate_random_float() * self.__amplitude)
                amp1 = FLOAT_TYPE(generate_random_float() * self.__amplitude)
            # create shrink flag
            if self.__shrink is None:
                shrink = (1 - 2 * generate_random_float()) > 0
            else:
                shrink = self.__shrink
            # create directions
            if shrink:
                dir0 = FLOAT_TYPE(-1)
                dir1 = FLOAT_TYPE(1)
            else:
                dir0 = FLOAT_TYPE(1)
                dir1 = FLOAT_TYPE(-1)
            # create translation vectors
            translationVectors = np.empty((2, 3), dtype=FLOAT_TYPE)
            translationVectors[0, :] = self.__agitate[0] * dir0 * amp0 * vector
            translationVectors[1, :] = self.__agitate[1] * dir1 * amp1 * vector
            # translate and return
            return coordinates + translationVectors
Example #22
 def _set_maximum_standard_error(self, maximumStandardError):
     """ Sets the maximum standard error. Use carefully, it's not meant to be used externally.
     maximum squared deviation is what is used to compute the ratio and compare to thresholdRatio.
     """
     if (maximumStandardError is not None) and maximumStandardError:
         assert is_number(maximumStandardError), LOGGER.error("maximumStandardError must be a number.")
         maximumStandardError = FLOAT_TYPE(maximumStandardError)
          assert maximumStandardError>0, LOGGER.error("maximumStandardError must be positive.")
     self.__maximumStandardError = maximumStandardError
     # dump to repository
     self._dump_to_repository({'_QuasiRigidConstraint__maximumStandardError': self.__maximumStandardError})
Example #23
 def listen(self, message, argument=None):
     """   
     Listens to any message sent from the Broadcaster.
     
     :Parameters:
         #. message (object): Any python object to send to constraint's listen method.
         #. argument (object): Any type of argument to pass to the listeners.
     """
     if message in (
             "engine set",
             "update boundary conditions",
     ):
         # set angles and reset constraint
         AL = [
             self.__anglesList[0], self.__anglesList[1],
             self.__anglesList[2],
             [a * FLOAT_TYPE(180.) / PI for a in self.__anglesList[3]],
             [a * FLOAT_TYPE(180.) / PI for a in self.__anglesList[4]]
         ]
         self.set_angles(AL, tform=False)
Example #24
 def normalize_path(self, path):
     """
     Transforms all path distances to floating numbers.
     
     :Parameters:
         #. path (list): The list of moves.
     
     :Returns:
         #. path (list): The list of moves.
     """
     return [FLOAT_TYPE(distance) for distance in path]
Example #25
File: run.py Project: zizai/fullrmc
 def transform_coordinates(self, coordinates, argument=None):
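      """ Apply a random translation confined to the x-y plane (the z component
      of the generated vector is forced to zero) with a magnitude between
      amplitude[0] and amplitude[1]. """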
     # generate random vector and ensure it is not zero
     vector = np.array(1 - 2 * np.random.random(3), dtype=FLOAT_TYPE)
     vector[2] = 0
     norm = np.linalg.norm(vector)
      while norm == 0:
          vector = np.array(1 - 2 * np.random.random(3),
                            dtype=FLOAT_TYPE)
          vector[2] = 0
          norm = np.linalg.norm(vector)
     # normalize vector
     vector /= FLOAT_TYPE(norm)
     # compute baseVector
     baseVector = FLOAT_TYPE(vector * self.amplitude[0])
     # amplify vector
     maxAmp = FLOAT_TYPE(self.amplitude[1] - self.amplitude[0])
     vector *= FLOAT_TYPE(generate_random_float() * maxAmp)
     vector += baseVector
     # translate and return
     return coordinates + vector
Example #26
 def set_scale_factor(self, scaleFactor):
     """
     Sets the scale factor.
     
     :Parameters:
          #. scaleFactor (number): A normalization scale factor used to normalize the computed data to the experimental ones.
     """
     assert is_number(scaleFactor), LOGGER.error("scaleFactor must be a number")
     self.__scaleFactor = FLOAT_TYPE(scaleFactor)
     # dump to repository
     self._dump_to_repository({'_ExperimentalConstraint__scaleFactor' :self.__scaleFactor})
     ## reset constraint
     self.reset_constraint()
Example #27
 def set_amplitude(self, amplitude):
     """
     Sets maximum translation vector allowed amplitude.
     
     :Parameters:
         #. amplitude (number): the maximum allowed translation vector amplitude.
     """
     assert is_number(amplitude), LOGGER.error(
         "Translation amplitude must be a number")
     amplitude = float(amplitude)
     assert amplitude > 0, LOGGER.error(
         "Translation amplitude must be bigger than 0")
     self.__amplitude = FLOAT_TYPE(amplitude)
Example #28
 def set_bias_factor(self, biasFactor):
     """
     Set the biasing factor.
 
     :Parameters:
          #. biasFactor (Number): The biasing factor of every group when a step gets accepted.
            Must be a positive number.
     """
     assert is_number(biasFactor), LOGGER.error(
         "biasFactor must be a number")
     biasFactor = FLOAT_TYPE(biasFactor)
     assert biasFactor >= 0, LOGGER.error("biasFactor must be positive")
     self.__biasFactor = biasFactor
Example #29
    def compute_as_if_amputated(self, realIndex, relativeIndex):
        """
        Compute and return constraint's data and standard error as if
        given atom is amputated.

        :Parameters:
            #. realIndex (numpy.ndarray): Atom's index as a numpy array
               of a single element.
            #. relativeIndex (numpy.ndarray): Atom's relative index as a
               numpy array of a single element.
        """
        # compute data
        self.compute_before_move(realIndexes=realIndex,
                                 relativeIndexes=relativeIndex)
        dataIntra = self.data["intra"] - self.activeAtomsDataBeforeMove["intra"]
        dataInter = self.data["inter"] - self.activeAtomsDataBeforeMove["inter"]
        data = {"intra": dataIntra, "inter": dataInter}
        # temporarily adjust self.__weightingScheme
        weightingScheme = self.weightingScheme
        #relativeIndex   = self.engine._atomsCollector.get_relative_index(relativeIndex[0])
        relativeIndex = relativeIndex[0]
        selectedElement = self.engine.allElements[relativeIndex]
        self.engine.numberOfAtomsPerElement[selectedElement] -= 1
        #elementsWeight        = dict([(el,float(get_element_property(el,self.__weighting))) for el in self.engine.elements])
        WS = get_normalized_weighting(
            numbers=self.engine.numberOfAtomsPerElement,
            weights=self._elementsWeight)
        for k, v in WS.items():
            WS[k] = FLOAT_TYPE(v)
        self._set_weighting_scheme(WS)
        # compute standard error
        if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
            SF = self.adjustScaleFactorFrequency
            self._set_adjust_scale_factor_frequency(0)
        totalPCF = self.__get_total_gr(data)
        standardError = self.compute_standard_error(modelData=totalPCF)
        if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
            self._set_adjust_scale_factor_frequency(SF)
        # reset activeAtoms data
        self.set_active_atoms_data_before_move(None)
        # set data
        self.set_amputation_data({
            'data': data,
            'weightingScheme': self.weightingScheme
        })
        self.set_amputation_standard_error(standardError)
        # reset weightingScheme
        self._set_weighting_scheme(weightingScheme)
        self.engine.numberOfAtomsPerElement[selectedElement] += 1
Example #30
 def set_reject_probability(self, rejectProbability):
     """
     Set the rejection probability.
     
     :Parameters:
          #. rejectProbability (Number): The probability of rejecting steps where standardError increases.
             It must be between 0 and 1, where 1 means rejecting all steps where standardError increases
             and 0 means accepting all steps regardless of whether standardError increases or not.
     """
     assert is_number(rejectProbability), LOGGER.error("rejectProbability must be a number")
     rejectProbability = FLOAT_TYPE(rejectProbability)
     assert rejectProbability>=0 and rejectProbability<=1, LOGGER.error("rejectProbability must be between 0 and 1")
     self.__rejectProbability = rejectProbability
     # dump to repository
      self._dump_to_repository({'_RigidConstraint__rejectProbability': self.__rejectProbability})
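For illustration only, a minimal sketch of how a rejection probability of this kind is typically applied; this is an assumption about usage, not fullrmc's actual acceptance logic. Steps that lower the standard error are always accepted, while a fraction rejectProbability of the worsening steps is rejected.

 import numpy as np

 def accept_step(std_err_increased, reject_probability):
     # always accept improving steps; reject a fraction of the worsening ones
     if not std_err_increased:
         return True
     return np.random.random() >= reject_probability

 # reject_probability = 1 -> every worsening step is rejected
 # reject_probability = 0 -> every step is accepted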