class DispositionTypeParams(BaseModel):

    allowChangeTimer = properties.Bool(
        'Whether the agent can change the redial timer for this disposition.',
    )
    attempts = properties.Integer('Number of redial attempts.')
    timer = properties.Instance(
        'Redial timer.',
        instance_class=Timer,
    )
    useTimer = properties.Bool(
        'Whether this disposition uses a redial timer.',
    )
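
A quick usage sketch (assuming BaseModel ultimately derives from properties.HasProperties, as in the later examples; the values are illustrative):

params = DispositionTypeParams(useTimer=True, attempts=3)
params.allowChangeTimer = False      # a real bool is accepted
try:
    params.allowChangeTimer = 'no'   # properties.Bool rejects strings on assignment
except ValueError as err:
    print(err)
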
class CustomField(BaseModel):
    """This represents optional data that can defined for specific mailbox
    and filled when creating or updating a Conversation."""

    field_name = properties.String(
        'The name of the field; note that this may change if a field '
        'is renamed, but the ``id`` will not.',
        required=True,
    )
    field_type = properties.StringChoice(
        'Type of the field.',
        choices=[
            'SINGLE_LINE',
            'MULTI_LINE',
            'DATA',
            'NUMBER',
            'DROPDOWN',
        ],
        default='SINGLE_LINE',
        required=True,
    )
    required = properties.Bool('Flag for UI to mark the field as required.')
    order = properties.Integer(
        'Relative order of the custom field. Can be ``null`` or a number '
        'between ``0`` and ``255``.',
        min=0,
        max=255,
    )
    options = properties.List(
        'Field options',
        prop=Option,
    )
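
A small round-trip showing how the boolean flag behaves under (de)serialization, assuming BaseModel keeps the standard properties behaviour (the field values are made up):

field = CustomField(field_name='Priority', field_type='DROPDOWN', required=True)
field.serialize(include_class=False)
# -> roughly {'field_name': 'Priority', 'field_type': 'DROPDOWN', 'required': True}
CustomField.deserialize({'field_name': 'Priority', 'required': 'Y'}).required
# -> True ('Y' is one of the strings Bool.from_json accepts, see Example #13)
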
Example #3
class BaseWaveform(properties.HasProperties):

    hasInitialFields = properties.Bool(
        "Does the waveform have initial fields?", default=False
    )

    offTime = properties.Float(
        "off-time of the source", default=0.
    )

    eps = properties.Float(
        "window of time within which the waveform is considered on",
        default=1e-9
    )

    def __init__(self, **kwargs):
        Utils.setKwargs(self, **kwargs)

    def _assertMatchesPair(self, pair):
        assert isinstance(self, pair), (
            "Waveform object must be an instance of a {} "
            "BaseWaveform class.".format(pair.__name__)
        )

    def eval(self, time):
        raise NotImplementedError

    def evalDeriv(self, time):
        raise NotImplementedError  # needed for E-formulation
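
Concrete waveforms subclass this and implement eval; a minimal, hypothetical sketch (the class name and the ramp logic are illustrative only, and Utils.setKwargs is assumed from the snippet's own imports):

class LinearRampOffWaveform(BaseWaveform):
    """Hypothetical waveform: unit amplitude that ramps linearly to zero at offTime."""

    def eval(self, time):
        if time < self.eps:
            return 1.0
        elif time < self.offTime:
            return 1.0 - time / self.offTime
        return 0.0

wave = LinearRampOffWaveform(offTime=1e-2, hasInitialFields=True)
wave.eval(5e-3)   # -> 0.5
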
Example #4
class HasOptionalUnion(properties.HasProperties):
    mybc = properties.Union(
        'union of bool or color',
        props=[properties.Bool(''),
               properties.Color('')],
        required=False,
    )
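
Because the union itself is optional here, instances validate even when mybc is never set; a short sketch assuming the usual properties Union/Color behaviour:

h = HasOptionalUnion()
h.validate()        # passes: mybc is not required
h.mybc = True       # a bool is accepted...
h.mybc = '#FF0000'  # ...and so is a color
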
Example #5
class HasOptPropsUnion(properties.HasProperties):
    mybc = properties.Union(
        'union of bool or color',
        props=[
            properties.Bool('', required=False),
            properties.Color('', required=False),
        ],
        required=True,
    )
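
In contrast, this union is required even though each branch is an optional prop, so validation fails until mybc is assigned; a sketch assuming the standard properties validation behaviour:

h = HasOptPropsUnion()
try:
    h.validate()    # raises ValueError: mybc is required
except ValueError:
    pass
h.mybc = False
h.validate()        # now passes
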
Example #6
class SimpleSmoothDeriv(BaseRegularization):
    """
    Base Simple Smooth Regularization. This base class regularizes on the first
    spatial derivative, not considering length scales, in the provided
    orientation

    **Optional Inputs**

    :param BaseMesh mesh: SimPEG mesh
    :param int nP: number of parameters
    :param IdentityMap mapping: regularization mapping, takes the model from model space to the space you want to regularize in
    :param numpy.ndarray mref: reference model
    :param numpy.ndarray indActive: active cell indices for reducing the size of differential operators in the definition of a regularization mesh
    :param numpy.ndarray cell_weights: vector of cell weights (applied in all terms)
    :param bool mrefInSmooth: include the reference model in the smoothness computation? (e.g. take the derivative of m (False) or of (m - mref) (True))
    """
    def __init__(self, mesh, orientation='x', **kwargs):

        self.orientation = orientation
        assert self.orientation in ['x', 'y', 'z'], (
            "Orientation must be 'x', 'y' or 'z'")

        if self.orientation == 'y':
            assert mesh.dim > 1, (
                "Mesh must have at least 2 dimensions to regularize along the "
                "y-direction")

        elif self.orientation == 'z':
            assert mesh.dim > 2, (
                "Mesh must have at least 3 dimensions to regularize along the "
                "z-direction")

        super(SimpleSmoothDeriv, self).__init__(mesh=mesh, **kwargs)

    mrefInSmooth = properties.Bool(
        "include mref in the smoothness calculation?", default=False)

    @property
    def _multiplier_pair(self):
        return 'alpha_{orientation}'.format(orientation=self.orientation)

    @property
    def W(self):
        """
        Weighting matrix that takes the first spatial difference (no
        length scales considered) in the specified orientation
        """
        W = getattr(
            self.regmesh, "cellDiff{orientation}Stencil".format(
                orientation=self.orientation))
        if self.cell_weights is not None:
            Ave = getattr(self.regmesh, 'aveCC2F{}'.format(self.orientation))
            W = (Utils.sdiag((Ave * self.cell_weights)**0.5) * W)
        return W
Example #7
class DomainConditionBoolean(DomainCondition):
    """This represents an integer query."""

    value = properties.Bool(
        'Boolean Value',
        required=True,
    )

    def __str__(self):
        """Return a string usable as a query part in an API request."""
        value = 'true' if self.value else 'false'
        return '%s:%s' % (self.field_name, value)
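
The query string this produces looks like 'field:true' or 'field:false'; a hypothetical sketch (assuming field_name, which __str__ references, is defined on the DomainCondition base class and can be passed as a keyword):

cond = DomainConditionBoolean(field_name='subscribed', value=True)
str(cond)   # -> 'subscribed:true'
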
Example #8
class GlobalAEMSurvey(Survey.BaseSurvey, properties.HasProperties):

    # This assumes multiple sounding locations
    rx_locations = properties.Array("Receiver locations ",
                                    dtype=float,
                                    shape=('*', 3))
    src_locations = properties.Array("Source locations ",
                                     dtype=float,
                                     shape=('*', 3))
    topo = properties.Array("Topography", dtype=float, shape=('*', 3))

    half_switch = properties.Bool("Switch for half-space", default=False)

    _pred = None

    @Utils.requires('prob')
    def dpred(self, m, f=None):
        """
            Return predicted data.
            Predicted data (`_pred`) are computed when
            self.prob.fields is called.
        """
        if f is None:
            f = self.prob.fields(m)

        return self._pred

    @property
    def n_sounding(self):
        """
            # of Receiver locations
        """
        return self.rx_locations.shape[0]

    def read_xyz_data(self, fname):
        """
        Read CSV file format.
        This is a placeholder at this point.
        """
        pass

    @property
    def nD(self):
        # Need to generalize this for the dual moment data
        if getattr(self, '_nD', None) is None:
            self._nD = self.nD_vec.sum()
        return self._nD
Example #9
class BaseSparse(BaseRegularization):
    """
    Base class for building up the components of the Sparse Regularization
    """
    def __init__(self, mesh, **kwargs):
        self._stashedR = None
        super(BaseSparse, self).__init__(mesh=mesh, **kwargs)

    model = properties.Array("current model", dtype=float)

    epsilon = properties.Float("Threshold value for the model norm",
                               default=1e-3,
                               required=True)

    norm = properties.Array("norm used", dtype=float)

    space = properties.String("By default inherit the objective",
                              default='linear')

    gradientType = properties.String("type of gradient", default='components')

    scale = properties.Array(
        "General nob for scaling",
        dtype=float,
    )

    # Give the option to scale or not
    scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms",
                                 default=True)

    @properties.validator('scale')
    def _validate_scale(self, change):
        if change['value'] is not None:
            # todo: residual size? we need to know the expected end shape
            if self._nC_residual != '*':
                assert len(change['value']) == self._nC_residual, (
                    'scale must be length {} not {}'.format(
                        self._nC_residual, len(change['value'])))

    @property
    def stashedR(self):
        return self._stashedR

    @stashedR.setter
    def stashedR(self, value):
        self._stashedR = value
Example #10
class SearchConversation(BaseConversation):
    """This represents a conversation as returned by search results."""

    mailbox_id = properties.Integer(
        'The ID of the mailbox this conversation is in.',
        required=True,
    )
    customer_name = properties.String(
        'Name of the customer this conversation is regarding.',
        required=True,
    )
    customer_email = properties.String(
        'Email address of the customer',
        required=True,
    )
    has_attachments = properties.Bool(
        '``True`` when the conversation has at least one attachment.',
    )
Example #11
class BaseWaveform(properties.HasProperties):

    hasInitialFields = properties.Bool(
        "Does the waveform have initial fields?", default=False)

    offTime = properties.Float("off-time of the source", default=0.0)

    eps = properties.Float(
        "window of time within which the waveform is considered on",
        default=1e-9)

    def __init__(self, **kwargs):
        setKwargs(self, **kwargs)

    def eval(self, time):
        raise NotImplementedError

    def evalDeriv(self, time):
        raise NotImplementedError  # needed for E-formulation
Example #12
class SparseDeriv(BaseSparse):
    """
    Base Class for sparse regularization on first spatial derivatives
    """
    def __init__(self, mesh, orientation='x', **kwargs):

        self.orientation = orientation
        super(SparseDeriv, self).__init__(mesh=mesh, **kwargs)

    mrefInSmooth = properties.Bool(
        "include mref in the smoothness calculation?", default=False)

    @property
    def _multiplier_pair(self):
        return 'alpha_{orientation}'.format(orientation=self.orientation)

    @property
    def f_m(self):
        return self.cellDiffStencil * (self.mapping * self.model)

    @property
    def cellDiffStencil(self):
        return getattr(self.regmesh,
                       'cellDiff{}Stencil'.format(self.orientation))

    @property
    def W(self):

        Ave = getattr(self.regmesh, 'aveCC2F{}'.format(self.orientation))

        if getattr(self, 'model', None) is None:
            R = Utils.speye(self.cellDiffStencil.shape[0])

        else:
            r = self.R(self.f_m)  # , self.eps_q, self.norm)
            R = Utils.sdiag(r)

        if self.cell_weights is not None:
            return (Utils.sdiag(
                (self.gamma * (Ave * self.cell_weights))**0.5) * R *
                    self.cellDiffStencil)
        return ((self.gamma)**0.5) * R * self.cellDiffStencil
Example #13
    def test_bool(self):
        class BoolOpts(properties.HasProperties):
            mybool = properties.Bool('My bool')

        opt = BoolOpts(mybool=True)
        assert opt.mybool is True
        self.assertRaises(ValueError, lambda: setattr(opt, 'mybool', 'true'))
        opt.mybool = False
        assert opt.mybool is False

        assert properties.Bool('').equal(True, True)
        assert not properties.Bool('').equal(True, 1)
        assert not properties.Bool('').equal(True, 'true')

        json = properties.Bool.to_json(opt.mybool)
        assert not json
        assert not properties.Bool.from_json(json)
        with self.assertRaises(ValueError):
            properties.Bool.from_json({})
        with self.assertRaises(ValueError):
            properties.Bool.from_json('nope')
        assert properties.Bool.from_json('true')
        assert properties.Bool.from_json('y')
        assert properties.Bool.from_json('Yes')
        assert properties.Bool.from_json('ON')
        assert not properties.Bool.from_json('false')
        assert not properties.Bool.from_json('N')
        assert not properties.Bool.from_json('no')
        assert not properties.Bool.from_json('OFF')

        self.assertEqual(opt.serialize(include_class=False), {'mybool': False})

        assert BoolOpts.deserialize({'mybool': 'Y'}).mybool
        assert BoolOpts._props['mybool'].deserialize(None) is None

        assert properties.Bool('').equal(True, True)
        assert not properties.Bool('').equal(True, 1)
        assert not properties.Bool('').equal(True, 'true')

        with self.assertRaises(ValueError):
            BoolOpts._props['mybool'].assert_valid(opt, 'true')

        opt.validate()
        opt._backend['mybool'] = 'true'
        with self.assertRaises(ValueError):
            opt.validate()
Example #14
class HasOptionalSet(properties.HasProperties):
    myset = properties.Set('',
                           properties.Bool(''),
                           required=False,
                           observe_mutations=om)
Example #15
class HasOptPropList(properties.HasProperties):
    mylist = properties.List(
        doc='',
        prop=properties.Bool('', required=False),
        default=properties.undefined,
    )
Example #16
class HasOptionalList(properties.HasProperties):
    mylist = properties.List('',
                             properties.Bool(''),
                             required=False,
                             observe_mutations=om)
Example #17
class HasOptPropTuple(properties.HasProperties):
    mytuple = properties.Tuple(
        doc='',
        prop=properties.Bool('', required=False),
        default=properties.undefined,
    )
Example #18
class HasOptionalTuple(properties.HasProperties):
    mytuple = properties.Tuple('', properties.Bool(''), required=False)
Example #19
class Base(properties.HasProperties):

    check_accuracy = properties.Bool(
        "check the accuracy of the solve?",
        default=False
    )

    accuracy_tol = properties.Float(
        "tolerance on the accuracy of the solver",
        default=1e-6
    )

    def __init__(self, A):
        self.A = A.tocsr()

    def set_kwargs(self, ignore=None, **kwargs):
        """
            Set keyword arguments (kwargs) that are present on the object,
            and throw an error if they don't exist.
        """
        if ignore is None:
            ignore = []
        for attr in kwargs:
            if attr in ignore:
                continue
            if hasattr(self, attr):
                setattr(self, attr, kwargs[attr])
            else:
                raise Exception('{0!s} attr is not recognized'.format(attr))

    @property
    def _transposeClass(self):
        return self.__class__

    @property
    def T(self):
        if self._transposeClass is None:
            raise Exception(
                'The transpose for the {} class is not possible.'.format(
                    self.__class__.__name__
                )
            )
        newS = self._transposeClass(self.A.T)
        return newS

    def _compute_accuracy(self, rhs, x):
        nrm = np.linalg.norm(np.ravel(self.A*x - rhs), np.inf)
        nrm_rhs = np.linalg.norm(np.ravel(rhs), np.inf)
        if nrm_rhs > 0:
            nrm /= nrm_rhs
        if nrm > self.accuracy_tol:
            msg = 'Accuracy on solve is above tolerance: {0:e} > {1:e}'.format(
                nrm, self.accuracy_tol
            )
            raise Exception(msg)

    def _solve(self, rhs):

        n = self.A.shape[0]
        assert rhs.size % n == 0, 'Incorrect shape of rhs.'
        nrhs = rhs.size // n

        if len(rhs.shape) == 1 or rhs.shape[1] == 1:
            x = self._solve1(rhs)
        else:
            x = self._solveM(rhs)

        if self.check_accuracy:
            self._compute_accuracy(rhs, x)

        if nrhs == 1:
            return x.flatten()
        elif nrhs > 1:
            return x.reshape((n, nrhs), order='F')

    def clean(self):
        pass

    def __mul__(self, val):
        if type(val) is np.ndarray:
            return self._solve(val)
        raise TypeError('Can only multiply by a numpy array.')

    @property
    def is_real(self):
        return self.A.dtype == float

    @property
    def is_symmetric(self):
        return getattr(self, '_is_symmetric', False)

    @is_symmetric.setter
    def is_symmetric(self, value):
        self._is_symmetric = value

    @property
    def is_hermitian(self):
        if self.is_real and self.is_symmetric:
            return True
        else:
            return getattr(self, '_is_hermitian', False)

    @is_hermitian.setter
    def is_hermitian(self, value):
        self._is_hermitian = value

    @property
    def is_positive_definite(self):
        return getattr(self, '_is_positive_definite', False)

    @is_positive_definite.setter
    def is_positive_definite(self, value):
        self._is_positive_definite = value
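
A rough sketch of how a concrete solver can hook into this base class (the subclass name and the scipy-based solves are illustrative, not part of the original code):

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

class WrappedSpsolve(Base):
    """Hypothetical subclass: direct solves via scipy."""

    def _solve1(self, rhs):
        return spla.spsolve(self.A, rhs)

    def _solveM(self, rhs):
        # solve column by column to keep the sketch simple
        return np.column_stack(
            [spla.spsolve(self.A, rhs[:, i]) for i in range(rhs.shape[1])])

A = 2.0 * sp.identity(5, format='csr')
Ainv = WrappedSpsolve(A)
Ainv.check_accuracy = True   # the properties.Bool flag checked inside _solve
x = Ainv * np.ones(5)        # __mul__ dispatches to _solve; x is 0.5 * ones
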
Example #20
class RichardsProblem(Problem.BaseTimeProblem):
    """docstring for RichardsProblem"""

    modelMap = properties.Property("the mapping")

    boundaryConditions = properties.Array("boundary conditions.")
    initialConditions = properties.Array("initial conditions.")

    surveyPair = RichardsSurvey
    mapPair = RichardsMap

    debug = properties.Bool("Show all messages")

    Solver = Solver
    solverOpts = {}

    def __init__(self, mesh, modelMap=None, **kwargs):
        assert (isinstance(
            modelMap,
            self.mapPair)), ('modelMap must be a {} class not {}'.format(
                self.mapPair.__name__, modelMap))
        self.modelMap = modelMap
        Problem.BaseTimeProblem.__init__(self, mesh, **kwargs)

    def getBoundaryConditions(self, ii, u_ii):
        if type(self.boundaryConditions) is np.ndarray:
            return self.boundaryConditions

        time = self.timeMesh.vectorCCx[ii]

        return self.boundaryConditions(time, u_ii)

    method = properties.StringChoice(
        "Formulation used, See notes in Celia et al., 1990.",
        choices=['mixed', 'head'])

    doNewton = properties.Bool("Do a Newton iteration vs. a Picard iteration ",
                               default=False)

    maxIterRootFinder = properties.Integer(
        "Maximum iterations for rootFinder iteration.", default=30)

    tolRootFinder = properties.Float(
        "Maximum iterations for rootFinder iteration.", default=1e-4)

    @properties.observer(['doNewton', 'maxIterRootFinder', 'tolRootFinder'])
    def _on_root_finder_update(self, change):
        """
            Setting doNewton will clear the rootFinder,
            which will be reinitialized when called
        """
        if hasattr(self, '_rootFinder'):
            del self._rootFinder

    @property
    def rootFinder(self):
        """Root-finding Algorithm"""
        if getattr(self, '_rootFinder', None) is None:
            self._rootFinder = Optimization.NewtonRoot(
                doLS=self.doNewton,
                maxIter=self.maxIterRootFinder,
                tol=self.tolRootFinder,
                Solver=self.Solver)
        return self._rootFinder

    @Utils.timeIt
    def fields(self, m):
        tic = time.time()
        u = list(range(self.nT + 1))
        u[0] = self.initialConditions
        for ii, dt in enumerate(self.timeSteps):
            bc = self.getBoundaryConditions(ii, u[ii])
            u[ii + 1] = self.rootFinder.root(
                lambda hn1m, return_g=True: self.getResidual(
                    m, u[ii], hn1m, dt, bc, return_g=return_g),
                u[ii])
            if self.debug:
                print("Solving Fields ({0:4d}/{1:d} - {2:3.1f}% Done) {3:d} "
                      "Iterations, {4:4.2f} seconds".format(
                          ii + 1, self.nT, 100.0 * (ii + 1) / self.nT,
                          self.rootFinder.iter,
                          time.time() - tic))
        return u

    @property
    def Dz(self):
        if self.mesh.dim == 1:
            Dz = self.mesh.faceDivx
        elif self.mesh.dim == 2:
            Dz = sp.hstack((Utils.spzeros(
                self.mesh.nC, self.mesh.vnF[0]), self.mesh.faceDivy),
                           format='csr')
        elif self.mesh.dim == 3:
            Dz = sp.hstack(
                (Utils.spzeros(self.mesh.nC, self.mesh.vnF[0] +
                               self.mesh.vnF[1]), self.mesh.faceDivz),
                format='csr')
        return Dz

    @Utils.timeIt
    def diagsJacobian(self, m, hn, hn1, dt, bc):

        DIV = self.mesh.faceDiv
        GRAD = self.mesh.cellGrad
        BC = self.mesh.cellGradBC
        AV = self.mesh.aveF2CC.T
        Dz = self.Dz

        dT = self.modelMap.thetaDerivU(hn, m)
        dT1 = self.modelMap.thetaDerivU(hn1, m)
        K1 = self.modelMap.k(hn1, m)
        dK1 = self.modelMap.kDerivU(hn1, m)
        dKm1 = self.modelMap.kDerivM(hn1, m)

        # Compute part of the derivative of:
        #
        #       DIV*diag(GRAD*hn1+BC*bc)*(AV*(1.0/K))^-1

        DdiagGh1 = DIV * Utils.sdiag(GRAD * hn1 + BC * bc)
        diagAVk2_AVdiagK2 = (Utils.sdiag(
            (AV * (1. / K1))**(-2)) * AV * Utils.sdiag(K1**(-2)))

        # The matrix that we are computing has the form:
        #
        #   -                                      -   -  -     -  -
        #  |  Adiag                                 | | h1 |   | b1 |
        #  |   Asub    Adiag                        | | h2 |   | b2 |
        #  |            Asub    Adiag               | | h3 | = | b3 |
        #  |                 ...     ...            | | .. |   | .. |
        #  |                         Asub    Adiag  | | hn |   | bn |
        #   -                                      -   -  -     -  -

        Asub = (-1.0 / dt) * dT

        Adiag = ((1.0 / dt) * dT1 - DdiagGh1 * diagAVk2_AVdiagK2 * dK1 -
                 DIV * Utils.sdiag(1. / (AV * (1. / K1))) * GRAD -
                 Dz * diagAVk2_AVdiagK2 * dK1)

        B = DdiagGh1 * diagAVk2_AVdiagK2 * dKm1 + Dz * diagAVk2_AVdiagK2 * dKm1

        return Asub, Adiag, B

    @Utils.timeIt
    def getResidual(self, m, hn, h, dt, bc, return_g=True):
        """
            Where h is the proposed value for the next time iterate (h_{n+1})
        """
        DIV = self.mesh.faceDiv
        GRAD = self.mesh.cellGrad
        BC = self.mesh.cellGradBC
        AV = self.mesh.aveF2CC.T
        Dz = self.Dz

        T = self.modelMap.theta(h, m)
        dT = self.modelMap.thetaDerivU(h, m)
        Tn = self.modelMap.theta(hn, m)
        K = self.modelMap.k(h, m)
        dK = self.modelMap.kDerivU(h, m)

        aveK = 1. / (AV * (1. / K))

        RHS = DIV * Utils.sdiag(aveK) * (GRAD * h + BC * bc) + Dz * aveK
        if self.method == 'mixed':
            r = (T - Tn) / dt - RHS
        elif self.method == 'head':
            r = dT * (h - hn) / dt - RHS

        if not return_g:
            return r

        J = dT / dt - DIV * Utils.sdiag(aveK) * GRAD
        if self.doNewton:
            DDharmAve = Utils.sdiag(aveK**2) * AV * Utils.sdiag(K**(-2)) * dK
            J = J - DIV * Utils.sdiag(GRAD * h +
                                      BC * bc) * DDharmAve - Dz * DDharmAve

        return r, J

    @Utils.timeIt
    def Jfull(self, m, f=None):
        if f is None:
            f = self.fields(m)

        nn = len(f) - 1
        Asubs, Adiags, Bs = list(range(nn)), list(range(nn)), list(range(nn))
        for ii in range(nn):
            dt = self.timeSteps[ii]
            bc = self.getBoundaryConditions(ii, f[ii])
            Asubs[ii], Adiags[ii], Bs[ii] = self.diagsJacobian(
                m, f[ii], f[ii + 1], dt, bc)
        Ad = sp.block_diag(Adiags)
        zRight = Utils.spzeros((len(Asubs) - 1) * Asubs[0].shape[0],
                               Adiags[0].shape[1])
        zTop = Utils.spzeros(Adiags[0].shape[0],
                             len(Adiags) * Adiags[0].shape[1])
        As = sp.vstack((zTop, sp.hstack((sp.block_diag(Asubs[1:]), zRight))))
        A = As + Ad
        B = np.array(sp.vstack(Bs).todense())

        Ainv = self.Solver(A, **self.solverOpts)
        P = self.survey.evalDeriv(f, m)
        AinvB = Ainv * B
        z = np.zeros((self.mesh.nC, B.shape[1]))
        zAinvB = np.vstack((z, AinvB))
        J = P * zAinvB
        return J

    @Utils.timeIt
    def Jvec(self, m, v, f=None):
        if f is None:
            f = self.fields(m)

        JvC = list(range(len(f) -
                         1))  # Cell to hold each row of the long vector

        # This is done via forward substitution.
        bc = self.getBoundaryConditions(0, f[0])
        temp, Adiag, B = self.diagsJacobian(m, f[0], f[1], self.timeSteps[0],
                                            bc)
        Adiaginv = self.Solver(Adiag, **self.solverOpts)
        JvC[0] = Adiaginv * (B * v)

        for ii in range(1, len(f) - 1):
            bc = self.getBoundaryConditions(ii, f[ii])
            Asub, Adiag, B = self.diagsJacobian(m, f[ii], f[ii + 1],
                                                self.timeSteps[ii], bc)
            Adiaginv = self.Solver(Adiag, **self.solverOpts)
            JvC[ii] = Adiaginv * (B * v - Asub * JvC[ii - 1])

        P = self.survey.evalDeriv(f, m)
        return P * np.concatenate([np.zeros(self.mesh.nC)] + JvC)

    @Utils.timeIt
    def Jtvec(self, m, v, f=None):
        if f is None:
            f = self.fields(m)

        P = self.survey.evalDeriv(f, m)
        PTv = P.T * v

        # This is done via backward substitution.
        minus = 0
        BJtv = 0
        for ii in range(len(f) - 1, 0, -1):
            bc = self.getBoundaryConditions(ii - 1, f[ii - 1])
            Asub, Adiag, B = self.diagsJacobian(m, f[ii - 1], f[ii],
                                                self.timeSteps[ii - 1], bc)
            # select the correct part of v
            vpart = list(
                range((ii) * Adiag.shape[0], (ii + 1) * Adiag.shape[0]))
            AdiaginvT = self.Solver(Adiag.T, **self.solverOpts)
            JTvC = AdiaginvT * (PTv[vpart] - minus)
            minus = Asub.T * JTvC  # this is now the super diagonal.
            BJtv = BJtv + B.T * JTvC

        return BJtv
Example #21
class Update_IRLS(InversionDirective):

    f_old = 0
    f_min_change = 1e-2
    beta_tol = 1e-1
    beta_ratio_l2 = None
    prctile = 100
    chifact_start = 1.0
    chifact_target = 1.0

    # Solving parameter for IRLS (mode:2)
    irls_iteration = 0
    minGNiter = 1
    max_irls_iterations = properties.Integer("maximum irls iterations",
                                             default=20)
    iterStart = 0
    sphericalDomain = False

    # Beta schedule
    update_beta = properties.Bool("Update beta", default=True)
    beta_search = properties.Bool("Do a beta search", default=False)
    coolingFactor = properties.Float("Cooling factor", default=2.0)
    coolingRate = properties.Integer("Cooling rate", default=1)
    ComboObjFun = False
    mode = 1
    coolEpsOptimized = True
    coolEps_p = True
    coolEps_q = True
    floorEps_p = 1e-8
    floorEps_q = 1e-8
    coolEpsFact = 1.2
    silent = False
    fix_Jmatrix = False

    maxIRLSiters = deprecate_property(
        max_irls_iterations,
        "maxIRLSiters",
        new_name="max_irls_iterations",
        removal_version="0.15.0",
    )
    updateBeta = deprecate_property(update_beta,
                                    "updateBeta",
                                    new_name="update_beta",
                                    removal_version="0.15.0")
    betaSearch = deprecate_property(beta_search,
                                    "betaSearch",
                                    new_name="beta_search",
                                    removal_version="0.15.0")

    @property
    def target(self):
        if getattr(self, "_target", None) is None:
            nD = 0
            for survey in self.survey:
                nD += survey.nD

            self._target = nD * 0.5 * self.chifact_target

        return self._target

    @target.setter
    def target(self, val):
        self._target = val

    @property
    def start(self):
        if getattr(self, "_start", None) is None:
            if isinstance(self.survey, list):
                self._start = 0
                for survey in self.survey:
                    self._start += survey.nD * 0.5 * self.chifact_start

            else:

                self._start = self.survey.nD * 0.5 * self.chifact_start
        return self._start

    @start.setter
    def start(self, val):
        self._start = val

    def initialize(self):

        if self.mode == 1:

            self.norms = []
            for reg in self.reg.objfcts:
                self.norms.append(reg.norms)
                reg.norms = np.c_[2.0, 2.0, 2.0, 2.0]
                reg.model = self.invProb.model

        # Update the model used by the regularization
        for reg in self.reg.objfcts:
            reg.model = self.invProb.model

        if self.sphericalDomain:
            self.angleScale()

    def endIter(self):

        if self.sphericalDomain:
            self.angleScale()

        # Check if misfit is within the tolerance, otherwise scale beta
        if np.all([
                np.abs(1.0 - self.invProb.phi_d / self.target) > self.beta_tol,
                self.update_beta,
                self.mode != 1,
        ]):

            ratio = self.target / self.invProb.phi_d

            if ratio > 1:
                ratio = np.mean([2.0, ratio])

            else:
                ratio = np.mean([0.75, ratio])

            self.invProb.beta = self.invProb.beta * ratio

            if np.all([self.mode != 1, self.beta_search]):
                print("Beta search step")
                # self.update_beta = False
                # Re-use previous model and continue with new beta
                self.invProb.model = self.reg.objfcts[0].model
                self.opt.xc = self.reg.objfcts[0].model
                self.opt.iter -= 1
                return

        elif np.all([self.mode == 1, self.opt.iter % self.coolingRate == 0]):

            self.invProb.beta = self.invProb.beta / self.coolingFactor

        phim_new = 0
        for reg in self.reg.objfcts:
            for comp, multiplier in zip(reg.objfcts, reg.multipliers):
                if multiplier > 0:
                    phim_new += np.sum(comp.f_m**2.0 /
                                       (comp.f_m**2.0 + comp.epsilon**2.0)
                                       **(1 - comp.norm / 2.0))

        # Update the model used by the regularization
        phi_m_last = []
        for reg in self.reg.objfcts:
            reg.model = self.invProb.model
            phi_m_last += [reg(self.invProb.model)]

        # After reaching target misfit with l2-norm, switch to IRLS (mode:2)
        if np.all([self.invProb.phi_d < self.start, self.mode == 1]):
            self.startIRLS()

        # Only update after GN iterations
        if np.all([(self.opt.iter - self.iterStart) % self.minGNiter == 0,
                   self.mode != 1]):

            if self.fix_Jmatrix:
                print(">> Fix Jmatrix")
                self.invProb.dmisfit.simulation.fix_Jmatrix = True
            # Check for maximum number of IRLS cycles
            if self.irls_iteration == self.max_irls_iterations:
                if not self.silent:
                    print("Reach maximum number of IRLS cycles:" +
                          " {0:d}".format(self.max_irls_iterations))

                self.opt.stopNextIteration = True
                return

            # Print to screen
            for reg in self.reg.objfcts:

                if reg.eps_p > self.floorEps_p and self.coolEps_p:
                    reg.eps_p /= self.coolEpsFact
                    # print('Eps_p: ' + str(reg.eps_p))
                if reg.eps_q > self.floorEps_q and self.coolEps_q:
                    reg.eps_q /= self.coolEpsFact
                    # print('Eps_q: ' + str(reg.eps_q))

            # Remember the value of the norm from previous R matrices
            # self.f_old = self.reg(self.invProb.model)

            self.irls_iteration += 1

            # Reset the regularization matrices so that they are
            # recalculated for the current model. Do this at all levels of comboObj
            for reg in self.reg.objfcts:

                # If comboObj, go down one more level
                for comp in reg.objfcts:
                    comp.stashedR = None

            for dmis in self.dmisfit.objfcts:
                if getattr(dmis, "stashedR", None) is not None:
                    dmis.stashedR = None

            # Compute new model objective function value
            f_change = np.abs(self.f_old - phim_new) / (self.f_old + 1e-12)

            # Check if the function has changed enough
            if np.all([
                    f_change < self.f_min_change,
                    self.irls_iteration > 1,
                    np.abs(1.0 - self.invProb.phi_d / self.target) <
                    self.beta_tol,
            ]):

                print("Minimum decrease in regularization." + "End of IRLS")
                self.opt.stopNextIteration = True
                return

            self.f_old = phim_new

            self.update_beta = True
            self.invProb.phi_m_last = self.reg(self.invProb.model)

    def startIRLS(self):
        if not self.silent:
            print("Reached starting chifact with l2-norm regularization:" +
                  " Start IRLS steps...")

        self.mode = 2

        if getattr(self.opt, "iter", None) is None:
            self.iterStart = 0
        else:
            self.iterStart = self.opt.iter

        self.invProb.phi_m_last = self.reg(self.invProb.model)

        # Either use the supplied epsilon, or fix it based on the distribution
        # of model values
        for reg in self.reg.objfcts:

            if getattr(reg, "eps_p", None) is None:

                reg.eps_p = np.percentile(
                    np.abs(reg.mapping * reg._delta_m(self.invProb.model)),
                    self.prctile)

            if getattr(reg, "eps_q", None) is None:

                reg.eps_q = np.percentile(
                    np.abs(reg.mapping * reg._delta_m(self.invProb.model)),
                    self.prctile)

        # Re-assign the norms supplied by user l2 -> lp
        for reg, norms in zip(self.reg.objfcts, self.norms):
            reg.norms = norms

        # Save l2-model
        self.invProb.l2model = self.invProb.model.copy()

        # Print to screen
        for reg in self.reg.objfcts:
            if not self.silent:
                print("eps_p: " + str(reg.eps_p) + " eps_q: " + str(reg.eps_q))

    def angleScale(self):
        """
            Update the scales used by the regularization for the
            different blocks of models
        """
        # Currently implemented for MVI-S only
        max_p = []
        for reg in self.reg.objfcts[0].objfcts:
            eps_p = reg.epsilon
            f_m = abs(reg.f_m)
            max_p += [np.max(f_m)]

        max_p = np.asarray(max_p).max()

        max_s = [np.pi, np.pi]
        for obj, var in zip(self.reg.objfcts[1:3], max_s):
            obj.scales = np.ones(obj.scales.shape) * max_p / var

    def validate(self, directiveList):
        # check if a linear preconditioner is in the list, if not warn else
        # assert that it is listed after the IRLS directive
        dList = directiveList.dList
        self_ind = dList.index(self)
        lin_precond_ind = [isinstance(d, UpdatePreconditioner) for d in dList]

        if any(lin_precond_ind):
            assert lin_precond_ind.index(True) > self_ind, (
                "The directive 'UpdatePreconditioner' must be after Update_IRLS "
                "in the directiveList")
        else:
            warnings.warn(
                "Without a Linear preconditioner, convergence may be slow. "
                "Consider adding `Directives.UpdatePreconditioner` to your "
                "directives list")
        return True
Example #22
class SimpleComboRegularization(ComboObjectiveFunction):
    def __init__(self, mesh, objfcts=[], **kwargs):

        super(SimpleComboRegularization, self).__init__(objfcts=objfcts,
                                                        multipliers=None)
        self.regmesh = RegularizationMesh(mesh)
        if "indActive" in kwargs.keys():
            indActive = kwargs.pop("indActive")
            self.regmesh.indActive = indActive
        utils.setKwargs(self, **kwargs)

        # link these attributes
        linkattrs = [
            "regmesh",
            "indActive",
        ]

        for attr in linkattrs:
            val = getattr(self, attr)
            if val is not None:
                [setattr(fct, attr, val) for fct in self.objfcts]

    # Properties
    alpha_s = props.Float("smallness weight")
    alpha_x = props.Float("weight for the first x-derivative")
    alpha_y = props.Float("weight for the first y-derivative")
    alpha_z = props.Float("weight for the first z-derivative")
    alpha_xx = props.Float("weight for the second x-derivative")
    alpha_yy = props.Float("weight for the second y-derivative")
    alpha_zz = props.Float("weight for the second z-derivative")

    counter = None

    mref = props.Array("reference model")
    mrefInSmooth = properties.Bool(
        "include mref in the smoothness calculation?", default=False)
    indActive = properties.Array("indices of active cells in the mesh",
                                 dtype=(bool, int))
    cell_weights = properties.Array(
        "regularization weights applied at cell centers", dtype=float)
    scale = properties.Float("function scaling applied inside the norm",
                             default=1.0)
    regmesh = properties.Instance("regularization mesh",
                                  RegularizationMesh,
                                  required=True)
    mapping = properties.Instance(
        "mapping which is applied to model in the regularization",
        maps.IdentityMap,
        default=maps.IdentityMap(),
    )

    # Other properties and methods
    @property
    def nP(self):
        """
        number of model parameters
        """
        if getattr(self.mapping, "nP") != "*":
            return self.mapping.nP
        elif getattr(self.regmesh, "nC") != "*":
            return self.regmesh.nC
        else:
            return "*"

    @property
    def _nC_residual(self):
        """
        Shape of the residual
        """
        nC = getattr(self.regmesh, "nC", None)
        mapping = getattr(self, "mapping", None)

        if nC != "*" and nC is not None:
            return self.regmesh.nC
        elif mapping is not None and mapping.shape[0] != "*":
            return self.mapping.shape[0]
        else:
            return self.nP

    def _delta_m(self, m):
        if self.mref is None:
            return m
        return -self.mref + m  # in case self.mref is Zero, returns type m

    @property
    def multipliers(self):
        """
        Factors that multiply the objective functions that are summed together
        to build the composite regularization
        """
        return [
            getattr(self, "{alpha}".format(alpha=objfct._multiplier_pair))
            for objfct in self.objfcts
        ]

    # Observers and Validators
    @properties.validator("indActive")
    def _cast_to_bool(self, change):
        value = change["value"]
        if value is not None:
            if value.dtype != "bool":  # cast it to a bool otherwise
                tmp = value
                value = np.zeros(self.regmesh.nC, dtype=bool)
                value[tmp] = True
                change["value"] = value

        # update regmesh indActive
        if getattr(self, "regmesh", None) is not None:
            self.regmesh.indActive = utils.mkvc(value)

    @properties.observer("indActive")
    def _update_regmesh_indActive(self, change):
        # update regmesh indActive
        if getattr(self, "regmesh", None) is not None:
            self.regmesh.indActive = change["value"]

    @properties.observer("mref")
    def _mirror_mref_to_objfctlist(self, change):
        for fct in self.objfcts:
            if getattr(fct, "mrefInSmooth", None) is not None:
                if self.mrefInSmooth is False:
                    fct.mref = utils.Zero()
                else:
                    fct.mref = change["value"]
            else:
                fct.mref = change["value"]

    @properties.observer("mrefInSmooth")
    def _mirror_mrefInSmooth_to_objfctlist(self, change):
        for fct in self.objfcts:
            if getattr(fct, "mrefInSmooth", None) is not None:
                fct.mrefInSmooth = change["value"]

    @properties.observer("indActive")
    def _mirror_indActive_to_objfctlist(self, change):
        value = change["value"]
        if value is not None:
            if value.dtype != "bool":
                tmp = value
                value = np.zeros(self.mesh.nC, dtype=bool)
                value[tmp] = True
                change["value"] = value

        if getattr(self, "regmesh", None) is not None:
            self.regmesh.indActive = value

        for fct in self.objfcts:
            fct.indActive = value
Example #23
class SparseDeriv(BaseSparse):
    """
    Base Class for sparse regularization on first spatial derivatives
    """

    def __init__(self, mesh, orientation="x", **kwargs):
        self.orientation = orientation
        super(SparseDeriv, self).__init__(mesh=mesh, **kwargs)

    mrefInSmooth = properties.Bool(
        "include mref in the smoothness calculation?", default=False
    )

    # Give the option to scale or not
    scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms", default=True)

    @utils.timeIt
    def __call__(self, m):
        """
        We use a weighted 2-norm objective function

        .. math::

            r(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top
                   W(m-m_\\text{ref})}
        """
        if self.mrefInSmooth:

            f_m = self._delta_m(m)

        else:
            f_m = m
        if self.scale is None:
            self.scale = np.ones(self.mapping.shape[0])

        if self.space == "spherical":
            Ave = getattr(self.regmesh, "aveCC2F{}".format(self.orientation))

            if getattr(self, "model", None) is None:
                R = utils.speye(self.cellDiffStencil.shape[0])

            else:
                r = self.R(self.f_m)
                R = utils.sdiag(r)

            if self.cell_weights is not None:
                W = utils.sdiag((Ave * (self.scale * self.cell_weights)) ** 0.5) * R

            else:
                W = utils.sdiag((Ave * (self.scale * self.regmesh.vol)) ** 0.5) * R

            theta = self.cellDiffStencil * (self.mapping * f_m)
            dmdx = utils.mat_utils.coterminal(theta)
            r = W * dmdx

        else:
            r = self.W * (self.mapping * f_m)

        return 0.5 * r.dot(r)

    def R(self, f_m):
        # if R is stashed, return that instead
        if getattr(self, "stashedR") is not None:
            return self.stashedR

        # Default
        eta = np.ones_like(f_m)

        if self.scaledIRLS:
            # Eta scaling is important for mix-norms...do not mess with it
            # Scale on l2-norm gradient: f_m.max()
            maxVal = np.ones_like(f_m) * np.abs(f_m).max()

            # Compute theoretical maximum gradients for p < 1
            maxVal[self.norm < 1] = self.epsilon / np.sqrt(
                1.0 - self.norm[self.norm < 1]
            )
            maxGrad = maxVal / (
                maxVal ** 2.0 + (self.epsilon * self.length_scales) ** 2.0
            ) ** (1.0 - self.norm / 2.0)

            # Scaling Factor
            eta[maxGrad != 0] = np.abs(f_m).max() / maxGrad[maxGrad != 0]

        # Scaled-IRLS weights
        r = (
            eta
            / (f_m ** 2.0 + (self.epsilon * self.length_scales) ** 2.0)
            ** (1.0 - self.norm / 2.0)
        ) ** 0.5
        self.stashedR = r  # stash on the first calculation
        return r

    @utils.timeIt
    def deriv(self, m):
        """

        The regularization is:

        .. math::

            R(m) = \\frac{1}{2}\mathbf{(m-m_\\text{ref})^\\top W^\\top
                   W(m-m_\\text{ref})}

        So the derivative is straightforward:

        .. math::

            \\frac{\\partial R(m)}{\\partial m} = \\mathbf{W^\\top W (m-m_\\text{ref})}

        """

        if self.mrefInSmooth:

            model = self._delta_m(m)

        else:
            model = m
        if self.scale is None:
            self.scale = np.ones(self.mapping.shape[0])

        if self.space == "spherical":
            Ave = getattr(self.regmesh, "aveCC2F{}".format(self.orientation))

            if getattr(self, "model", None) is None:
                R = utils.speye(self.cellDiffStencil.shape[0])

            else:
                r = self.R(self.f_m)
                R = utils.sdiag(r)

            if self.cell_weights is not None:
                W = utils.sdiag(((Ave * (self.scale * self.cell_weights))) ** 0.5) * R

            else:
                W = utils.sdiag((Ave * (self.scale * self.regmesh.vol)) ** 0.5) * R

            theta = self.cellDiffStencil * (self.mapping * model)
            dmdx = utils.mat_utils.coterminal(theta)

            r = W * dmdx

        else:
            r = self.W * (self.mapping * model)

        mD = self.mapping.deriv(model)
        return mD.T * (self.W.T * r)

    @property
    def _multiplier_pair(self):
        return "alpha_{orientation}".format(orientation=self.orientation)

    @property
    def f_m(self):

        if self.mrefInSmooth:

            f_m = self._delta_m(self.model)

        else:
            f_m = self.model

        if self.space == "spherical":
            theta = self.cellDiffStencil * (self.mapping * f_m)
            dmdx = utils.mat_utils.coterminal(theta)

        else:

            if self.gradientType == "total":
                Ave = getattr(self.regmesh, "aveCC2F{}".format(self.orientation))

                dmdx = np.abs(
                    self.regmesh.aveFx2CC
                    * self.regmesh.cellDiffxStencil
                    * (self.mapping * f_m)
                )

                if self.regmesh.dim > 1:

                    dmdx += np.abs(
                        self.regmesh.aveFy2CC
                        * self.regmesh.cellDiffyStencil
                        * (self.mapping * f_m)
                    )

                if self.regmesh.dim > 2:

                    dmdx += np.abs(
                        self.regmesh.aveFz2CC
                        * self.regmesh.cellDiffzStencil
                        * (self.mapping * f_m)
                    )

                dmdx = Ave * dmdx

            else:
                dmdx = self.cellDiffStencil * (self.mapping * f_m)

        return dmdx

    @property
    def cellDiffStencil(self):
        return utils.sdiag(self.length_scales) * getattr(
            self.regmesh, "cellDiff{}Stencil".format(self.orientation)
        )

    @property
    def W(self):

        Ave = getattr(self.regmesh, "aveCC2F{}".format(self.orientation))

        if getattr(self, "model", None) is None:
            R = utils.speye(self.cellDiffStencil.shape[0])

        else:
            r = self.R(self.f_m)
            R = utils.sdiag(r)
        if self.scale is None:
            self.scale = np.ones(self.mapping.shape[0])
        if self.cell_weights is not None:
            return (
                utils.sdiag((Ave * (self.scale * self.cell_weights)) ** 0.5)
                * R
                * self.cellDiffStencil
            )
        else:
            return (
                utils.sdiag((Ave * (self.scale * self.regmesh.vol)) ** 0.5)
                * R
                * self.cellDiffStencil
            )

    @property
    def length_scales(self):
        """
            Normalized cell-based weighting

        """
        Ave = getattr(self.regmesh, "aveCC2F{}".format(self.orientation))

        if getattr(self, "_length_scales", None) is None:
            index = "xyz".index(self.orientation)

            length_scales = Ave * (
                self.regmesh.Pac.T * self.regmesh.mesh.h_gridded[:, index]
            )

            self._length_scales = length_scales.min() / length_scales

        return self._length_scales

    @length_scales.setter
    def length_scales(self, value):
        self._length_scales = value
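
Reading the R method above, the scaled IRLS weights it returns can be written as (a direct transcription of the code, with p the per-cell norm, \epsilon the threshold, \ell the length scales and \eta the scaling factor):

.. math::

    r_i = \left[ \frac{\eta_i}{\left( f_{m,i}^2 + (\epsilon \, \ell_i)^2 \right)^{1 - p_i/2}} \right]^{1/2}
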
Example #24
class ExpFitSurvey(Survey.BaseSurvey, properties.HasProperties):

    time = properties.Array("Time channels (s) at current off-time",
                            dtype=float)

    wave_type = properties.StringChoice(
        "Source location",
        default="stepoff",
        choices=["stepoff", "general", "general_conv"])

    moment_type = properties.StringChoice("Source moment type",
                                          default="single",
                                          choices=["single", "dual"])

    n_pulse = properties.Integer("The number of pulses", default=1)

    base_frequency = properties.Float("Base frequency (Hz)")

    time_input_currents = properties.Array("Time for input currents",
                                           dtype=float)

    input_currents = properties.Array("Input currents", dtype=float)

    t0 = properties.Float("End of the ramp")

    use_lowpass_filter = properties.Bool("Switch for low pass filter",
                                         default=False)

    high_cut_frequency = properties.Float(
        "High cut frequency for low pass filter (Hz)", default=1e5)

    # Predicted data
    _pred = None

    # ------------- For dual moment ------------- #

    time_dual_moment = properties.Array(
        "Off-time channels (s) for the dual moment", dtype=float)

    time_input_currents_dual_moment = properties.Array(
        "Time for input currents (dual moment)", dtype=float)

    input_currents_dual_moment = properties.Array(
        "Input currents (dual moment)", dtype=float)

    t0_dual_moment = properties.Array("End of the ramp", dtype=float)

    base_frequency_dual_moment = properties.Float(
        "Base frequency for the dual moment (Hz)")

    xyz = properties.Array("sounding locations", dtype=float, shape=('*', '*'))

    uncert = None

    def __init__(self, **kwargs):
        Survey.BaseSurvey.__init__(self, **kwargs)

    @property
    def n_time(self):
        n_time = self.time.size
        if self.moment_type == 'dual':
            n_time += self.time_dual_moment.size
        return n_time

    @property
    def n_sounding(self):
        return self.xyz.shape[0]

    @property
    def nD(self):
        """
            # of data
        """
        return self.n_time * self.n_sounding

    def eval(self, f):
        return f

    def set_uncertainty(self, dobs, perc=0.1, floor=0., floorIP=0.):
        # TODO: need to consider dual moment
        self.uncert = np.zeros((self.n_time, self.n_sounding))
        self.dobs = dobs
        dobs = dobs.reshape((self.n_time, self.n_sounding), order='F')
        for itx in range(self.n_sounding):
            ipind = dobs[:, itx] < 0.
            # Set different uncertainty for stations having negative transients
            if (ipind).sum() > 3:
                ip = dobs[ipind, itx]
                self.uncert[:, itx] = (perc * abs(dobs[:, itx]) +
                                       abs(ip).max() * 10)
                self.uncert[ipind, itx] = np.inf
            else:
                self.uncert[:, itx] = perc * abs(dobs[:, itx]) + floor
        self.uncert = Utils.mkvc(self.uncert)

        return self.uncert
Example #25
class BaseComboRegularization(ObjectiveFunction.ComboObjectiveFunction):
    def __init__(self, mesh, objfcts=[], **kwargs):

        super(BaseComboRegularization, self).__init__(objfcts=objfcts,
                                                      multipliers=None)
        self.regmesh = RegularizationMesh(mesh)
        Utils.setKwargs(self, **kwargs)

        # link these attributes
        linkattrs = ['regmesh', 'indActive', 'cell_weights', 'mapping']

        for attr in linkattrs:
            val = getattr(self, attr)
            if val is not None:
                [setattr(fct, attr, val) for fct in self.objfcts]

    # Properties
    alpha_s = Props.Float("smallness weight")
    alpha_x = Props.Float("weight for the first x-derivative")
    alpha_y = Props.Float("weight for the first y-derivative")
    alpha_z = Props.Float("weight for the first z-derivative")
    alpha_xx = Props.Float("weight for the second x-derivative")
    alpha_yy = Props.Float("weight for the second y-derivative")
    alpha_zz = Props.Float("weight for the second z-derivative")

    counter = None

    mref = Props.Array("reference model")
    mrefInSmooth = properties.Bool(
        "include mref in the smoothness calculation?", default=False)
    indActive = properties.Array("indices of active cells in the mesh",
                                 dtype=(bool, int))
    cell_weights = properties.Array(
        "regularization weights applied at cell centers", dtype=float)
    regmesh = properties.Instance("regularization mesh",
                                  RegularizationMesh,
                                  required=True)
    mapping = properties.Instance(
        "mapping which is applied to model in the regularization",
        Maps.IdentityMap,
        default=Maps.IdentityMap())

    # Other properties and methods
    @property
    def nP(self):
        """
        number of model parameters
        """
        if getattr(self.mapping, 'nP') != '*':
            return self.mapping.nP
        elif getattr(self.regmesh, 'nC') != '*':
            return self.regmesh.nC
        else:
            return '*'

    @property
    def _nC_residual(self):
        """
        Shape of the residual
        """
        nC = getattr(self.regmesh, 'nC', None)
        mapping = getattr(self, 'mapping', None)

        if nC != '*' and nC is not None:
            return self.regmesh.nC
        elif mapping is not None and mapping.shape[0] != '*':
            return self.mapping.shape[0]
        else:
            return self.nP

    def _delta_m(self, m):
        if self.mref is None:
            return m
        return (-self.mref + m)  # in case self.mref is Zero, returns type m

    @property
    def multipliers(self):
        """
        Factors that multiply the objective functions that are summed together
        to build the composite regularization
        """
        return [
            getattr(self, '{alpha}'.format(alpha=objfct._multiplier_pair))
            for objfct in self.objfcts
        ]

    # Observers and Validators
    @properties.validator('indActive')
    def _cast_to_bool(self, change):
        value = change['value']
        if value is not None:
            if value.dtype != 'bool':  # cast it to a bool otherwise
                tmp = value
                value = np.zeros(self.regmesh.nC, dtype=bool)
                value[tmp] = True
                change['value'] = value

        # update regmesh indActive
        if getattr(self, 'regmesh', None) is not None:
            self.regmesh.indActive = Utils.mkvc(value)

    @properties.observer('indActive')
    def _update_regmesh_indActive(self, change):
        # update regmesh indActive
        if getattr(self, 'regmesh', None) is not None:
            self.regmesh.indActive = change['value']

    @properties.validator('cell_weights')
    def _validate_cell_weights(self, change):
        if change['value'] is not None:
            # todo: residual size? we need to know the expected end shape
            if self._nC_residual != '*':
                assert len(change['value']) == self._nC_residual, (
                    'cell_weights must be length {} not {}'.format(
                        self._nC_residual, len(change['value'])))

    @properties.observer('mref')
    def _mirror_mref_to_objfctlist(self, change):
        for fct in self.objfcts:
            if getattr(fct, 'mrefInSmooth', None) is not None:
                if self.mrefInSmooth is False:
                    fct.mref = Utils.Zero()
                else:
                    fct.mref = change['value']
            else:
                fct.mref = change['value']

    @properties.observer('mrefInSmooth')
    def _mirror_mrefInSmooth_to_objfctlist(self, change):
        for fct in self.objfcts:
            if getattr(fct, 'mrefInSmooth', None) is not None:
                fct.mrefInSmooth = change['value']

    @properties.observer('indActive')
    def _mirror_indActive_to_objfctlist(self, change):
        value = change['value']
        if value is not None:
            if value.dtype != 'bool':
                tmp = value
                value = np.zeros(self.mesh.nC, dtype=bool)
                value[tmp] = True
                change['value'] = value

        if getattr(self, 'regmesh', None) is not None:
            self.regmesh.indActive = value

        for fct in self.objfcts:
            fct.indActive = value

    @properties.observer('cell_weights')
    def _mirror_cell_weights_to_objfctlist(self, change):
        for fct in self.objfcts:
            fct.cell_weights = change['value']

    @properties.observer('mapping')
    def _mirror_mapping_to_objfctlist(self, change):
        for fct in self.objfcts:
            fct.mapping = change['value']
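
# A minimal standalone sketch (plain numpy, hypothetical phi_* terms -- not the
# SimPEG API) of what the ``multipliers`` property above expresses: the composite
# regularization is the alpha-weighted sum of its component objective functions,
# phi(m) = sum_i alpha_i * phi_i(m).
import numpy as np

def composite_regularization(m, objfcts, multipliers):
    """Weighted sum of regularization terms, mirroring ``multipliers``."""
    return sum(alpha * phi(m) for alpha, phi in zip(multipliers, objfcts))

phi_s = lambda m: 0.5 * np.sum(m ** 2)            # illustrative smallness term
phi_x = lambda m: 0.5 * np.sum(np.diff(m) ** 2)   # illustrative smoothness term

m = np.linspace(0.0, 1.0, 5)
print(composite_regularization(m, [phi_s, phi_x], [1e-4, 1.0]))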
Example #26
class BaseFDEM(BaseEM):
    """
    Base frequency domain electromagnetic class
    """
    sigma = properties.Complex(
        "Electrical conductivity (S/m)",
        default=1.0,
        cast=True
    )

    frequency = properties.Float(
        "Source frequency (Hz)",
        default=1.,
        min=0.0
    )

    quasistatic = properties.Bool(
        "Use the quasi-static approximation and ignore displacement current?",
        default=False
    )

    @properties.validator('sigma')
    def _validate_real_part(self, change):
        if not np.real(change['value']) > 0:
            raise properties.ValidationError("The real part of sigma must be positive")

    @property
    def omega(self):
        """
        Angular frequency

        .. math::

            \\omega = 2\\pi f

        """
        return omega(self.frequency)

    @property
    def sigma_hat(self):
        """
        conductivity with displacement current contribution

        .. math::

            \\hat{\\sigma} = \\sigma + i \\omega \\varepsilon

        """
        sigma = sigma_hat(
            self.frequency, self.sigma, epsilon=self.epsilon,
            quasistatic=self.quasistatic
        )
        if np.all(np.imag(sigma) == 0):
            sigma = np.real(sigma)
        return sigma

    @property
    def wavenumber(self):
        """
        Wavenumber of an electromagnetic wave in a medium with constant
        physical properties

        .. math::

            k = \\sqrt{\\omega^2 \\mu \\varepsilon - i \\omega \\mu \\sigma}

        """
        return wavenumber(
            self.frequency, self.sigma, mu=self.mu, epsilon=self.epsilon,
            quasistatic=self.quasistatic
        )

    @property
    def skin_depth(self):
        """
        Distance at which an em wave has decayed by a factor of :math:`1/e` in
        a medium with constant physical properties

        .. math::

            \\delta = \\sqrt{\\frac{2}{\\omega \\sigma \\mu}}

        """
        return skin_depth(self.frequency, self.sigma, mu=self.mu)
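
# A minimal numeric sketch of the quantities documented above (angular
# frequency, sigma_hat and skin depth), written with plain numpy instead of the
# omega/sigma_hat/skin_depth helpers this module imports elsewhere; the SI
# constants and the example frequency/conductivity are illustrative only.
import numpy as np

mu_0 = 4e-7 * np.pi          # permeability of free space (H/m)
eps_0 = 8.854187817e-12      # permittivity of free space (F/m)

frequency = 1e2              # source frequency (Hz)
sigma = 1e-2                 # electrical conductivity (S/m)

omega_val = 2 * np.pi * frequency                 # omega = 2 pi f
sigma_hat_val = sigma + 1j * omega_val * eps_0    # sigma + i omega epsilon
skin_depth_val = np.sqrt(2.0 / (omega_val * sigma * mu_0))

print(omega_val, sigma_hat_val, skin_depth_val)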
Example #27
class Sparse(BaseComboRegularization):
    """
    The regularization is:

    .. math::

        R(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top R^\\top R
        W(m-m_\\text{ref})}

    where :math:`\\mathbf{R}` is a diagonal matrix of IRLS weights recomputed
    from the current model. For a norm :math:`p_i`, threshold
    :math:`\\epsilon` and scaling factor :math:`\\eta_i` (see ``scaledIRLS``),
    each entry is

    .. math::

        r_i = \\left(\\frac{\\eta_i}{(f_{m_i}^2 + \\epsilon^2)^{1 - p_i/2}}\\right)^{1/2}

    so the derivative is straightforward:

    .. math::

        \\frac{\\partial R(m)}{\\partial \\mathbf{m}} =
        \\mathbf{W^\\top R^\\top R W (m-m_\\text{ref})}

    The IRLS weights are recomputed after each beta solve.
    It is strongly recommended to do a few Gauss-Newton iterations
    before updating.
    """

    def __init__(
        self, mesh, alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs
    ):

        objfcts = [
            SparseSmall(mesh=mesh, **kwargs),
            SparseDeriv(mesh=mesh, orientation="x", **kwargs),
        ]

        if mesh.dim > 1:
            objfcts.append(SparseDeriv(mesh=mesh, orientation="y", **kwargs))

        if mesh.dim > 2:
            objfcts.append(SparseDeriv(mesh=mesh, orientation="z", **kwargs))

        super(Sparse, self).__init__(
            mesh=mesh,
            objfcts=objfcts,
            alpha_s=alpha_s,
            alpha_x=alpha_x,
            alpha_y=alpha_y,
            alpha_z=alpha_z,
            **kwargs
        )

        # Utils.setKwargs(self, **kwargs)

    # Properties
    norms = properties.Array(
        "Norms used to create the sparse regularization",
        default=np.c_[2.0, 2.0, 2.0, 2.0],
        shape={("*", "*")},
    )

    eps_p = properties.Float("Threshold value for the model norm", required=True)

    eps_q = properties.Float(
        "Threshold value for the model gradient norm", required=True
    )

    model = properties.Array("current model", dtype=float)

    space = properties.String("type of model", default="linear")

    gradientType = properties.String("type of gradient", default="components")

    scales = properties.Array(
        "General nob for scaling", default=np.c_[1.0, 1.0, 1.0, 1.0], shape={("*", "*")}
    )
    # Give the option to scale or not
    scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms", default=True)
    # Save the l2 result during the IRLS
    l2model = None

    @properties.validator("norms")
    def _validate_norms(self, change):
        if change["value"].shape[0] == 1:
            change["value"] = np.kron(
                np.ones((self.regmesh.Pac.shape[1], 1)), change["value"]
            )
        elif change["value"].shape[0] > 1:
            assert change["value"].shape[0] == self.regmesh.Pac.shape[1], (
                "Vector of norms must be the size"
                " of active model parameters ({})"
                "The provided vector has length "
                "{}".format(self.regmesh.Pac.shape[0], len(change["value"]))
            )

    # Observers
    @properties.observer("norms")
    def _mirror_norms_to_objfcts(self, change):

        self.objfcts[0].norm = change["value"][:, 0]
        for i, objfct in enumerate(self.objfcts[1:]):
            Ave = getattr(objfct.regmesh, "aveCC2F{}".format(objfct.orientation))
            objfct.norm = Ave * change["value"][:, i + 1]

    @properties.observer("model")
    def _mirror_model_to_objfcts(self, change):
        for objfct in self.objfcts:
            objfct.model = change["value"]

    @properties.observer("eps_p")
    def _mirror_eps_p_to_smallness(self, change):
        for objfct in self.objfcts:
            if isinstance(objfct, SparseSmall):
                objfct.epsilon = change["value"]

    @properties.observer("eps_q")
    def _mirror_eps_q_to_derivs(self, change):
        for objfct in self.objfcts:
            if isinstance(objfct, SparseDeriv):
                objfct.epsilon = change["value"]

    @properties.observer("space")
    def _mirror_space_to_objfcts(self, change):
        for objfct in self.objfcts:
            objfct.space = change["value"]

    @properties.observer("gradientType")
    def _mirror_gradientType_to_objfcts(self, change):
        for objfct in self.objfcts:
            objfct.gradientType = change["value"]

    @properties.observer("scaledIRLS")
    def _mirror_scaledIRLS_to_objfcts(self, change):
        for objfct in self.objfcts:
            objfct.scaledIRLS = change["value"]

    @properties.validator("scales")
    def _validate_scales(self, change):
        if change["value"].shape[0] == 1:
            change["value"] = np.kron(
                np.ones((self.regmesh.Pac.shape[1], 1)), change["value"]
            )
        elif change["value"].shape[0] > 1:
            assert change["value"].shape[0] == self.regmesh.Pac.shape[1], (
                "Vector of scales must be the size"
                " of active model parameters ({})"
                "The provided vector has length "
                "{}".format(self.regmesh.Pac.shape[0], len(change["value"]))
            )

    # Observers
    @properties.observer("scales")
    def _mirror_scale_to_objfcts(self, change):
        for i, objfct in enumerate(self.objfcts):
            objfct.scale = change["value"][:, i]
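
# A hedged usage sketch for the Sparse regularization above. It assumes the
# ``discretize`` package provides TensorMesh and that the surrounding module can
# construct Sparse directly from such a mesh; names and values are illustrative,
# not verified against a specific SimPEG release.
import numpy as np
from discretize import TensorMesh

mesh = TensorMesh([8, 8, 8])
reg = Sparse(mesh, alpha_s=1.0, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0)
reg.norms = np.c_[0.0, 1.0, 1.0, 1.0]   # ~l0 on smallness, l1 on each derivative
reg.eps_p = 1e-2                        # threshold on the model norm
reg.eps_q = 1e-2                        # threshold on the model gradient norm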
Example #28
 class HasProps2(properties.HasProperties):
     my_list = properties.List('my list', properties.Bool(''))
     five = properties.GettableProperty('five', default=5)
     my_array = properties.Vector3Array('my array')
Example #29
class BaseDCSimulation(BaseEMSimulation):
    """
    Base DC Problem
    """

    survey = properties.Instance("a DC survey object", Survey, required=True)

    storeJ = properties.Bool("store the sensitivity matrix?", default=False)

    _mini_survey = None

    Ainv = None
    _Jmatrix = None
    gtgdiag = None

    def __init__(self, *args, **kwargs):
        miniaturize = kwargs.pop("miniaturize", False)
        super().__init__(*args, **kwargs)
        # Do stuff to simplify the forward and JTvec operation if number of dipole
        # sources is greater than the number of unique pole sources
        if miniaturize:
            self._dipoles, self._invs, self._mini_survey = _mini_pole_pole(
                self.survey)

    def fields(self, m=None, calcJ=True):
        if m is not None:
            self.model = m
            self._Jmatrix = None

        f = self.fieldsPair(self)
        if self.Ainv is not None:
            self.Ainv.clean()
        A = self.getA()
        self.Ainv = self.solver(A, **self.solver_opts)
        RHS = self.getRHS()

        f[:, self._solutionType] = self.Ainv * RHS

        return f

    def getJ(self, m, f=None):
        if self._Jmatrix is None:
            if f is None:
                f = self.fields(m)
            self._Jmatrix = self._Jtvec(m, v=None, f=f).T
        return self._Jmatrix

    def dpred(self, m=None, f=None):
        if self._mini_survey is not None:
            # Temporarily set self.survey to self._mini_survey
            survey = self.survey
            self.survey = self._mini_survey

        data = super().dpred(m=m, f=f)

        if self._mini_survey is not None:
            # reset survey
            self.survey = survey

        return self._mini_survey_data(data)

    def getJtJdiag(self, m, W=None):
        """
            Return the diagonal of JtJ
        """
        if self.gtgdiag is None:
            J = self.getJ(m)

            if W is None:
                W = np.ones(J.shape[0])
            else:
                W = W.diagonal()**2

            diag = np.zeros(J.shape[1])
            for i in range(J.shape[0]):
                diag += (W[i]) * (J[i] * J[i])

            self.gtgdiag = diag
        return self.gtgdiag

    def Jvec(self, m, v, f=None):
        """
            Compute sensitivity matrix (J) and vector (v) product.
        """

        if f is None:
            f = self.fields(m)

        if self.storeJ:
            J = self.getJ(m, f=f)
            return J.dot(v)

        self.model = m

        if self._mini_survey is not None:
            survey = self._mini_survey
        else:
            survey = self.survey

        Jv = []
        for source in survey.source_list:
            u_source = f[source, self._solutionType]  # solution vector
            dA_dm_v = self.getADeriv(u_source, v)
            dRHS_dm_v = self.getRHSDeriv(source, v)
            du_dm_v = self.Ainv * (-dA_dm_v + dRHS_dm_v)
            for rx in source.receiver_list:
                df_dmFun = getattr(f, "_{0!s}Deriv".format(rx.projField), None)
                df_dm_v = df_dmFun(source, du_dm_v, v, adjoint=False)
                Jv.append(rx.evalDeriv(source, self.mesh, f, df_dm_v))
        Jv = np.hstack(Jv)
        return self._mini_survey_data(Jv)

    def Jtvec(self, m, v, f=None):
        """
            Compute adjoint sensitivity matrix (J^T) and vector (v) product.
        """

        if f is None:
            f = self.fields(m)

        self.model = m

        if self.storeJ:
            J = self.getJ(m, f=f)
            return np.asarray(J.T.dot(v))

        return self._Jtvec(m, v=v, f=f)

    def _Jtvec(self, m, v=None, f=None):
        """
            Compute adjoint sensitivity matrix (J^T) and vector (v) product.
            Full J matrix can be computed by inputting v=None
        """

        if self._mini_survey is not None:
            survey = self._mini_survey
        else:
            survey = self.survey

        if v is not None:
            if isinstance(v, Data):
                v = v.dobs
            v = self._mini_survey_dataT(v)
            v = Data(survey, v)
            Jtv = np.zeros(m.size)
        else:
            # This is for forming full sensitivity matrix
            Jtv = np.zeros((self.model.size, survey.nD), order="F")
            istrt = int(0)
            iend = int(0)

        for source in survey.source_list:
            u_source = f[source, self._solutionType].copy()
            for rx in source.receiver_list:
                # wrt f, need possibility wrt m
                if v is not None:
                    PTv = rx.evalDeriv(source,
                                       self.mesh,
                                       f,
                                       v[source, rx],
                                       adjoint=True)
                else:
                    # This is for forming full sensitivity matrix
                    PTv = rx.getP(self.mesh, rx.projGLoc(f)).toarray().T
                df_duTFun = getattr(f, "_{0!s}Deriv".format(rx.projField),
                                    None)
                df_duT, df_dmT = df_duTFun(source, None, PTv, adjoint=True)

                ATinvdf_duT = self.Ainv * df_duT

                dA_dmT = self.getADeriv(u_source, ATinvdf_duT, adjoint=True)
                dRHS_dmT = self.getRHSDeriv(source, ATinvdf_duT, adjoint=True)
                du_dmT = -dA_dmT + dRHS_dmT
                if v is not None:
                    Jtv += (df_dmT + du_dmT).astype(float)
                else:
                    iend = istrt + rx.nD
                    if rx.nD == 1:
                        Jtv[:, istrt] = df_dmT + du_dmT
                    else:
                        Jtv[:, istrt:iend] = df_dmT + du_dmT
                    istrt += rx.nD

        if v is not None:
            return mkvc(Jtv)
        else:
            return (self._mini_survey_data(Jtv.T)).T

    def getSourceTerm(self):
        """
        Evaluates the sources, and puts them in matrix form
        :rtype: tuple
        :return: q (nC or nN, nSrc)
        """

        if self._mini_survey is not None:
            Srcs = self._mini_survey.source_list
        else:
            Srcs = self.survey.source_list

        if self._formulation == "EB":
            n = self.mesh.nN

        elif self._formulation == "HJ":
            n = self.mesh.nC

        q = np.zeros((n, len(Srcs)), order="F")

        for i, source in enumerate(Srcs):
            q[:, i] = source.eval(self)
        return q

    @property
    def deleteTheseOnModelUpdate(self):
        toDelete = super(BaseDCSimulation, self).deleteTheseOnModelUpdate
        if self._Jmatrix is not None:
            toDelete += ["_Jmatrix"]
        if self.gtgdiag is not None:
            toDelete += ["gtgdiag"]
        return toDelete

    def _mini_survey_data(self, d_mini):
        if self._mini_survey is not None:
            out = d_mini[self._invs[0]]  # AM
            out[self._dipoles[0]] -= d_mini[self._invs[1]]  # AN
            out[self._dipoles[1]] -= d_mini[self._invs[2]]  # BM
            out[self._dipoles[0]
                & self._dipoles[1]] += d_mini[self._invs[3]]  # BN
        else:
            out = d_mini
        return out

    def _mini_survey_dataT(self, v):
        if self._mini_survey is not None:
            out = np.zeros(self._mini_survey.nD)
            # Need to use ufunc.at because there could be repeated indices
            # that need to be properly handled.
            np.add.at(out, self._invs[0], v)  # AM
            np.subtract.at(out, self._invs[1], v[self._dipoles[0]])  # AN
            np.subtract.at(out, self._invs[2], v[self._dipoles[1]])  # BM
            np.add.at(out, self._invs[3],
                      v[self._dipoles[0] & self._dipoles[1]])  # BN
            return out
        else:
            out = v
        return out
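
# A standalone numpy sketch of the bookkeeping in ``_mini_survey_data`` above:
# dipole-dipole data are rebuilt from unique pole-pole solves by superposition,
# d(AB, MN) = d(A, M) - d(A, N) - d(B, M) + d(B, N), with terms skipped where a
# pole is absent. The toy index arrays below are illustrative and do not follow
# the exact output format of ``_mini_pole_pole``.
import numpy as np

def combine_pole_data(d_pole, invs, dipoles):
    """Rebuild receiver data from unique pole responses (AM - AN - BM + BN)."""
    out = d_pole[invs[0]]                              # AM term for every datum
    out[dipoles[0]] -= d_pole[invs[1]]                 # subtract AN where N exists
    out[dipoles[1]] -= d_pole[invs[2]]                 # subtract BM where B exists
    out[dipoles[0] & dipoles[1]] += d_pole[invs[3]]    # add back BN where both exist
    return out

# unique pole responses: AM0, AM1, AM2, AN2, BM1, BM2, BN2
d_pole = np.array([1.0, 2.0, 3.0, 0.5, 1.5, 2.5, 0.25])
invs = [np.array([0, 1, 2]), np.array([3]), np.array([4, 5]), np.array([6])]
dipoles = [np.array([False, False, True]),   # datum has an N electrode
           np.array([False, True, True])]    # datum has a B electrode
print(combine_pole_data(d_pole, invs, dipoles))   # -> [1.0, 0.5, 0.25]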
Example #30
class SparseSmall(BaseSparse):
    """
    Sparse smallness regularization

    **Inputs**

    :param int norm: norm on the smallness
    """

    _multiplier_pair = "alpha_s"

    def __init__(self, mesh, **kwargs):
        super(SparseSmall, self).__init__(mesh=mesh, **kwargs)

    # Give the option to scale or not
    scaledIRLS = properties.Bool("Scale the gradients of the IRLS norms", default=True)

    @property
    def f_m(self):

        return self.mapping * self._delta_m(self.model)

    @property
    def W(self):
        if getattr(self, "model", None) is None:
            R = utils.speye(self.mapping.shape[0])
        else:
            r = self.R(self.f_m)
            R = utils.sdiag(r)

        if self.scale is None:
            self.scale = np.ones(self.mapping.shape[0])

        if self.cell_weights is not None:
            return utils.sdiag((self.scale * self.cell_weights) ** 0.5) * R

        else:
            return utils.sdiag((self.scale * self.regmesh.vol) ** 0.5) * R

    def R(self, f_m):
        # if R is stashed, return that instead
        if getattr(self, "stashedR") is not None:
            return self.stashedR

        # Default
        eta = np.ones_like(f_m)

        if self.scaledIRLS:
            # Eta scaling is important for mixed norms; do not mess with it
            # Scale on l2-norm gradient: f_m.max()
            maxVal = np.ones_like(f_m) * np.abs(f_m).max()

            # Compute theoretical maximum gradients for p < 1
            maxVal[self.norm < 1] = self.epsilon / np.sqrt(
                1.0 - self.norm[self.norm < 1]
            )
            maxGrad = maxVal / (maxVal ** 2.0 + self.epsilon ** 2.0) ** (
                1.0 - self.norm / 2.0
            )
            # Scaling factor
            eta[maxGrad != 0] = np.abs(f_m).max() / maxGrad[maxGrad != 0]

        # Scaled IRLS weights
        r = (eta / (f_m ** 2.0 + self.epsilon ** 2.0) ** (1.0 - self.norm / 2.0)) ** 0.5

        self.stashedR = r  # stash on the first calculation
        return r

    @utils.timeIt
    def deriv(self, m):
        """

        The regularization is:

        .. math::

            R(m) = \\frac{1}{2}\\mathbf{(m-m_\\text{ref})^\\top W^\\top
                   W(m-m_\\text{ref})}

        so the derivative is straightforward:

        .. math::

            \\frac{\\partial R(m)}{\\partial \\mathbf{m}} = \\mathbf{W^\\top W (m-m_\\text{ref})}

        """

        mD = self.mapping.deriv(self._delta_m(m))
        r = self.W * (self.mapping * (self._delta_m(m)))
        return mD.T * (self.W.T * r)
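
# A standalone numpy sketch of the IRLS weighting implemented in ``R`` above:
# r_i = (eta_i / (f_i**2 + epsilon**2)**(1 - p_i/2))**0.5, with eta chosen so
# that the gradients of different norms stay comparable in magnitude. The input
# values below are hypothetical and for illustration only.
import numpy as np

def irls_weights(f_m, p, epsilon, scaled=True):
    """IRLS weights r such that ||diag(r) * f_m||^2 approximates sum |f_i|^p."""
    eta = np.ones_like(f_m)
    if scaled:
        max_val = np.full_like(f_m, np.abs(f_m).max())
        max_val[p < 1] = epsilon / np.sqrt(1.0 - p[p < 1])
        max_grad = max_val / (max_val ** 2.0 + epsilon ** 2.0) ** (1.0 - p / 2.0)
        eta[max_grad != 0] = np.abs(f_m).max() / max_grad[max_grad != 0]
    return (eta / (f_m ** 2.0 + epsilon ** 2.0) ** (1.0 - p / 2.0)) ** 0.5

f_m = np.array([-0.5, 0.01, 0.2, 1.0])
p = np.zeros_like(f_m)                       # approximate an l0-style measure
print(irls_weights(f_m, p, epsilon=1e-2))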