Example 1
    def resampling(self, step):
        """
        Rebuild the sample and its statistics by drawing samples in proportion to their
        likelihood-based multiplicities and shuffling the result
        """
        θOld = step.theta
        priorOld = step.prior
        dataOld = step.data
        postOld = step.posterior
        # allocate the new entities
        θ = altar.matrix(shape=θOld.shape)
        prior = altar.vector(shape=priorOld.shape)
        data = altar.vector(shape=dataOld.shape)
        posterior = altar.vector(shape=postOld.shape)

        # build a histogram for the new samples and convert it into a vector
        multi = self.computeSampleMultiplicities(step=step).values()
        # print("      histogram as vector:")
        # print("        counts: {}".format(tuple(multi)))

        # unique samples count
        unique_samples = 0
        # sample count
        index = 0
        # indices for kept samples
        indices = altar.vector(shape=multi.shape)

        # record kept sample indices
        for i in range(multi.shape):
            count = int(multi[i])
            # if count is zero, skip
            if count == 0: continue
            # add the unique samples count
            unique_samples += 1
            # duplicate indices
            for ic in range(count):
                indices[index] = i
                index += 1
        # shuffle the indices
        indices.shuffle(rng=self.rng)

        self.info.log(
            f"resampling: unique samples {unique_samples} out of {multi.shape}"
        )
        # print("     kept sample indices: {}".format(tuple(indices[i] for i in range(indices.shape))))

        # copy theta, (prior, data, posterior) over according to the indices
        for i in range(indices.shape):
            # get the index for old samples
            old = int(indices[i])
            # duplicate theta
            for param in range(step.parameters):
                θ[i, param] = θOld[old, param]
            prior[i] = priorOld[old]
            data[i] = dataOld[old]
            posterior[i] = postOld[old]

        # return the shuffled data
        return θ, (prior, data, posterior)
Example 2
 def alloc(cls, samples, parameters):
     """
     Allocate storage for the parts of a cooling step
     """
     # allocate the initial sample set
     theta = altar.matrix(shape=(samples, parameters)).zero()
     # allocate the likelihood vectors
     prior = altar.vector(shape=samples).zero()
     data = altar.vector(shape=samples).zero()
     posterior = altar.vector(shape=samples).zero()
     # build one of my instances and return it
     return cls(beta=0, theta=theta, likelihoods=(prior, data, posterior))
Example 3
    def rank(self, step):
        """
        Rebuild the sample and its statistics sorted by the likelihood of the parameter values
        """
        θOld = step.theta
        priorOld = step.prior
        dataOld = step.data
        postOld = step.posterior
        # allocate the new entities
        θ = altar.matrix(shape=θOld.shape)
        prior = altar.vector(shape=priorOld.shape)
        data = altar.vector(shape=dataOld.shape)
        posterior = altar.vector(shape=postOld.shape)

        # build a histogram for the new samples and convert it into a vector
        multi = self.computeSampleMultiplicities(step=step).values()
        # print("      histogram as vector:")
        # print("        counts: {}".format(tuple(multi)))

        # compute the permutation that would sort the frequency table according to the sample
        # multiplicity, in reverse order
        p = multi.sortIndirect().reverse()
        # print("        sorted: {}".format(tuple(p[i] for i in range(p.shape))))

        # the number of samples we have processed
        done = 0
        # start moving stuff around until we have built a complete sample set
        for i in range(p.shape):
            # the old sample index
            old = p[i]
            # and its multiplicity
            count = int(multi[old])
            # if the count has dropped to zero, we are done
            if count == 0: break
            # otherwise, duplicate this sample {count} times
            for dupl in range(count):
                # update the samples
                for param in range(step.parameters):
                    θ[done, param] = θOld[old, param]
                # update the log-likelihoods
                prior[done] = priorOld[old]
                data[done] = dataOld[old]
                posterior[done] = postOld[old]
                # update the number of processed samples
                done += 1
                # print(i, old, count, done)

        # return the shuffled data
        return θ, (prior, data, posterior)
Example 4
    def computeCp(self, theta_mean):
        """
        Calculate Cp
        """

        # grab the problem dimensions
        parameters = self.parameters
        observations = self.observations
        nCmu = self.nCmu

        Cp = altar.matrix(shape=(observations, observations))

        # calculate
        kv = altar.vector(shape=observations)
        cmu = self.Cmu
        kmu = self.Kmu
        Kp = altar.matrix(shape=(observations, nCmu))
        for i in range(nCmu):
            # get kmu_i from list, shape=(observations, parameters)
            kmu_i = kmu[i]
            # kv = Kmu_i * theta_mean
            # dgemv y = alpha Op(A) x + beta y
            altar.blas.dgemv(kmu_i.opNoTrans, 1.0, kmu_i, theta_mean, 0.0, kv)
            Kp.setColumn(i, kv)

        # KpC = Kp * Cmu
        KpC = altar.matrix(shape=(observations, nCmu))
        altar.blas.dsymm(cmu.sideRight, cmu.upperTriangular, 1.0, cmu, Kp, 0.0,
                         KpC)
        # Cp = KpC * Kp^T
        altar.blas.dgemm(KpC.opNoTrans, Kp.opTrans, 1.0, KpC, Kp, 0.0, Cp)

        # all done
        return Cp
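
In matrix form, the loop and the two BLAS calls above evaluate

    Cp = Kp Cμ Kp^T,    where column i of Kp is Kmu_i · theta_mean

dsymm exploits the symmetry of Cμ to form KpC = Kp · Cμ, and dgemm closes the product
against Kp^T: the standard linear propagation of the parameter uncertainty Cμ through
the sensitivity kernels into data space.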
Example 5
    def dataLikelihood(self, step):
        """
        Fill {step.data} with the likelihoods of the samples in {step.theta} given the available
        data. This is what is usually referred to as the "forward model"
        """
        # cache the inverse of {σ}
        σ_inv = self.σ_inv

        # grab the portion of the sample that's mine
        θ = self.restrict(theta=step.theta)
        # and the storage for the data likelihoods
        data = step.data

        # find out how many samples in the set
        samples = θ.rows

        # for each sample in the sample set
        for sample in range(samples):
            # prepare vector with the sample difference from the mean
            δ = θ.getRow(sample)
            δ -= self.peak
            # storage for {σ_inv . δ}
            y = altar.vector(shape=δ.shape).zero()
            # compute {σ_inv . δ} and store it in {y}
            altar.blas.dsymv(σ_inv.upperTriangular, 1.0, σ_inv, δ, 0.0, y)
            # finally, form {δ^T . σ_inv . δ}
            v = altar.blas.ddot(δ, y)
            # compute and return the log-likelihood of the data given this sample
            data[sample] += self.normalization - v / 2

        # all done
        return self
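
Each pass through the loop scores one sample against a multivariate gaussian: with
δ = θ_sample - peak,

    log L = normalization - δ^T σ_inv δ / 2

dsymv forms σ_inv · δ from the stored triangle of σ_inv, and ddot closes the quadratic
form; note the result is accumulated into {step.data} with +=, so contributions from
other likelihood terms can coexist.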
Example 6
    def displacements(self, locations, los):
        """
        Compute the expected displacements from a point pressure source at a set of observation
        locations
        """
        # the location of the source
        x_src = self.x
        y_src = self.y
        d_src = self.d
        # its strength
        dV = self.dV
        # get the material properties
        nu = self.nu

        # allocate space for the result
        u = altar.vector(shape=len(locations))
        # go through each observation location
        for index, (x_obs,y_obs) in enumerate(locations):
            # compute displacements
            x = x_src - x_obs
            y = y_src - y_obs
            d = d_src
            # compute the distance to the point source
            x2 = x**2
            y2 = y**2
            d2 = d**2
            # intermediate values
            C = (nu-1) * dV/π
            R = sqrt(x2 + y2 + d2)
            CR3 = C * R**-3
            # store the expected displacement
            u[index] = x*CR3 * los[index, 0] + y*CR3 * los[index, 1] - d*CR3 * los[index, 2]

        # all done
        return u
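
Up to the sign conventions for (x, y, d) used above, the intermediate values implement
the classical Mogi point-source displacement field: with C = (1-ν) ΔV/π and
R = sqrt(x² + y² + d²),

    u_E = C x / R³,    u_N = C y / R³,    u_Z = C d / R³

and the stored value is the projection of (u_E, u_N, u_Z) onto the line-of-sight unit
vector in {los}. The snippet assumes {sqrt} and {π} are imported from {math} at module
scope.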
Example 7
    def initializeSample(self, theta):
        """
        Fill my portion of {theta} with initial random values from my distribution.
        """
        # grab the portion of the sample that's mine
        θ = self.restrict(theta=theta)
        # grab the number of samples (rows of theta) 
        samples = θ.rows
        # grab the number of patches/parameters
        parameters = self.patches
    
        # grab the area of patches
        area_patches = self.area_patches
        
        # create a gaussian distribution to generate Mw for each sample
        gaussian_Mw = altar.pdf.gaussian(mean=self.Mw_mean, sigma=self.Mw_sigma, rng=self.rng)
       
        # create a dirichlet distribution to generate displacements
        alpha = altar.vector(shape=parameters).fill(1)  # alpha_i = 1: a flat dirichlet
        dirichlet_D = altar.pdf.dirichlet(alpha=alpha, rng=self.rng)
       
        # create a temporary vector to hold one sample's theta
        theta_sample = altar.vector(shape=parameters)
        # iterate through samples to initialize samples 
        for sample in range(samples): 
            # generate a Mw sample 
            Mw = gaussian_Mw.sample()
            # Pentier = M0/Mu = \sum (A_i D_i)
            # the -15 in the exponent converts Pa * m^2 to GPa * km^2 (10^9 * 10^6 = 10^15)
            Pentier = pow(10, 1.5*Mw + 9.1 - 15)/self.Mu
            # generate a dirichlet sample \sum x_i = 1
            dirichlet_D.vector(vector=theta_sample)
            # D_i = P * x_i /A_i 
            for parameter in range(parameters):
                theta_sample[parameter] *= Pentier / area_patches[parameter]
            # set theta 
            θ.setRow(sample, theta_sample)

        
        # all done and return
        return self
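
The scaling inside the loop is standard moment-magnitude bookkeeping; spelled out:

    M0 = 10^(1.5 Mw + 9.1)                    # scalar moment, in Pa·m³
    P = M0 / Mu = \sum_i A_i D_i              # the potency spread over the patches
    D_i = P x_i / A_i, with \sum_i x_i = 1    # the dirichlet draw fixes the split
    GPa · km² = 10^9 Pa · 10^6 m²             # hence the -15 in the exponent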
Example 8
    def displacements(self, locations, los):
        """
        Compute the expected displacements at a set of observation locations from a compound
        (triaxial) dislocation source at depth.
        """
        # the location of the source
        x_src = self.x
        y_src = self.y
        d_src = self.d
        # clockwise rotation angles about x, y, z axes
        omegaX_src = self.omegaX
        omegaY_src = self.omegaY
        omegaZ_src = self.omegaZ
        # semi-lengths
        ax_src = self.ax
        ay_src = self.ay
        az_src = self.az
        # opening
        opening = self.opening
        # get the material properties
        v = self.v

        # from locations, a vector of (x,y) tuples, create the flattened vectors Xf, Yf required by
        # CDM
        Xf = numpy.zeros(len(locations), dtype=float)
        Yf = numpy.zeros(len(locations), dtype=float)
        for i, loc in enumerate(locations):
            Xf[i] = loc[0]
            Yf[i] = loc[1]

        # allocate space for the result
        u = altar.vector(shape=len(locations))
        # compute the displacements
        ue, un, uv = CDM(X=Xf,
                         Y=Yf,
                         X0=x_src,
                         Y0=y_src,
                         depth=d_src,
                         ax=ax_src,
                         ay=ay_src,
                         az=az_src,
                         omegaX=omegaX_src,
                         omegaY=omegaY_src,
                         omegaZ=omegaZ_src,
                         opening=opening,
                         nu=v)
        # go through each observation location
        for idx, (ux, uy, uz) in enumerate(zip(ue, un, uv)):
            # project the expected displacement along LOS and store
            u[idx] = ux * los[idx, 0] + uy * los[idx, 1] + uz * los[idx, 2]

        # all done
        return u
Example 9
 def updateTemperature(self, step):
     """
     Generate the next temperature increment
     """
     # grab the data log-likelihood
     dataLikelihood = step.data
     # initialize the vector of weights
     self.w = altar.vector(shape=step.samples).zero()
     # compute {δβ} and the normalized {w}
     β, self.cov = self.solver.solve(dataLikelihood, self.w)
     # and return the new temperature
     return β
Example 10
    def computeCovariance(self, step):
        """
        Compute the parameter covariance Σ of the sample in {step}

          Σ = c_m^2 (\sum_{i \in samples} \tilde{w}_{i} θ_i θ_i^T - \bar{θ} \bar{θ}^T)

        where

          \bar{θ} = \sum_{i \in samples} \tilde{w}_{i} θ_{i}

        The covariance Σ gets used to build a proposal pdf for the posterior
        """
        # unpack what i need
        w = self.w  # w is assumed normalized
        θ = step.theta  # the current sample set
        # extract the number of samples and number of parameters
        samples = step.samples
        parameters = step.parameters

        # initialize the covariance matrix
        Σ = altar.matrix(shape=(parameters, parameters)).zero()

        # check the geometries
        assert w.shape == samples
        assert θ.shape == (samples, parameters)
        assert Σ.shape == (parameters, parameters)

        # calculate the weighted mean of every parameter across all samples
        θbar = altar.vector(shape=parameters)
        # for each parameter
        for j in range(parameters):
            # the jth column in θ has the value of this parameter in the various samples
            θbar[j] = θ.getColumn(j).mean(weights=w)
        # start filling out Σ
        for i in range(samples):
            # get the sample
            sample = θ.getRow(i)
            # form Σ += w[i] sample sample^T
            altar.blas.dsyr(Σ.lowerTriangular, w[i], sample, Σ)
        # subtract θbar θbar^T
        altar.blas.dsyr(Σ.lowerTriangular, -1, θbar, Σ)
        # fill the upper triangle
        for i in range(parameters):
            for j in range(i):
                Σ[j, i] = Σ[i, j]

        # condition the covariance matrix
        if self.check_positive_definiteness:
            self.conditionCovariance(Σ=Σ)

        # all done
        return Σ
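
The accumulation relies on the usual decomposition of a weighted covariance; with
normalized weights \sum_i w_i = 1,

    Σ = \sum_i w_i (θ_i - θ̄)(θ_i - θ̄)^T = \sum_i w_i θ_i θ_i^T - θ̄ θ̄^T

so the loop of rank-1 dsyr updates plus the single dsyr call with coefficient -1 builds
the covariance in the lower triangle, and the final double loop mirrors it into the
upper one.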
Example 11
    def initialize(self, application):
        """
        Initialize the state of the model given a {problem} specification
        """
        # externals
        from math import sin, cos

        # chain up
        super().initialize(application=application)

        # initialize my parameter sets
        self.initializeParameterSets()
        # mount the directory with my input data
        self.ifs = self.mountInputDataspace(pfs=application.pfs)

        # load the data from the inputs into memory
        displacements, self.cd = self.loadInputs()

        # compute the normalization
        self.normalization = self.computeNormalization()
        # compute the inverse of the covariance matrix
        self.cd_inv = self.computeCovarianceInverse()

        # build the local representations
        self.points = []
        self.d = altar.vector(shape=self.observations)
        self.los = altar.matrix(shape=(self.observations, 3))
        self.oid = []
        # populate them
        for obs, record in enumerate(displacements):
            # extract the observation id
            self.oid.append(record.oid)
            # extract the (x,y) coordinate of the observation point
            self.points.append((record.x, record.y))
            # extract the observed displacement
            self.d[obs] = record.d
            # get the LOS angles
            theta = record.theta
            phi = record.phi
            # form the projection vectors and store them
            self.los[obs, 0] = sin(theta) * cos(phi)
            self.los[obs, 1] = sin(theta) * sin(phi)
            self.los[obs, 2] = cos(theta)

        # save the parameter meta data
        self.meta()
        # show me
        # self.show(job=application.job, channel=self.info)

        # all done
        return self
Example 12
    def loadData(self):
        """
        Load the observations and the data covariance
        """

        # grab the input dataspace
        ifs = self.ifs
        # next, the observations
        try:
            # get the path to the file
            df = ifs[self.data_file]
        # if the file doesn't exist
        except ifs.NotFoundError:
            # grab my error channel
            channel = self.error
            # complain
            channel.log(f"missing observations: no '{self.data_file}' in '{ifs.path()}'")
            # and raise the exception again
            raise
        # if all goes well
        else:
            # allocate the vector
            self.dataobs = altar.vector(shape=self.observations)
            # and load the file contents into memory
            self.dataobs.load(df.uri)

        if self.cd_file is not None:
            # finally, the data covariance
            try:
                # get the path to the file
                cf = ifs[self.cd_file]
            # if the file doesn't exist
            except ifs.NotFoundError:
                # grab my error channel
                channel = self.error
                # complain
                channel.log(f"missing data covariance matrix: no '{self.cd_file}'")
                # and raise the exception again
                raise
            # if all goes well
            else:
                # allocate the matrix
                self.cd = altar.matrix(shape=(self.observations, self.observations))
                # and load the file contents into memory
                self.cd.load(cf.uri)
        else:
            # use a constant covariance
            self.cd = self.cd_std
        return
Example 13
 def initialize(self, rng):
     """
     Initialize with the given random number generator
     """
     # initialize the parent uniform distribution
     super().initialize(rng=rng)
     
     # the number of patches matches the number of parameters
     self.patches = self.parameters
     # by default, assign the constant {area} to each patch
     self.area_patches = altar.vector(shape=self.patches).fill(self.area)
     # if a file is provided, load it 
     if self.area_patches_file is not None: 
         self.area_patches.load(self.area_patches_file.uri)
     
     # all done
     return self
Example 14
    def computeSampleMultiplicities(self, step):
        """
        Prepare a frequency vector for the new samples given the scaled data log-likelihood in
        {w} for this cooling step
        """
        # print("    computing sample multiplicities:")
        # unpack what we need
        w = self.w
        samples = step.samples

        # build a vector of random numbers uniformly distributed in [0,1]
        r = altar.vector(shape=samples).random(pdf=self.uniform)
        # compute the bin edges in the range [0, 1]
        ticks = tuple(self.buildHistogramRanges(w))
        # build a histogram
        h = altar.histogram(bins=samples).ranges(points=ticks).fill(r)
        # and return it
        return h
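
The histogram construction is equivalent to multinomial resampling: partition [0, 1]
into bins whose widths are the normalized weights, throw {samples} uniform variates,
and count how many land in each bin. A minimal numpy sketch of the same idea,
independent of the altar containers and purely illustrative:

    import numpy as np

    def sample_multiplicities(w, samples, rng=None):
        # w: normalized weights, one per existing sample
        rng = rng or np.random.default_rng()
        # the bin edges in [0, 1] are the cumulative weights
        edges = np.concatenate(([0.0], np.cumsum(w)))
        # throw uniform variates
        r = rng.uniform(0.0, 1.0, size=samples)
        # the count in bin i is the multiplicity of sample i
        counts, _ = np.histogram(r, bins=edges)
        return counts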
Example 15
    def updateTemperature(self, step):
        """
        Generate the next temperature increment
        """
        # grab the data log-likelihood
        dataLikelihood  = step.data

        # initialize the vector of weights
        self.w = altar.vector(shape=step.samples).zero()
        # compute the median data log-likelihood; clone the source vector first, since the
        # sorting happens in place
        median = dataLikelihood.clone().sort().median()

        # compute {δβ} and the normalized {w}
        β, self.cov = altar.libaltar.dbeta(self.minimizer, dataLikelihood.data, median, self.w.data)

        # and return the new temperature
        return β
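
What {libaltar.dbeta} does is not shown here; in CATMIP-style annealing the increment
is chosen so that the coefficient of variation (COV) of the weights
w_i ∝ exp(δβ · llk_i) hits a target value, typically 1. A self-contained sketch of that
selection rule, stated as an assumption about the scheme rather than a transcription of
the C implementation:

    import numpy as np
    from scipy.optimize import brentq

    def next_beta(llk, beta, target_cov=1.0):
        # llk: array of data log-likelihoods; beta: the current temperature
        def cov_gap(dbeta):
            # numerically stable weights
            w = np.exp(dbeta * (llk - llk.max()))
            w /= w.sum()
            return w.std() / w.mean() - target_cov
        # if even the full remaining step keeps the COV below target, jump to β = 1
        if cov_gap(1.0 - beta) <= 0:
            return 1.0
        # otherwise, find the increment that hits the target COV
        return beta + brentq(cov_gap, 0.0, 1.0 - beta)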
Example 16
    def __init__(self, **kwds):
        # chain up
        super().__init__(**kwds)

        # local names for the math functions
        log, π, cos, sin = math.log, math.pi, math.cos, math.sin

        # the number of model parameters
        dof = self.parameters

        # convert the central value into a vector; allocate
        peak = altar.vector(shape=dof)
        # and populate
        for index, value in enumerate(self.μ):
            peak[index] = value

        # the trigonometry
        cos_φ = cos(self.φ)
        sin_φ = sin(self.φ)
        # the eigenvalues
        λ0 = self.λ[0]
        λ1 = self.λ[1]
        # the eigenvalue inverses
        λ0_inv = 1 / λ0
        λ1_inv = 1 / λ1

        # build the inverse of the covariance matrix
        σ_inv = altar.matrix(shape=(dof, dof))
        σ_inv[0, 0] = λ0_inv * cos_φ**2 + λ1_inv * sin_φ**2
        σ_inv[1, 1] = λ1_inv * cos_φ**2 + λ0_inv * sin_φ**2
        σ_inv[0, 1] = σ_inv[1, 0] = (λ1_inv - λ0_inv) * cos_φ * sin_φ

        # compute its determinant and store it
        σ_lndet = log(λ0 * λ1)

        # attach the characteristics of my pdf
        self.peak = peak
        self.σ_inv = σ_inv

        # the log-normalization
        self.normalization = -.5 * (dof * log(2 * π) + σ_lndet)

        # all done
        return
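
The three matrix entries come from diagonalizing a 2d covariance whose principal axes
are rotated by φ: with Λ = diag(λ0, λ1) and R the rotation by φ (up to the sign
convention of the rotation),

    σ = R Λ R^T   ⟹   σ^{-1} = R Λ^{-1} R^T,    log det σ = log(λ0 λ1)

which is why only the eigenvalue inverses and the sines/cosines of φ appear, and why
the log-normalization is -(dof · log(2π) + log det σ)/2.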
Example 17
    def displacements(self, locations, los):
        """
        Compute the expected displacements at a set of observation locations from a
        two magma chamber (reverso) volcano model
        """
        # the radius of the shallow reservoir; {as} is a python keyword, so the trait is
        # presumably spelled {a_s}
        as_src = self.a_s
        # the radius of the deep reservoir
        ad_src = self.ad
        # the radius of the connecting pipe between the two reservoirs
        ac_src = self.ac
        # depth of the shallow reservoir
        hs_src = self.hs
        # depth of the deep reservoir
        hd_src = self.hd
        # the basal magma inflow rate
        q_src  = self.q

        # get the material properties
        v = self.v

        # from locations, a vector of (x,y) tuples, create the flattened vectors Xf, Yf
        # required by the source implementation
        Xf = numpy.zeros(len(locations), dtype=float)
        Yf = numpy.zeros(len(locations), dtype=float)
        for i, loc in enumerate(locations):
            Xf[i] = loc[0]
            Yf[i] = loc[1]

        # allocate space for the result
        u = altar.vector(shape=len(locations))
        # compute the displacements; the original call passed CDM arguments that are not
        # defined in this scope, so the argument list below is a sketch built from the
        # reverso parameters extracted above and assumes a matching {Reverso} signature
        ue, un, uv = Reverso(X=Xf, Y=Yf,
                             H_s=hs_src, H_d=hd_src,
                             a_s=as_src, a_d=ad_src, a_c=ac_src,
                             Qin=q_src, nu=v)
        # go through each observation location
        for idx, (ux,uy,uz) in enumerate(zip(ue, un, uv)):
            # project the expected displacement along LOS and store
            u[idx] = ux * los[idx,0] + uy * los[idx,1] + uz * los[idx,2]

        # all done
        return u
Example 18
    def forwardModelBatched(self, theta, prediction):
        """
        The forward model for a batch of {theta}: compute {prediction} from {theta};
        the {residual} flag, when returned, indicates whether the difference between
        data and prediction was computed instead of the prediction itself
        """

        # The default method computes samples one by one
        batch = self.samples
        # create a prediction vector
        prediction_sample = altar.vector(shape=self.observations)
        # iterate over samples
        for sample in range(batch):
            # obtain the sample (one set of parameters)
            theta_sample = theta.getRow(sample)
            # call the forward model
            self.forwardModel(theta=theta_sample, prediction=prediction_sample)
            # copy to the prediction matrix
            prediction.setRow(sample, prediction_sample)

        # all done
        return self
Example 19
    def walkChains(self, annealer, step):
        """
        Run the Metropolis algorithm on the Markov chains
        """
        # get the model
        model = annealer.model
        # and the event dispatcher
        dispatcher = annealer.dispatcher

        # unpack what i need from the cooling step
        β = step.beta
        θ = step.theta
        prior = step.prior
        data = step.data
        posterior = step.posterior
        # get the parameter covariance
        Σ_chol = self.sigma_chol
        # the sample geometry
        samples = step.samples
        parameters = step.parameters
        # a couple of functions from the math module
        exp = math.exp
        log = math.log

        # reset the accept/reject counters
        accepted = rejected = unlikely = 0

        # allocate some vectors that we use throughout the following
        # candidate likelihoods
        cprior = altar.vector(shape=samples)
        cdata = altar.vector(shape=samples)
        cpost = altar.vector(shape=samples)
        # a fake covariance matrix for the candidate steps, just so we don't have to rebuild it
        # every time
        csigma = altar.matrix(shape=(parameters, parameters))
        # the mask of samples rejected due to model constraint violations
        rejects = altar.vector(shape=samples)
        # and a vector with random numbers for the Metropolis acceptance
        dice = altar.vector(shape=samples)

        # step all chains together; the loop variable is named {link} to avoid shadowing
        # the cooling step argument {step}
        for link in range(self.steps):
            # notify we are advancing the chains
            dispatcher.notify(event=dispatcher.chainAdvanceStart,
                              controller=annealer)

            # initialize the candidate sample by randomly displacing the current one
            cθ = self.displace(sample=θ)
            # initialize the likelihoods
            likelihoods = cprior.zero(), cdata.zero(), cpost.zero()
            # and the covariance matrix
            csigma.zero()
            # build a candidate state
            candidate = self.CoolingStep(beta=β,
                                         theta=cθ,
                                         likelihoods=likelihoods,
                                         sigma=csigma)

            # the random displacement may have generated candidates that are outside the
            # support of the model, so we must give it an opportunity to reject them;
            # notify we are starting the verification process
            dispatcher.notify(event=dispatcher.verifyStart,
                              controller=annealer)
            # reset the mask and ask the model to verify the sample validity
            model.verify(step=candidate, mask=rejects.zero())
            # make the candidate a consistent set by replacing the rejected samples with copies
            # of the originals from {θ}
            for index, flag in enumerate(rejects):
                # if this sample was rejected
                if flag:
                    # copy the corresponding row from {θ} into {candidate}
                    cθ.setRow(index, θ.getRow(index))
            # notify that the verification process is finished
            dispatcher.notify(event=dispatcher.verifyFinish,
                              controller=annealer)

            # compute the likelihoods
            model.likelihoods(annealer=annealer, step=candidate)

            # build a vector to hold the difference of the two posterior likelihoods
            diff = cpost.clone()
            # subtract the previous posterior
            diff -= posterior
            # randomize the Metropolis acceptance vector
            dice.random(self.uniform)

            # notify we are starting accepting samples
            dispatcher.notify(event=dispatcher.acceptStart,
                              controller=annealer)

            # accept/reject: go through all the samples
            for sample in range(samples):
                # a candidate is rejected if the model considered it invalid
                if rejects[sample]:
                    # nothing to do: θ, priorL, dataL, and postL contain the right statistics
                    # for this sample; just update the rejection count
                    rejected += 1
                    # and move on
                    continue
                # a candidate is also rejected if the model considered it less likely than the
                # original and it wasn't saved by the {dice}
                if log(dice[sample]) > diff[sample]:
                    # nothing to do: θ, priorL, dataL, and postL contain the right statistics
                    # for this sample; just update the unlikely count
                    unlikely += 1
                    # and move on
                    continue

                # otherwise, update the acceptance count
                accepted += 1
                # copy the candidate sample
                θ.setRow(sample, cθ.getRow(sample))
                # and its likelihoods
                prior[sample] = cprior[sample]
                data[sample] = cdata[sample]
                posterior[sample] = cpost[sample]

            # notify we are done accepting samples
            dispatcher.notify(event=dispatcher.acceptFinish,
                              controller=annealer)

            # notify we are done advancing the chains
            dispatcher.notify(event=dispatcher.chainAdvanceFinish,
                              controller=annealer)

        # all done
        return accepted, rejected, unlikely
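
The accept/reject test is the Metropolis rule in log space: a candidate with posterior
log-likelihood post_c replaces the current sample with probability
min(1, exp(post_c - post)), implemented by drawing u ~ U(0, 1) and accepting when

    log u ≤ post_c - post = diff[sample]

hence {diff} is formed once per sweep and compared against the log of the {dice}
entries; samples masked by {rejects} never reach this test.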
Example 20
    def cuInitialize(self, application):
        """
        cuda interface of initialization
        """
        # initialize the parent uniform distribution
        super().cuInitialize(application=application)

        # get the input path
        ifs = application.pfs["inputs"]

        # assign the rng
        self.rng = application.rng.rng

        # set the number of patches
        self.patches = self.parameters
        # initialize the area for each patch
        if len(self.area) == 1:
            # by default, assign the constant patch_area to each patch
            self.area_patches = altar.vector(shape=self.patches).fill(
                self.area[0])
        elif len(self.area) != self.patches:
            # if the size doesn't match
            channel = self.error
            raise channel.log(
                "the size of area doesn't match the number of patches")
        else:
            # otherwise, use the values provided
            self.area_patches = self.area

        # if a file is provided, load it
        if self.area_patch_file is not None:
            try:
                # get the path to the file
                areafile = ifs[self.area_patch_file]
            # if the file doesn't exist
            except ifs.NotFoundError:
                # grab my error channel
                channel = self.error
                # complain
                channel.log(
                    f"missing area_patch_file: no '{self.area_patch_file}' {ifs.path()}"
                )
                # and raise the exception again
                raise
            # if all goes well
            else:
                # allocate the vector
                self.area_patches = altar.vector(shape=self.patches)
                # and load the file contents into memory
                self.area_patches.load(areafile.uri)

        # initialize the shear modulus for each patch
        if len(self.Mu) == 1:
            # by default, assign the constant to each patch
            self.mu_patches = altar.vector(shape=self.patches).fill(self.Mu[0])
        elif len(self.Mu) != self.patches:
            # if the size doesn't match
            channel = self.error
            raise channel.log(
                "the size of Mu doesn't match the number of patches")
        else:
            # otherwise, use the values provided
            self.mu_patches = self.Mu

        # all done
        return self
Example 21
    def cuInitSample(self, theta):
        """
        Fill my portion of {theta} with initial random values from my distribution.
        """
        # use cpu to generate a batch of samples
        samples = theta.shape[0]
        parameters = self.parameters
        θ = altar.matrix(shape=(samples, parameters))

        # grab the references for area/shear modulus
        area_patches = self.area_patches
        mu_patches = self.mu_patches

        # create a gaussian distribution to generate Mw for each sample
        gaussian_Mw = altar.pdf.gaussian(mean=self.Mw_mean,
                                         sigma=self.Mw_sigma,
                                         rng=self.rng)

        # create a dirichlet distribution to generate displacements
        alpha = altar.vector(shape=parameters).fill(1)  # alpha_i = 1: a flat dirichlet
        dirichlet_D = altar.pdf.dirichlet(alpha=alpha, rng=self.rng)

        # create a temporary vector to hold one sample's theta
        theta_sample = altar.vector(shape=parameters)

        # get the range
        low, high = self.support

        # iterate through samples to initialize samples
        for sample in range(samples):
            within_range = False
            # iterate until all samples are within support
            while within_range is False:
                # assume within_range is true in the beginning
                within_range = True
                # generate a Mw sample
                Mw = gaussian_Mw.sample()
                # Pentier = M0 = \sum (A_i D_i Mu_i)
                # the -15 in the exponent converts Pa * m^2 to GPa * km^2 (10^9 * 10^6 = 10^15)
                Pentier = pow(10, 1.5 * Mw + 9.1 - 15)
                # if a negative sign is desired
                if self.slip_sign == 'negative':
                    Pentier = -Pentier
                # generate a dirichlet sample \sum x_i = 1
                dirichlet_D.vector(vector=theta_sample)
                # D_i = P * x_i /A_i
                for patch in range(parameters):
                    theta_sample[patch] *= Pentier / (area_patches[patch] *
                                                      mu_patches[patch])
                    # check the range
                    if (theta_sample[patch] >= high
                            or theta_sample[patch] <= low):
                        within_range = False
                        break
            # set theta
            θ.setRow(sample, theta_sample)

        # make a copy to gpu
        gθ = altar.cuda.matrix(source=θ, dtype=self.precision)
        # insert into theta according to the idx_range
        theta.insert(src=gθ, start=(0, self.idx_range[0]))

        # and return
        return self
Example 22
    def initialize(self, application):
        """
        Initialize the state of the model given a problem specification
        """
        # chain up
        super().initialize(application=application)
        # initialize the parameter sets
        self.initializeParameterSets()
        # mount the workspace
        self.ifs = self.mountInputDataspace(pfs=application.pfs)

        # load the data
        data = self.loadInputs()

        # prep to swallow the inputs
        self.ticks = []
        self.d = altar.vector(shape=(3 * self.observations))
        self.cd = altar.matrix(shape=(3 * self.observations,
                                      3 * self.observations)).zero()

        # go through the data records
        for idx, rec in enumerate(data):
            # save the (t,x,y) triplet
            self.ticks.append((rec.t, rec.x, rec.y))
            # save the three components of the displacements
            self.d[3 * idx + 0] = rec.uE
            self.d[3 * idx + 1] = rec.uN
            self.d[3 * idx + 2] = rec.uZ
            # populate the covariance matrix
            self.cd[3 * idx + 0, 3 * idx + 0] = rec.σE
            self.cd[3 * idx + 1, 3 * idx + 1] = rec.σN
            self.cd[3 * idx + 2, 3 * idx + 2] = rec.σZ

        # compute the normalization
        self.normalization = self.computeNormalization()
        # compute the inverse of the covariance matrix
        self.cd_inv = self.computeCovarianceInverse()

        # save the parameter meta data
        self.meta()

        # pick an implementation strategy
        # if the user specified {fast} mode
        if self.mode == "fast":
            # attempt to
            try:
                # get the fast strategy that involves a CDM source implemented in C++
                from .Fast import Fast as strategy
            # if this fails
            except ImportError:
                # make channel
                channel = application.error
                # complain
                raise channel.log("unable to find support for <fast> mode")
        # otherwise
        else:
            # get the strategy implemented in pure python
            from .Native import Native as strategy
        # initialize it and save it
        self.strategy = strategy().initialize(application=application,
                                              model=self)

        # show me
        # self.show(job=application.job, channel=self.info)

        # all done
        return self
Example 23
    def loadInputsCp(self):
        """
        Load the additional data (for Cp problem) in the input files into memory
        """
        # grab the input dataspace
        ifs = self.ifs

        # the covariance/uncertainty for model parameter Cmu
        try:
            # get the path to the file
            cmuf = ifs[self.cmu_file]
        # if the file doesn't exist
        except ifs.NotFoundError:
            # grab my error channel
            channel = self.error
            # complain
            channel.log(
                f"missing data covariance matrix: no '{self.cmu_file}' in '{self.case}'"
            )
            # and raise the exception again
            raise
        # if all goes well
        else:
            # allocate the matrix
            cmu = altar.matrix(shape=(self.nCmu, self.nCmu))
            # and load the file contents into memory
            cmu.load(cmuf.uri)

        # the sensitivity kernels, usually named Kmu
        nCmu = self.nCmu
        prefix, suffix = self.kmu_file.split("[n]")
        kmu = []
        for i in range(nCmu):
            kmufn = prefix + str(i + 1) + suffix
            try:
                kmuf = ifs[kmufn]
            except ifs.NotFoundError:
                # grab my error channel
                channel = self.error
                # complain
                channel.log(
                    f"missing sensitivity kernel: no '{kmufn}' in '{self.case}'"
                )
                # and raise the exception again
                raise
            else:
                # allocate a fresh matrix per kernel so the list entries don't alias
                kmu_i = altar.matrix(shape=(self.observations, self.parameters))
                kmu_i.load(kmuf.uri)
                kmu.append(kmu_i)

        # the initial model
        try:
            # get the path to the file
            initModelf = ifs[self.initialModel_file]
        # if the file doesn't exist
        except ifs.NotFoundError:
            # grab my error channel
            channel = self.error
            # complain
            channel.log(
                f"missing initial model file: no '{self.initialModel_file}' in '{self.case}'"
            )
            # and raise the exception again
            raise
        # if all goes well
        else:
            # allocate the vector
            initModel = altar.vector(shape=self.parameters)
            # and load the file contents into memory
            initModel.load(initModelf.uri)

        # all done
        return kmu, cmu, initModel
Example 24
    def initialize(self, application):
        """
        Initialize the state of the model given a {problem} specification
        """
        # externals
        from math import sin, cos

        # chain up
        super().initialize(application=application)

        # initialize my parameter sets
        self.initializeParameterSets()
        # mount the directory with my input data
        self.ifs = self.mountInputDataspace(pfs=application.pfs)

        # load the data from the inputs into memory
        displacements, self.cd = self.loadInputs()

        # compute the normalization
        self.normalization = self.computeNormalization()
        # compute the inverse of the covariance matrix
        self.cd_inv = self.computeCovarianceInverse()

        # build the local representations
        self.points = []
        self.d = altar.vector(shape=self.observations)
        self.los = altar.matrix(shape=(self.observations, 3))
        self.oid = []
        # populate them
        for obs, record in enumerate(displacements):
            # extract the observation id
            self.oid.append(record.oid)
            # extract the (x,y) coordinate of the observation point
            self.points.append((record.x, record.y))
            # extract the observed displacement
            self.d[obs] = record.d
            # get the LOS angles
            theta = record.theta
            phi = record.phi
            # form the projection vectors and store them
            self.los[obs, 0] = sin(theta) * cos(phi)
            self.los[obs, 1] = sin(theta) * sin(phi)
            self.los[obs, 2] = cos(theta)

        # save the parameter meta data
        self.meta()

        # pick an implementation strategy
        # if the user has asked for CUDA support
        if application.job.gpus > 0:
            # attempt to
            try:
                # use the CUDA implementation
                from .CUDA import CUDA as strategy
            # if this fails
            except ImportError:
                # make a channel
                channel = application.error
                # complain
                raise channel.log("unable to find CUDA support")
        # if the user specified {fast} mode
        elif self.mode == "fast":
            # attempt to
            try:
                # get the fast strategy that involves a Reverso source implemented in C++
                from .Fast import Fast as strategy
            # if this fails
            except ImportError:
                # make channel
                channel = application.error
                # complain
                raise channel.log("unable to find support for <fast> mode")
        # otherwise
        else:
            # get the strategy implemented in pure python
            from .Native import Native as strategy
        # initialize it and save it
        self.strategy = strategy().initialize(application=application,
                                              model=self)

        # show me
        # self.show(job=application.job, channel=self.info)

        # all done
        return self
Example 25
    def loadInputs(self):
        """
        Load the data in the input files into memory
        """
        # grab the input dataspace
        ifs = self.ifs

        # first the green functions
        try:
            # get the path to the file
            gf = ifs[self.green]
        # if the file doesn't exist
        except ifs.NotFoundError:
            # grab my error channel
            channel = self.error
            # complain
            channel.log(
                f"missing Green functions: no '{self.green}' in '{self.case}'")
            # and raise the exception again
            raise
        # if all goes well
        else:
            # allocate the matrix
            green = altar.matrix(shape=(self.observations, self.parameters))
            # and load the file contents into memory
            green.load(gf.uri)

        # next, the observations
        try:
            # get the path to the file
            df = ifs[self.data]
        # if the file doesn't exist
        except ifs.NotFoundError:
            # grab my error channel
            channel = self.error
            # complain
            channel.log(
                f"missing observations: no '{self.data}' in '{self.case}'")
            # and raise the exception again
            raise
        # if all goes well
        else:
            # allocate the vector
            data = altar.vector(shape=self.observations)
            # and load the file contents into memory
            data.load(df.uri)

        # finally, the data covariance
        try:
            # get the path to the file
            cf = ifs[self.cd]
        # if the file doesn't exist
        except ifs.NotFoundError:
            # grab my error channel
            channel = self.error
            # complain
            channel.log(
                f"missing data covariance matrix: no '{self.cd}' in '{self.case}'"
            )
            # and raise the exception again
            raise
        # if all goes well
        else:
            # allocate the matrix
            cd = altar.matrix(shape=(self.observations, self.observations))
            # and load the file contents into memory
            cd.load(cf.uri)

        # all done
        return green, data, cd
Example 26
    def dataLikelihood(self, model, step):
        """
        Fill {step.data} with the likelihoods of the samples in {step.theta} given the available
        data.
        """
        # get the norm
        norm = model.norm
        # grab the portion of the sample that belongs to this model
        θ = model.restrict(theta=step.theta)
        # the observed displacements
        displacements = model.d
        # the inverse of the data covariance matrix
        cd_inv = model.cd_inv
        # the normalization
        normalization = model.normalization
        # and the storage for the data likelihoods
        dataLLK = step.data

        # find out how many samples in the set
        samples = θ.rows
        # get the parameter sets
        psets = model.psets

        # get the offsets of the various parameter sets
        QinIdx = model.Qin_idx
        HsIdx = model.Hs_idx
        HdIdx = model.Hd_idx
        asIdx = model.as_idx
        adIdx = model.ad_idx
        acIdx = model.ac_idx

        # get the locations and times of the observations
        ticks = model.ticks
        # initialize a vector to hold the expected displacements
        u = altar.vector(shape=(3 * model.observations))

        # for each sample in the sample set
        for sample in range(samples):
            # extract the parameters
            parameters = θ.getRow(sample)
            # get the flow rate
            Qin = parameters[QinIdx]
            # get the locations of the chambers
            H_s = parameters[HsIdx]
            H_d = parameters[HdIdx]
            # get the sizes
            a_s = parameters[asIdx]
            a_d = parameters[adIdx]
            a_c = parameters[acIdx]

            # make a source using the sample parameters
            reverso = source(H_s=H_s,
                             H_d=H_d,
                             a_s=a_s,
                             a_d=a_d,
                             a_c=a_c,
                             Qin=Qin,
                             G=model.G,
                             v=model.v,
                             mu=model.mu,
                             drho=model.drho,
                             g=model.g)

            # prime the displacement calculator
            predicted = reverso.displacements(locations=ticks)

            # compute the displacements
            for idx, ((t, x, y), (u_R, u_Z)) in enumerate(zip(ticks, predicted)):
                # find the polar angle of the vector to the observation location
                phi = math.atan2(y, x)
                # compute the E and N components
                u_E = u_R * math.sin(phi)
                u_N = u_R * math.cos(phi)
                # save
                u[3 * idx + 0] = u_E
                u[3 * idx + 1] = u_N
                u[3 * idx + 2] = u_Z

            # subtract the observed displacements
            u -= displacements

            # compute the norm of the displacements
            nrm = norm.eval(v=u, sigma_inv=cd_inv)
            # normalize and store it as the data log likelihood
            dataLLK[sample] = normalization - nrm**2 / 2

        # all done
        return self
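
Each sample's score is the log-density of a gaussian misfit. With r = u - d the
residual and Cd the data covariance, and assuming {norm.eval} returns the weighted norm
||r|| = sqrt(r^T Cd^{-1} r),

    dataLLK = normalization - r^T Cd^{-1} r / 2

which is what the squaring of {nrm} in the last step of the loop computes.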