Example #1
def transitionAndWeight(states, y, parameters, t):
    Nx = states.shape[0]
    Ntheta = states.shape[2]
    weights = zeros((Nx, Ntheta))
    newstates = zeros_like(states)
    poissonparameters1 = parameters[6, :] * parameters[4, :] * (parameters[2, :]**2) / parameters[3, :]
    poissonparameters2 = (1 - parameters[6, :]) * (parameters[4, :] + \
            parameters[5, :]) * (parameters[2, :]**2) / parameters[3, :]
    poissonparameters1 = repeat(poissonparameters1[:,newaxis], Nx, axis = 1)
    poissonparameters2 = repeat(poissonparameters2[:,newaxis], Nx, axis = 1)
    for indextheta in range(Ntheta):
        allK1 = array(random.poisson(lam = array(poissonparameters1[indextheta,:]))).reshape(Nx)
        allK1[allK1 > 10**4] = 10**4
        allK1 = array(allK1).reshape(Nx)
        sumK1 = numpysum(allK1)
        allK2 = array(random.poisson(lam = poissonparameters2[indextheta,:])).reshape(Nx)
        allK2[allK2 > 10**4] = 10**4
        allK2 = array(allK2).reshape(Nx)
        sumK2 = numpysum(allK2)
        alluniforms1 = random.uniform(size = 2 * sumK1)
        alluniforms2 = random.uniform(size = 2 * sumK2)
        subresults = subtransitionAndWeight(states[..., indextheta], y, parameters[:, indextheta], \
                         alluniforms1, allK1, alluniforms2, allK2)
        newstates[..., indextheta] = subresults["states"]
        weights[..., indextheta] = subresults["weights"]
    return {"states": newstates , "weights": weights}
Example #2
def transitionAndWeight(states, y, parameters, t):
    Nx = states.shape[0]
    Ntheta = states.shape[2]
    weights = zeros((Nx, Ntheta))
    newstates = zeros_like(states)
    poissonparameters1 = parameters[6, :] * parameters[4, :] * (
        parameters[2, :]**2) / parameters[3, :]
    poissonparameters2 = (1 - parameters[6, :]) * (parameters[4, :] + \
            parameters[5, :]) * (parameters[2, :]**2) / parameters[3, :]
    poissonparameters1 = repeat(poissonparameters1[:, newaxis], Nx, axis=1)
    poissonparameters2 = repeat(poissonparameters2[:, newaxis], Nx, axis=1)
    for indextheta in range(Ntheta):
        allK1 = array(
            random.poisson(
                lam=array(poissonparameters1[indextheta, :]))).reshape(Nx)
        allK1[allK1 > 10**4] = 10**4
        allK1 = array(allK1).reshape(Nx)
        sumK1 = numpysum(allK1)
        allK2 = array(
            random.poisson(lam=poissonparameters2[indextheta, :])).reshape(Nx)
        allK2[allK2 > 10**4] = 10**4
        allK2 = array(allK2).reshape(Nx)
        sumK2 = numpysum(allK2)
        alluniforms1 = random.uniform(size=2 * sumK1)
        alluniforms2 = random.uniform(size=2 * sumK2)
        subresults = subtransitionAndWeight(states[..., indextheta], y, parameters[:, indextheta], \
                         alluniforms1, allK1, alluniforms2, allK2)
        newstates[..., indextheta] = subresults["states"]
        weights[..., indextheta] = subresults["weights"]
    return {"states": newstates, "weights": weights}
Example #3
def computeCovarianceAndMean2(X, unnormalizedw):
    weights = unnormalizedw / numpysum(unnormalizedw)
    Xbar = average(X, weights = weights, axis = 0)
    code = \
    """
    int row,col;
    for (row = 0; row < d(0); row++)
    {
        for(col = 0; col < d(0); col++)
        {
          for (int index = 0; index < N(0); index ++){
            covariance(row, col) += weights(index) * (X(index, row) - Xbar(row)) * (X(index, col) - Xbar(col));
          }
        }
    }
    """
    d = X.shape[1]
    print "d:", d
    covariance = zeros((d, d))
    d = array([d])
    N = array([X.shape[0]])
    weave.inline(code,['covariance', 'd', 'N', 'Xbar', 'X', 'weights'], \
        type_converters=weave.converters.blitz, libraries = ["m"])
    weightedcovariance = covariance / (1 - numpysum(power(weights, 2)))
    return {"mean": Xbar, "cov": weightedcovariance}
Example #4
def computeCovarianceAndMean(X, unnormalizedw):
    w = unnormalizedw / numpysum(unnormalizedw)
    weightedmean = average(X, weights = w, axis = 0)
    diagw = diag(w)
    part1 = dot(transpose(X), dot(diagw, X))
    Xtw = dot(transpose(X), w[:, newaxis])
    part2 = dot(Xtw, transpose(Xtw))
    numerator = part1 - part2
    denominator = 1 - numpysum(w**2)
    weightedcovariance = numerator / denominator
    # increase a little bit the diagonal to prevent degeneracy effects
    #weightedcovariance += diag(zeros(self.modeltheta.parameterdimension) + 10**(-4)/self.modeltheta.parameterdimension)
    return {"mean": weightedmean, "cov": weightedcovariance}
Example #5
def rosen(coeffs):
    """evaluates n-dimensional Rosenbrock function for a list of coeffs
minimum is f(x)=0.0 at xi=1.0"""
    x = [1]*2 # ensure that there are 2 coefficients
    x[:len(coeffs)]=coeffs
    x = asarray(x) #XXX: must be a numpy.array
    return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)#,axis=0)
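
A quick check of the function above at and away from the known minimum, assuming asarray and numpysum come from numpy:

from numpy import asarray
from numpy import sum as numpysum

print(rosen([1.0, 1.0]))  # 0.0, the global minimum
print(rosen([0.0, 0.0]))  # 1.0, since 100*(0 - 0**2)**2 + (1 - 0)**2 = 1
print(rosen([1.0]))       # padded to two coefficients, so still 0.0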
Example #6
File: dejong.py Project: yodeng/pathos
def rosen(coeffs):
    """evaluates n-dimensional Rosenbrock function for a list of coeffs
minimum is f(x)=0.0 at xi=1.0"""
    x = [1]*2 # ensure that there are 2 coefficients
    x[:len(coeffs)]=coeffs
    x = asarray(x) #XXX: must be a numpy.array
    return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)#,axis=0)
Example #7
File: poly.py Project: nadiiaaii/mystic
 def __init__(self,order=8,name='poly',metric=lambda x: numpysum(x*x),sigma=1.0):
     Polynomial.__init__(self,name,metric,sigma)
     if order == 2:  self.coeffs = chebyshev2coeffs
     elif order == 4:  self.coeffs = chebyshev4coeffs
     elif order == 6:  self.coeffs = chebyshev6coeffs
     elif order == 8:  self.coeffs = chebyshev8coeffs
     elif order == 16:  self.coeffs = chebyshev16coeffs
     else: raise NotImplementedError("provide self.coeffs 'by hand'")
     return
Example #8
File: poly.py Project: agamdua/mystic
 def __init__(self,order=8,name='poly',metric=lambda x: numpysum(x*x),sigma=1.0):
     Polynomial.__init__(self,name,metric,sigma)
     if order == 2:  self.coeffs = chebyshev2coeffs
     elif order == 4:  self.coeffs = chebyshev4coeffs
     elif order == 6:  self.coeffs = chebyshev6coeffs
     elif order == 8:  self.coeffs = chebyshev8coeffs
     elif order == 16:  self.coeffs = chebyshev16coeffs
     else: raise NotImplementedError("provide self.coeffs 'by hand'")
     return
Example #9
def rosenbrock(x):
    """
    Rosenbrock function:

    A modified second De Jong function, Equation (18) of [2]

    minimum is f(x)=0.0 at xi=1.0
    """
    #ensure that there are 2 coefficients
    assert len(x) >= 2
    x = asarray(x)
    return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)
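
Unlike the padded rosen variants above, this version insists on at least two coefficients and asserts otherwise; a short check, again assuming asarray and numpysum come from numpy:

from numpy import asarray
from numpy import sum as numpysum

print(rosenbrock([1.0, 1.0, 1.0]))  # 0.0 at the minimum, in any dimension >= 2
try:
    rosenbrock([1.0])               # too few coefficients: the assert fires
except AssertionError:
    print("needs at least two coefficients")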
Example #10
    def function(self,coeffs):
        """evaluates an N-dimensional Rosenbrock saddle for a list of coeffs

f(x) = \sum_(i=0)^(N-2) 100*(x_(i+1) - x_(i)^(2))^(2) + (1 - x_(i))^(2)

Inspect with mystic_model_plotter using::
    mystic.models.rosen -b "-3:3:.1, -1:5:.1, 1" -d -x 1

The minimum is f(x)=0.0 at x_i=1.0 for all i"""
        coeffs = asarray(coeffs) #XXX: must be a numpy.array
        x = ones_like(coeffs) #XXX: ensure > 1 coeffs ?
        x[:len(coeffs)]=coeffs
        return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0, axis=self.axis).tolist()
Example #11
    def function(self,coeffs):
        r"""evaluates an N-dimensional Rosenbrock saddle for a list of coeffs

f(x) = \sum_(i=0)^(N-2) 100*(x_(i+1) - x_(i)^(2))^(2) + (1 - x_(i))^(2)

Inspect with mystic_model_plotter using::
    mystic.models.rosen -b "-3:3:.1, -1:5:.1, 1" -d -x 1

The minimum is f(x)=0.0 at x_i=1.0 for all i"""
        coeffs = asarray(coeffs) #XXX: must be a numpy.array
        x = ones_like(coeffs) #XXX: ensure > 1 coeffs ?
        x[:len(coeffs)]=coeffs
        return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0, axis=self.axis).tolist()
Example #12
File: dejong.py Project: vt100/mystic
    def function(self,coeffs):
        """evaluates an N-dimensional Rosenbrock saddle for a list of coeffs

f(x) = \sum_(i=0)^(N-2) 100*(x_(i+1) - x_(i)^(2))^(2) + (1 - x_(i))^(2)

Inspect with mystic_model_plotter using::
    mystic.models.rosen -b "-3:3:.1, -1:5:.1, 1" -d -x 1

The minimum is f(x)=0.0 at x_i=1.0 for all i"""
        x = [1]*2 # ensure that there are 2 coefficients
        x[:len(coeffs)]=coeffs
        x = asarray(x) #XXX: must be a numpy.array
        return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)#,axis=0)
Example #13
    def function(self,coeffs):
        """evaluates an N-dimensional Rosenbrock saddle for a list of coeffs

f(x) = \sum_(i=0)^(N-2) 100*(x_(i+1) - x_(i)^(2))^(2) + (1 - x_(i))^(2)

Inspect with mystic_model_plotter using::
    mystic.models.rosen -b "-3:3:.1, -1:5:.1, 1" -d -x 1

The minimum is f(x)=0.0 at x_i=1.0 for all i"""
        x = [1]*2 # ensure that there are 2 coefficients
        x[:len(coeffs)]=coeffs
        x = asarray(x) #XXX: must be a numpy.array
        return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)#,axis=0)
Example #14
    def __init__(self,name='dummy',metric=lambda x: numpysum(x*x),sigma=1.0):
        """
Provides a base class for mystic models.

Inputs::
    name    -- a name string for the model
    metric  -- the cost metric object  [default => lambda x: numpy.sum(x*x)]
    sigma   -- a scaling factor applied to the raw cost
        """
        self.__name__ = name
        self.__metric__ = metric
        self.__sigma__ = sigma
        self.__forward__ = None
        self.__cost__ = None
        return
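
The default metric above is a plain sum of squares, and the lorentz and decay constructors in the later examples pass their arguments straight through to this base initializer. A tiny sketch of what that default metric computes, assuming numpysum is numpy.sum:

from numpy import array
from numpy import sum as numpysum

metric = lambda x: numpysum(x * x)
print(metric(array([1.0, 2.0, 3.0])))  # 14.0, the sum of squared residuals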
Example #15
def fastWeightedCov(X, unnormalizedw):
    weights = unnormalizedw / numpysum(unnormalizedw)
    Xbar = average(X, weights = weights, axis = 0)
    code = \
    """
    int row,col;
    for (row = 0; row < d(0); row++)
    {
        for(col = 0; col < d(0); col++)
        {
          for (int index = 0; index < N(0); index ++){
            covariance(row, col) += weights(index) * (X(index, row) - Xbar(row)) * (X(index, col) - Xbar(col));
          }
        }
    }
    """
    d = X.shape[1]
    covariance = zeros((d, d))
    d = array([d])
    N = array([X.shape[0]])
    weave.inline(code,['covariance', 'd', 'N', 'Xbar', 'X', 'weights'], \
        type_converters=weave.converters.blitz, libraries = ["m"])
    weightedcovariance = covariance / (1 - numpysum(power(weights, 2)))
    return {"mean": Xbar, "cov": weightedcovariance}
Example #16
def fastWeightedCov(X, unnormalizedw):
    weights = unnormalizedw / numpysum(unnormalizedw)
    Xbar = average(X, weights=weights, axis=0)
    code = \
    """
    int row,col;
    for (row = 0; row < d(0); row++)
    {
        for(col = 0; col < d(0); col++)
        {
          for (int index = 0; index < N(0); index ++){
            covariance(row, col) += weights(index) * (X(index, row) - Xbar(row)) * (X(index, col) - Xbar(col));
          }
        }
    }
    """
    d = X.shape[1]
    covariance = zeros((d, d))
    d = array([d])
    N = array([X.shape[0]])
    weave.inline(code,['covariance', 'd', 'N', 'Xbar', 'X', 'weights'], \
        type_converters=weave.converters.blitz, libraries = ["m"])
    weightedcovariance = covariance / (1 - numpysum(power(weights, 2)))
    return {"mean": Xbar, "cov": weightedcovariance}
Example #17
    def __init__(self,name='dummy',metric=lambda x: numpysum(x*x),sigma=1.0):
        """
Provides a base class for mystic models.

Inputs::
    name    -- a name string for the model
    metric  -- the cost metric object  [default => lambda x: numpy.sum(x*x)]
    sigma   -- a scaling factor applied to the raw cost
        """
        self.__name__ = name
        self.__metric__ = metric
        self.__sigma__ = sigma
        self.__forward__ = None
        self.__cost__ = None
        return
Example #18
 def __init__(self,name='lorentz',metric=lambda x: numpysum(x*x),sigma=1.0):
     AbstractModel.__init__(self,name,metric,sigma)
     return
Example #19
def cost_function(params):
    x = data(params)[1] - datapts
    return numpysum(real((conjugate(x)*x)))
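
For complex residuals, real(conjugate(x)*x) is just the squared magnitude abs(x)**2, so the cost above is the sum of squared magnitudes of the residuals between data(params)[1] and datapts. A tiny check of that identity, independent of the data/datapts globals the snippet relies on:

from numpy import array, conjugate, real, absolute
from numpy import sum as numpysum

x = array([1 + 2j, 3 - 1j])
print(numpysum(real(conjugate(x) * x)))  # 15.0
print(numpysum(absolute(x) ** 2))        # 15.0, the same quantity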
Example #20
 def getTotalLogLike(self):
     csts = numpysum(self.constants)
     #csts[isnan(csts)] = -(10**150)
     #csts[isinf(csts)] = -(10**150)
     return self.totalLogLike + csts
Example #21
 def __init__(self,name='lorentz',metric=lambda x: numpysum(x*x),sigma=1.0):
     AbstractModel.__init__(self,name,metric,sigma)
     return
Example #22
def cost_function(params):
    x = data(params)[1] - datapts
    return numpysum(real((conjugate(x)*x)))
Example #23
File: br8.py Project: mrakitin/mystic
 def __init__(self,name='decay',metric=lambda x: numpysum(x*x)):
     AbstractModel.__init__(self,name,metric)
     return
Example #24
 def getTotalLogLike(self):
     csts = numpysum(self.constants, axis=0)
     csts[isnan(csts)] = -(10**150)
     csts[isinf(csts)] = -(10**150)
     return self.totalLogLike + csts
Example #25
File: br8.py Project: agamdua/mystic
 def __init__(self,name='decay',metric=lambda x: numpysum(x*x)):
     AbstractModel.__init__(self,name,metric)
     return