예제 #1
0
def generation(Fk, Q, U, H, R, x_initial, n_iters):
    """Simulate a linear-Gaussian state-space model.

        x[k+1] = Fk @ x[k] + U + w,   w ~ N(0, Q)
        z[k+1] = H @ x[k+1] + v,      v ~ N(0, R)

    Parameters
    ----------
    Fk : (N, N) ndarray        state-transition matrix
    Q  : (N, N) ndarray        process-noise covariance
    U  : (N,) ndarray          constant drift/control term
    H  : (M, N) ndarray        observation matrix
    R  : (M, M) ndarray        observation-noise covariance
    x_initial : (N,) ndarray   initial state
    n_iters : int              number of time steps to generate

    Returns
    -------
    states : (N, n_iters) ndarray
    observations : (M, n_iters) ndarray
    """
    N = Fk.shape[0]
    states = sp.zeros((N, n_iters))
    states[:, 0] = x_initial
    observations = sp.zeros((H.shape[0], n_iters))
    # NOTE(review): the first observation copies the first two state
    # components rather than H @ x_initial — presumably H selects those
    # components; confirm against the caller.
    observations[:, 0] = x_initial[0:2]
    mean = sp.zeros(H.shape[0])
    # xrange -> range for Python 3 compatibility (behavior unchanged).
    for i in range(n_iters - 1):
        states[:, i + 1] = sp.dot(Fk, states[:, i]) + U + \
            rand.multivariate_normal(sp.zeros(N), Q, 1)
        observations[:, i + 1] = sp.dot(H, states[:, i + 1]) + \
            rand.multivariate_normal(mean, R, 1)
    return states, observations
예제 #2
0
File: pca.py  Project: Yevgnen/prml
    def reconstruct(self, X):
        """Map *X* back to data space through the latent representation,
        adding Gaussian observation noise with variance ``self.sigma2``."""
        n_features = sp.atleast_2d(X).shape[1]
        # posterior latent projection: M^{-1} W^T (X - mean)^T
        centered = (X - self.predict_mean).T
        latent = sp.dot(self.inv_M, sp.dot(self.weight.T, centered))
        noise = sprd.multivariate_normal(
            sp.zeros(n_features), self.sigma2 * sp.eye(n_features))
        return sp.dot(self.weight, latent) + self.predict_mean + noise
예제 #3
0
    def _sample(self, n=1):
        """Draw `n` correlated samples and update the sample memory.

        Starts from zero-mean multivariate-normal noise of dimension
        ``self._nc`` with covariance ``self._sample_vars[1]``, then adds
        ``self._sample_coef . self._sample_mem`` to successive rows, pushing
        each finished row (reversed) onto the left of the memory deque —
        presumably an autoregressive filter; confirm against the code that
        builds ``_sample_vars``.

        :Parameters:
            n : int
                Number of samples to produce. Default=1.
        :Returns:
            ndarray of shape (n, self._nc).
        """

        rval = sp_rd.multivariate_normal(sp.zeros(self._nc),
                                         self._sample_vars[1], n)
        # NOTE(review): the loop bound is the 3rd axis of _sample_vars[0]
        # while k indexes the n rows of rval — the two sizes must agree for
        # this to be safe; verify at the call site.
        for k in xrange(self._sample_vars[0].shape[2]):
            # add the filtered contribution of past outputs held in the memory
            rval[k] += sp.dot(self._sample_coef, self._sample_mem)
            # extendleft reverses its argument, hence the [::-1] pre-reversal
            self._sample_mem.extendleft(rval[k, ::-1])
        return rval
예제 #4
0
    def plotCurves(self, showSamples=False, force2D=True):
        """Plot the fitted Gaussian process.

        indim == 1: mean curve (blue), gray variance band, training points
        ('bx'), and optionally 5 posterior samples in gray.
        indim == 2: a 3-D wireframe of the mean, or (force2D) a flat pcolor
        map. Any other indim just prints a message.

        :Parameters:
            showSamples : bool
                Overlay 5 random draws from N(pred_mean, pred_cov)
                (1-D input only).
            force2D : bool
                For 2-D input, use a pcolor map instead of a 3-D wireframe.
        """
        from pylab import clf, hold, plot, fill, title, gcf, pcolor, gray

        # lazily compute pred_mean / pred_cov on first use
        if not self.calculated:
            self._calculate()

        if self.indim == 1:
            clf()
            hold(True)  # NOTE(review): hold() was removed in matplotlib >= 3.0
            if showSamples:
                # plot samples (gray)
                for _ in range(5):
                    plot(
                        self.testx,
                        self.pred_mean + random.multivariate_normal(zeros(len(self.testx)), self.pred_cov),
                        color="gray",
                    )

            # plot training set
            plot(self.trainx, self.trainy, "bx")
            # plot mean (blue)
            plot(self.testx, self.pred_mean, "b", linewidth=1)
            # plot variance (as "polygon" going from left to right for upper half and back for lower half)
            # NOTE(review): the band is +/- 2 * variance (diag of pred_cov),
            # not +/- 2 * std — confirm this is intended.
            fillx = r_[ravel(self.testx), ravel(self.testx[::-1])]
            filly = r_[self.pred_mean + 2 * diag(self.pred_cov), self.pred_mean[::-1] - 2 * diag(self.pred_cov)[::-1]]
            fill(fillx, filly, facecolor="gray", edgecolor="white", alpha=0.3)
            title("1D Gaussian Process with mean and variance")

        elif self.indim == 2 and not force2D:
            from matplotlib import axes3d as a3

            fig = gcf()
            fig.clear()
            ax = a3.Axes3D(fig)  # @UndefinedVariable

            # plot training set
            ax.plot3D(ravel(self.trainx[:, 0]), ravel(self.trainx[:, 1]), ravel(self.trainy), "ro")

            # plot mean
            # assumes testx is a flattened square grid — TODO confirm
            (x, y, z) = map(
                lambda m: m.reshape(sqrt(len(m)), sqrt(len(m))), (self.testx[:, 0], self.testx[:, 1], self.pred_mean)
            )
            ax.plot_wireframe(x, y, z, colors="gray")
            return ax

        elif self.indim == 2 and force2D:
            # plot mean on pcolor map
            gray()
            # (x, y, z) = map(lambda m: m.reshape(sqrt(len(m)), sqrt(len(m))), (self.testx[:,0], self.testx[:,1], self.pred_mean))
            # NOTE(review): floor() returns a float; reshape(m, m) needs ints
            # on modern numpy — may require int(m).
            m = floor(sqrt(len(self.pred_mean)))
            pcolor(self.pred_mean.reshape(m, m)[::-1, :])

        else:
            print("plotting only supported for indim=1 or indim=2.")
    def _nobj_PI(self,mu,sigma):
        cov = diag(array(sigma)**2)
        rands = random.multivariate_normal(mu,cov,self.n)
        num = 0 #number of cases that dominate the current Pareto set

        for random_sample in rands:
            for par_point in self.y_star:
                #par_point = [p[2] for p in par_point.outputs]
                if self._dom(par_point,random_sample):
                    num = num+1
                    break
        pi = (self.n-num)/float(self.n)
        return pi
예제 #6
0
    def query(self, size=1):
        """Return noise samples drawn from N(self.mu, self.sigma).

        :Parameters:
            size : int
                Number of samples to produce.
                Default=1
        :Returns:
            ndarray of shape (size, len(self.mu)).
        """

        mean, cov = self.mu, self.sigma
        return NR.multivariate_normal(mean, cov, size)
예제 #7
0
def _ar_model_sim(A, C, n=1, n_discard=0, mean=None, check=False):
    """simulates data for a VARMA model

    :Parameters:
        A : ndarray
            AR Coeffitient matrix
        C : ndarray
            Noise covariance matrix
        n : int
            Number od datapoints to simulate with the process
        n_discard : int
            Number od datapoints to discard befor taking datapoints for the
            simulation.
        mean : ndarray
            Mean vector for the process
    """

    # inits and checks
    if mean is None:
        mean = N.zeros(A.shape[0])
    else:
        if mean.size != A.shape[0]:
            raise ValueError('mean vector has wrong shape!')
    m = C.shape[0]
    p = A.shape[1] / m
    if p != round(p):
        raise ValueError('Bad arguments, p not an integer! %s' % p)

    # check for stable model
    if check is True:
        ar_model_check_stable(A)

    # generate data
    err = NR.multivariate_normal(mean, C, n_discard + n)
    x = N.zeros((p, m))
    rval = N.zeros((p + n_discard + n, m))

    for k in xrange(p, p + n_discard + n):
        for j in xrange(p):
            x[j, :] = N.dot(rval[k - j - 1, :], A[:, j * m:(j + 1) * m])
        rval[k, :] = x.sum(axis=0) + err[k - p, :]

    # return
    return rval[p + n_discard:, :]
예제 #8
0
File: mlens3.py  Project: stefanv/MLTP
	def _filt_run(self,dat,filt,do_sim=False,vplot=True,nrange=1):
		"""Fit a light-curve model (te, umin, t0 — presumably microlensing
		parameters) to one filter's data with orthogonal distance
		regression, trying a grid of initial guesses and keeping the best.

		:Parameters:
			dat : sequence of 3 arrays
				(times, values, errors) for this filter.
			filt : object
				Filter identifier (not used in this body).
			do_sim : bool
				If True, overplot 10 model curves with parameters drawn
				from the fit covariance.
			vplot : bool
				If True (and self.doplot), errorbar-plot the raw data.
			nrange : int
				Resolution of the t0/te initial-guess grids.
		:Returns:
			(final_output, p, new) : best ODR output object, its parameter
			list, and the `new` code-path flag.
		"""
		if self.doplot and vplot:
			errorbar(dat[0],dat[1],dat[2],fmt="o")
		
		# `new` selects the reduced-parameter model; the else-branch is dead
		new = True
		if new:
			mymodel = Model(self.fitfunc_small_te,extra_args=[dat[1],dat[2],False])
		else:
			mymodel = Model(self.fitfunc_te) #,extra_args=[dat[1],dat[2],False])
			
		# get some good guesses
		# NOTE(review): bare except — falls back to plain mean if trim_mean fails
		try:
			scale = trim_mean(dat[1],0.3)
		except:
			scale = mean(dat[1])
		offset = 1.0 #trim_mean(dat[1],0.3)
		t0    = median(dat[0])
		umin  = 1.0
		b     = 0.0  ## trending slope
		# sx = one minute expressed in days
		mydata  = RealData(dat[0],dat[1],sx=1.0/(60*24),sy=dat[2])
		
		# candidate t0 starts: coarse grid over the data span, the time of
		# the data maximum, and two points past the end of the data
		trange = list(linspace(min(dat[0]),max(dat[0]),nrange))
		maxi = (dat[1] == max(dat[1])).nonzero()[0]		
		trange.extend(list(dat[0][maxi]))
		trange.extend([t0, max(dat[0]) + 10, max(dat[0]) + 100])
		
		final_output = None
		for t0i in trange:
			# te guesses: log-spaced between 2 and 200
			for te in 10**linspace(log10(2),log10(200),nrange):
				if new:
					pinit = [te,umin,t0i] # ,scale,offset,b]
				else:
					pinit = [te,umin,t0i ,scale,offset,b]
				
				myodr = ODR(mydata,mymodel,beta0=pinit)
				myoutput = myodr.run()
				if final_output is None:
					final_output = myoutput
					old_sd_beta = final_output.sd_beta  # NOTE(review): never read again
					continue

				# accept the new fit when its parameter errors shrank on
				# (trimmed-log) average, its residual variance is no worse,
				# and it has no more degenerate (zero) error bars
				if trim_mean(log10(myoutput.sd_beta / final_output.sd_beta),0.0) < 0.0 and \
					myoutput.res_var <= final_output.res_var and (myoutput.sd_beta == 0.0).sum() <= (final_output.sd_beta == 0.0).sum():
					final_output = myoutput
					
		if 1:
			# evaluate the best model on a dense grid reaching t0 + 6*te
			t = linspace(min(dat[0]),max([max(dat[0]),final_output.beta[2] + 6*final_output.beta[0]]),1500)
			if new:
				tmp = self.fitfunc_small_te(final_output.beta,dat[0],dat[1],dat[2],True)
				#print tmp, "***"
				p = list(final_output.beta)
				p.extend([tmp[0],tmp[1],tmp[2]])
				y = array(self.modelfunc_small_te(p,t))
			else:
				p = final_output.beta
				y = self.fitfunc_te(final_output.beta,t)
				#print final_output.beta 
			if self.doplot:
				plot(t,y)
				xlabel('Time [days]')
				ylabel('Relative Flux Density')
			
			if do_sim:
				# NOTE(review): samples use `myoutput` (last fit tried), not
				# `final_output` (best fit) — confirm this is intended
				for i in range(10):
					tmp = r.multivariate_normal(myoutput.beta, myoutput.cov_beta)
					if self.doplot:
						plot(t, self.a_te(tmp[0],tmp[1],tmp[2],tmp[3],tmp[4],tmp[5],t),"-")
			
		return (final_output, p, new)
예제 #9
0
 def draw(self):
     """Return one random function drawn from the posterior: the predictive
     mean plus multivariate-normal noise with covariance ``pred_cov``."""
     if not self.calculated:
         self._calculate()
     noise = random.multivariate_normal(zeros(len(self.testx)), self.pred_cov)
     return self.pred_mean + noise
예제 #10
0
    def draw(self):
        """Sample a single function from the Gaussian-process posterior
        (predictive mean plus N(0, pred_cov) noise)."""
        if not self.calculated:
            self._calculate()
        deviation = random.multivariate_normal(
            zeros(len(self.testx)), self.pred_cov)
        return self.pred_mean + deviation
예제 #11
0
# Class means: column i of `m` is the mean of class i+1 in R^n.
# NOTE(review): n, N, NC, class_prior, `random` and `multivariate_normal`
# (presumably scipy.stats) are defined earlier in the file — not visible here.
m = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 1], [0, 0, 1]]).T

# eigenvalues gaussian covariance matrices
# (each class covariance is isotropic: lam[i] * I_n)
lam = np.array([.12, .11, .17, .14])

# sampling and labeling
X = np.zeros((n, N))
rnd = random.rand(N)

# temp[i]..temp[i+1] is the cumulative-prior interval owned by class i+1
temp = np.insert(np.cumsum(class_prior), 0, 0)

y = np.zeros(N)
for i in range(NC):
    # samples whose uniform draw fell in class i's interval get label i+1
    ind = (rnd >= temp[i]) & (rnd < temp[i + 1])
    y[ind] = i + 1
    X[:, ind] = random.multivariate_normal(m[:, i],
                                           np.eye(n) * lam[i], np.sum(ind)).T

#-------------------------------------------------------------------------
# Part A: minimum probability of error classification (MAP classification)
#-------------------------------------------------------------------------
decisionMAP = np.zeros(N)

for i in range(N):
    # class-conditional likelihoods p(x_i | class j)
    conditional = np.zeros(NC)

    for j in range(NC):
        conditional[j] = multivariate_normal.pdf(X[:, i],
                                                 mean=m[:, j],
                                                 cov=(np.eye(n) * lam[j]))

    # MAP rule: maximize likelihood * prior; labels are 1-based
    decisionMAP[i] = np.argmax(conditional * class_prior) + 1
예제 #12
0
    def _filt_run(self, dat, filt, do_sim=False, vplot=True, nrange=1):
        """Fit a light-curve model (te, umin, t0 — presumably microlensing
        parameters) to one filter's data with orthogonal distance
        regression, trying a grid of initial guesses and keeping the best.

        :Parameters:
            dat : sequence of 3 arrays
                (times, values, errors) for this filter.
            filt : object
                Filter identifier (not used in this body).
            do_sim : bool
                If True, overplot 10 model curves with parameters drawn
                from the fit covariance.
            vplot : bool
                If True (and self.doplot), errorbar-plot the raw data.
            nrange : int
                Resolution of the t0/te initial-guess grids.
        :Returns:
            (final_output, p, new) : best ODR output object, its parameter
            list, and the `new` code-path flag.
        """
        if self.doplot and vplot:
            errorbar(dat[0], dat[1], dat[2], fmt="o")

        # `new` selects the reduced-parameter model; the else-branch is dead
        new = True
        if new:
            mymodel = Model(self.fitfunc_small_te,
                            extra_args=[dat[1], dat[2], False])
        else:
            mymodel = Model(
                self.fitfunc_te)  #,extra_args=[dat[1],dat[2],False])

        # get some good guesses
        # NOTE(review): bare except — falls back to plain mean on any failure
        try:
            scale = trim_mean(dat[1], 0.3)
        except:
            scale = mean(dat[1])
        offset = 1.0  #trim_mean(dat[1],0.3)
        t0 = median(dat[0])
        umin = 1.0
        b = 0.0  ## trending slope
        # sx = one minute expressed in days
        mydata = RealData(dat[0], dat[1], sx=1.0 / (60 * 24), sy=dat[2])

        # candidate t0 starts: coarse grid over the data span, the time of
        # the data maximum, and two points past the end of the data
        trange = list(linspace(min(dat[0]), max(dat[0]), nrange))
        maxi = (dat[1] == max(dat[1])).nonzero()[0]
        trange.extend(list(dat[0][maxi]))
        trange.extend([t0, max(dat[0]) + 10, max(dat[0]) + 100])

        final_output = None
        for t0i in trange:
            # te guesses: log-spaced between 2 and 200
            for te in 10**linspace(log10(2), log10(200), nrange):
                if new:
                    pinit = [te, umin, t0i]  # ,scale,offset,b]
                else:
                    pinit = [te, umin, t0i, scale, offset, b]

                myodr = ODR(mydata, mymodel, beta0=pinit)
                myoutput = myodr.run()
                if final_output is None:
                    final_output = myoutput
                    old_sd_beta = final_output.sd_beta  # NOTE(review): never read again
                    continue

                # accept the new fit when its parameter errors shrank on
                # (trimmed-log) average, its residual variance is no worse,
                # and it has no more degenerate (zero) error bars
                if trim_mean(log10(myoutput.sd_beta / final_output.sd_beta),0.0) < 0.0 and \
                 myoutput.res_var <= final_output.res_var and (myoutput.sd_beta == 0.0).sum() <= (final_output.sd_beta == 0.0).sum():
                    final_output = myoutput

        if 1:
            # evaluate the best model on a dense grid reaching t0 + 6*te
            t = linspace(
                min(dat[0]),
                max([
                    max(dat[0]),
                    final_output.beta[2] + 6 * final_output.beta[0]
                ]), 1500)
            if new:
                tmp = self.fitfunc_small_te(final_output.beta, dat[0], dat[1],
                                            dat[2], True)
                #print tmp, "***"
                p = list(final_output.beta)
                p.extend([tmp[0], tmp[1], tmp[2]])
                y = array(self.modelfunc_small_te(p, t))
            else:
                p = final_output.beta
                y = self.fitfunc_te(final_output.beta, t)
                #print final_output.beta
            if self.doplot:
                plot(t, y)
                xlabel('Time [days]')
                ylabel('Relative Flux Density')

            if do_sim:
                # NOTE(review): samples use `myoutput` (last fit tried), not
                # `final_output` (best fit) — confirm this is intended
                for i in range(10):
                    tmp = r.multivariate_normal(myoutput.beta,
                                                myoutput.cov_beta)
                    if self.doplot:
                        plot(
                            t,
                            self.a_te(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4],
                                      tmp[5], t), "-")

        return (final_output, p, new)
예제 #13
0
# Means/covariances of mixture components 2 and 3.
# NOTE(review): mu1, cov1, `draws` and the `rnd` random module come from
# earlier in the file — not visible here.
mu2 = np.array([5, 3])
cov2 = np.eye(2) * 2

mu3 = np.array([8, 12])
cov3 = np.array([3.4, 0, 0, 5.1]).reshape(2, 2)

# multinom params
# mixing weights; component 2 has zero weight, so it draws no samples
p1 = 0.4
p2 = 0
p3 = 1 - p2 - p1

# random draws
# knum[i] = how many of `draws` total samples come from component i
rnd.seed(1)
knum = rnd.multinomial(draws, (p1, p2, p3))
gaus1 = rnd.multivariate_normal(mu1, cov1, knum[0])
gaus2 = rnd.multivariate_normal(mu2, cov2, knum[1])
gaus3 = rnd.multivariate_normal(mu3, cov3, knum[2])

# join columns into dataframe
# c = integer component label (0, 1, 2) for each row
x1 = pd.Series(np.r_[gaus1[:, 0], gaus2[:, 0], gaus3[:, 0]])
x2 = pd.Series(np.r_[gaus1[:, 1], gaus2[:, 1], gaus3[:, 1]])
c = pd.Series(np.r_[np.zeros(knum[0]), np.ones(knum[1]), np.ones(knum[2]) * 2])
dat = {"x1": x1, "x2": x2, "c": c}
clustData = pd.DataFrame(dat)

# plot clusters
#plt.scatter(clustData["x1"], clustData["x2"], c = clustData["c"])
#plt.show()

### Set Clustering Prior Parameters ###
예제 #14
0
File: multigibbs.py  Project: kousu/stat440
def multinormal(n, Mu, Sigma):
    """Draw ``n`` samples from the multivariate normal N(Mu, Sigma).

    Returns an ndarray of shape (n, len(Mu)).
    """
    samples = random.multivariate_normal(Mu, Sigma, n)
    return samples