Example #1
def normalization(X):
    P = X.shape[1]
    for p in range(P):  # range, not the Python 2 xrange
        mean = np.mean(X[:, p])
        variance = np.var(X[:, p])  # np.var; np.variance does not exist in NumPy
        X[:, p] = (X[:, p] - mean) / variance  # note: divides by the variance, not the standard deviation
    return X
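
A minimal usage sketch (the shapes are illustrative; assumes NumPy is imported as np and the function above is defined):

    import numpy as np

    X = np.random.randn(100, 3) * 5.0 + 2.0  # hypothetical data: 100 samples, 3 features
    X_normalized = normalization(X.copy())   # pass a copy: the function modifies X in place
    print(X_normalized.mean(axis=0))         # each column mean is now ~0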
Example #2
def normalize_audio_feature(audio_feature: np.ndarray, per_frame=False):
    """ Mean and variance normalization """
    axis = 1 if per_frame else None
    # keepdims=True so the per-frame statistics broadcast against the 2-D input
    mean = np.mean(audio_feature, axis=axis, keepdims=True)
    std_dev = np.sqrt(np.var(audio_feature, axis=axis, keepdims=True) + 1e-9)  # np.var, not np.variance
    normalized = (audio_feature - mean) / std_dev
    return normalized
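
A quick usage sketch (the frames-by-coefficients shape is an assumption):

    import numpy as np

    feature = np.random.randn(200, 13)                             # e.g. 200 frames of 13 coefficients
    global_norm = normalize_audio_feature(feature)                 # one mean/std over the whole array
    frame_norm = normalize_audio_feature(feature, per_frame=True)  # one mean/std per frame
    print(global_norm.shape, frame_norm.shape)                     # both (200, 13)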
Example #3
def normalization(X):
    P = X.shape[1]
    for p in range(P):
        mean = np.mean(X[:, p])
        variance = np.var(X[:, p])  # np.var, not np.variance
        X[:, p] = (X[:, p] - mean) / variance
    return X
Example #4
def delta_mf_variance_pooled(delta_mf, bootstrap_weight_array):
    """
  Get a bootstrap estimator of the variance of delta_mf, pooling equally across all temporal differences rather than 
  treating each (S, A) pair differently.

  :param delta_mf: array of temporal differences
  :param bootstrap_weight_array: number_of_bootstrap_replicates x len(td) - size array of bootstrap multipliers
  :return:
  """
    n = len(delta_mf)
    bootstrapped_deltas = np.zeros((0, n))
    for multiplier in bootstrap_weight_array:
        delta_b = np.multiply(delta_mf, multiplier)
        bootstrapped_deltas = np.vstack((bootstrapped_deltas, delta_b))
    elementwise_variances = np.var(bootstrapped_deltas, axis=0)  # np.var, not np.variance
    return np.mean(elementwise_variances), bootstrapped_deltas
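
A sketch of how it might be called, with multipliers drawn from an exponential distribution (a common Bayesian-bootstrap choice; all sizes here are illustrative):

    import numpy as np

    delta_mf = np.random.randn(50)                        # 50 temporal differences
    weights = np.random.exponential(1.0, size=(200, 50))  # 200 bootstrap replicates
    pooled_var, deltas_b = delta_mf_variance_pooled(delta_mf, weights)
    print(pooled_var, deltas_b.shape)                     # scalar estimate, (200, 50)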
Example #5
    def covariates_variance(self):
        r"""Variance explained by the covariates.

        It is defined as

        .. math::

            \sigma_a^2 = \sum_{s=1}^p \left\{ \sum_{i=1}^n \left(
                \mathrm M_{i,s}\beta_s - \sum_{j=1}^n
                \frac{\mathrm M_{j,s}\beta_s}{n} \right)^2 \Big/ n
            \right\}

        where :math:`p` is the number of covariates and :math:`n` is the number
        of individuals. One can show that it amounts to
        :math:`\sum_s \beta_s^2` whenever the columns of :math:`\mathrm M`
        are normalized to have mean and standard deviation equal to zero and
        one, respectively.
        """
        return fsum(variance(self.M * self.beta, axis=0))
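
The identity in the docstring is easy to check numerically (a sketch assuming `variance` is NumPy's var and `fsum` is math.fsum, as the method body suggests):

    import numpy as np
    from math import fsum

    M = np.random.randn(500, 3)
    M = (M - M.mean(axis=0)) / M.std(axis=0)   # standardize columns: mean 0, std 1
    beta = np.array([0.5, -1.0, 2.0])
    sigma_a2 = fsum(np.var(M * beta, axis=0))  # the expression from covariates_variance
    print(sigma_a2, np.sum(beta**2))           # the two values agree (5.25)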
Example #6
def delta_mb_bias_and_variance_pooled(delta_mb, q_fn, env, gamma, X, Sp1,
                                      transition_model_fitter,
                                      bootstrap_weight_array):
    """
  Estimate sampling variance of delta_mb by fitting model to bootstrapped samples of the data (and pooling).
  """
    n = X.shape[0]
    transition_model = transition_model_fitter()
    bootstrapped_deltas = np.zeros((0, n))
    for multiplier in bootstrap_weight_array:
        X_b = np.multiply(multiplier, X)  # bootstrap-weight the data; the original multiplied by the then-undefined X_b
        transition_model.fit(X_b, Sp1)  # fit the instantiated model rather than the fitter class
        R_b = transition_model.expected_reward(X)
        expected_q_max_array, _ = expected_q_max(q_fn, X, env,
                                                 transition_model)
        delta_b = R_b + gamma * expected_q_max_array - q_fn(X)
        bootstrapped_deltas = np.vstack((bootstrapped_deltas, delta_b))
    elementwise_variances = np.var(bootstrapped_deltas, axis=0)  # np.var, not np.variance
    elementwise_means = np.mean(bootstrapped_deltas, axis=0)
    return (np.mean(elementwise_variances), bootstrapped_deltas,
            delta_mb - elementwise_means)
Example #7
def variance_intensity(self):
    return np.var(self.intensity_image[self.image])  # np.var, not np.variance
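
In context this is a region-property method; the attribute names follow skimage-style regionprops, where intensity_image is the image patch and image is its boolean mask. A stand-alone sketch of the same computation:

    import numpy as np

    intensity_image = np.random.rand(8, 8)  # hypothetical intensity patch
    image = intensity_image > 0.5           # boolean mask selecting the region
    print(np.var(intensity_image[image]))   # variance of intensities inside the mask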
Example #8
File: rvm.py  Project: bxy8804/pyiacsun
    def oneIteration(self):

        #*****************
        # Decision phase
        #*****************

        # Compute change in likelihood
        deltaML = np.zeros((self.MFull, 1))
        action = self.actionReestimate * np.ones((self.MFull, 1))
        usedFactor = self.factor[self.Used]
        N, M = self.PHI.shape

        # Reestimation
        iu = np.where(usedFactor > self.controls['ZeroFactor'])[0]
        index = self.Used[iu]
        newAlpha = self.SOut[index]**2 / self.factor[index]
        delta = 1.0 / newAlpha - 1.0 / self.Alpha[iu]

        # Quick computation of change in log-likelihood given all re-estimations
        deltaML[index] = 0.5 * (delta * self.QIn[index]**2 /
                                (delta * self.SIn[index] + 1.0) -
                                np.log(1.0 + self.SIn[index] * delta))

        # Deletion
        iu = np.where(usedFactor <= self.controls['ZeroFactor'])[0]
        index = self.Used[iu]
        anyToDelete = (not index.size == 0) and M > 1

        if (anyToDelete):
            deltaML[index] = -0.5 * (
                self.QOut[index]**2 / (self.SOut[index] + self.Alpha[iu]) -
                np.log(1.0 + self.SOut[index] / self.Alpha[iu]))
            action[index] = self.actionDelete

        # Addition
        GoodFactor = self.factor > self.controls['ZeroFactor']
        GoodFactor[self.Used] = 0
        GoodFactor[self.alignedOut] = 0
        index = np.where(GoodFactor)[0]
        anyToAdd = (not index.size == 0)

        if (anyToAdd):
            # Quick computation of change in log-likelihood
            quot = self.QIn[index]**2 / self.SIn[index]
            deltaML[index] = 0.5 * (quot - 1.0 - np.log(quot))
            action[index] = self.actionAdd

        if (anyToAdd and self.controls['PriorityAddition']) or (
                anyToDelete and self.controls['PriorityDeletion']):

            # We won't perform re-estimation this iteration
            deltaML[action == self.actionReestimate] = 0

            # We should enforce add if preferred and delete
            if (anyToAdd and self.controls['PriorityAddition']
                    and (not self.controls['PriorityDeletion'])):
                deltaML[action == self.actionDelete] = 0

            if (anyToDelete and self.controls['PriorityDeletion']
                    and (not self.controls['PriorityAddition'])):
                deltaML[action == self.actionAdd] = 0

        # Finally, choose the action that results in the greatest change in likelihood
        nu = np.atleast_1d(np.argmax(deltaML))
        self.deltaLogMarginal = deltaML[nu]
        self.selectedAction = action[nu]
        anyWorthWhileAction = self.deltaLogMarginal > 0

        # If basis nu is already in the model, find its index
        j = []
        if (self.selectedAction
                == self.actionReestimate) or (self.selectedAction
                                              == self.actionDelete):
            j = np.where(self.Used == nu)[0]

        self.Phi = np.atleast_2d(self.basis[:, nu])
        newAlpha = self.SOut[nu]**2 / self.factor[nu]

        change = np.abs(np.log(newAlpha) - np.log(self.Alpha[j]))

        if (not anyWorthWhileAction) or (
            (self.selectedAction == self.actionReestimate) and
            (change < self.controls['MinDeltaLogAlpha']) and
            (not anyToDelete)):
            self.selectedAction = self.actionTerminate

        # Alignment checks for addition
        if (self.selectedAction == self.actionAdd):
            p = np.dot(self.Phi.T, self.PHI)
            findAligned = np.where(p > self.controls['AlignmentMax'])[0]
            numAligned = findAligned.size

            if (numAligned > 0):
                # The added basis function is effectively indistinguishable from one present already
                self.selectedAction = self.actionAlignmentSkip
                self.alignDeferCount += 1

                # Take note not to try this again
                self.alignedOut = np.append(self.alignedOut,
                                            nu * np.ones(
                                                (numAligned, 1))).astype(int)
                self.alignedIn = np.append(self.alignedIn,
                                           self.Used[findAligned])

        # Alignment checks for deletion
        if (self.selectedAction == self.actionDelete):
            findAligned = np.where(self.alignedIn == nu)[0]
            numAligned = findAligned.size
            if (numAligned > 0):
                reinstated = self.alignedOut[findAligned]
                self.alignedIn = np.delete(self.alignedIn, findAligned, 0)
                self.alignedOut = np.delete(self.alignedOut, findAligned, 0)

        #*****************
        # Action phase
        #*****************
        updateRequired = False

        if (self.selectedAction == self.actionReestimate):
            # Basis function nu is already in the model and we're re-estimating its alpha
            oldAlpha = self.Alpha[j]
            self.Alpha[j] = newAlpha
            S_j = self.Sigma[:, j]
            deltaInv = 1.0 / (newAlpha - oldAlpha)
            kappa = 1.0 / (self.Sigma[j, j] + deltaInv)
            tmp = kappa * S_j
            newSigma = self.Sigma - np.dot(tmp, S_j.T)
            deltaMu = -self.Mu[j] * tmp
            self.Mu += deltaMu

            self.SIn += kappa * np.dot(self.betaBasisPHI, S_j)**2
            self.QIn -= np.dot(self.betaBasisPHI, deltaMu)

            self.updateCount += 1
            updateRequired = True

        elif (self.selectedAction == self.actionAdd):
            self.basisPhi = np.dot(self.basis.T, self.Phi)
            self.basisPHI = np.hstack((self.basisPHI, self.basisPhi))
            self.BPhi = self.beta * self.Phi
            self.BASISBPhi = self.beta * self.basisPhi

            tmp = np.dot(np.dot(self.BPhi.T, self.PHI), self.Sigma).T

            self.Alpha = np.vstack((self.Alpha, newAlpha))
            self.PHI = np.hstack((self.PHI, self.Phi))
            s_ii = 1.0 / (newAlpha + self.SIn[nu])
            s_i = -s_ii * tmp
            TAU = -np.dot(s_i, tmp.T)

            t1 = np.hstack((self.Sigma + TAU, s_i))
            t2 = np.hstack((s_i.T, s_ii))
            newSigma = np.vstack((t1, t2))
            mu_i = s_ii * self.QIn[nu]
            deltaMu = np.vstack((-mu_i * tmp, mu_i))
            self.Mu = np.vstack((self.Mu, 0)) + deltaMu

            mCi = self.BASISBPhi - np.dot(self.betaBasisPHI, tmp)
            self.SIn -= s_ii * mCi**2
            self.QIn -= mu_i * mCi

            self.Used = np.hstack((self.Used, nu))
            self.addCount += 1
            updateRequired = True

        elif (self.selectedAction == self.actionDelete):
            self.basisPHI = np.delete(self.basisPHI, j, 1)
            self.PHI = np.delete(self.PHI, j, 1)
            self.Alpha = np.delete(self.Alpha, j, 0)
            s_jj = self.Sigma[j, j]
            s_j = self.Sigma[:, j]
            tmp = s_j / s_jj
            newSigma = self.Sigma - np.dot(tmp, s_j.T)
            newSigma = np.delete(newSigma, j, 0)
            newSigma = np.delete(newSigma, j, 1)
            deltaMu = -self.Mu[j] * tmp
            mu_j = self.Mu[j]
            self.Mu += deltaMu
            self.Mu = np.delete(self.Mu, j, 0)

            jPm = np.dot(self.betaBasisPHI, s_j)
            self.SIn += jPm**2 / s_jj
            self.QIn += jPm * mu_j / s_jj

            self.Used = np.delete(self.Used, j, 0)
            self.deleteCount += 1
            updateRequired = True

        M = len(self.Used)

        if (updateRequired):
            self.SOut[:] = self.SIn
            self.QOut[:] = self.QIn

            tmp = self.Alpha / (self.Alpha - self.SIn[self.Used])

            self.SOut[self.Used] = tmp * self.SIn[self.Used]
            self.QOut[self.Used] = tmp * self.QIn[self.Used]

            self.factor = (self.QOut * self.QOut - self.SOut)
            self.Sigma = newSigma
            self.Gamm = 1.0 - self.Alpha * np.atleast_2d(np.diag(self.Sigma)).T

            self.betaBasisPHI = self.beta * self.basisPHI

            self.logML += self.deltaLogMarginal[0, 0]
            self.countLoop += 1
            self.logMarginalLog = np.append(self.logMarginalLog, self.logML)

        # Something went wrong. Recompute statistics
        if (np.sum(self.Gamm) < 0):
            self.fullStatistics()

        # Recompute noise if not given
        if (self.noise is None) and (
            (self.selectedAction == self.actionTerminate) or
            (self.loop <= self.controls['BetaUpdateStart']) or
            (self.loop % self.controls['BetaUpdateFrequency'] == 0)):
            # Gaussian noise estimate
            betaZ1 = self.beta  # previous value; the bare name `beta` would be unbound here
            y = np.dot(self.PHI, self.Mu)
            e = self.targets - y
            beta = (N - np.sum(self.Gamm)) / np.dot(e.T, e)

            # Work around the zero-noise issue
            beta = np.amin([beta, 1.e6 / np.var(self.targets)])  # np.var, not np.variance
            self.beta = beta
            deltaLogBeta = np.log(beta) - np.log(betaZ1)

            # Full re-computation of statistics after beta update
            if (np.abs(deltaLogBeta) > 1.e-6):
                self.fullStatistics()
                self.countLoop += 1
                self.logMarginalLog = np.append(self.logMarginalLog,
                                                self.logML)

                if (self.selectedAction == self.actionTerminate):
                    self.selectedAction = self.actionNoiseOnly
                    print("Noise update. Termination deferred")

        #self.AInv = np.diag(1.0/self.Alpha[:,0])
        #Sigma = 1.0/self.beta * np.identity(self.N) + np.dot(np.dot(self.PHI, self.AInv), self.PHI.T)
        #CInv, logD = cholInvert(Sigma)
        #logL = -0.5*logD - 0.5*np.dot(np.dot(self.targets.T,CInv),self.targets)

        print("{0:4d} - L={1:10.7f} - Gamma={2:10.7f} (M={3:4d}) - s={4:6.4f}".
              format(self.loop, self.logML[0] / N, np.sum(self.Gamm), M,
                     np.sqrt(1.0 / self.beta)))

        if (self.selectedAction == self.actionTerminate):
            print("Stopping at iteration {0} - max_delta_ml={1}".format(
                self.loop, self.deltaLogMarginal[0, 0]))
            print("L={0} - Gamma={1} (M={2}) - s={3}".format(
                self.logML[0] / N, np.sum(self.Gamm), M,
                np.sqrt(1.0 / self.beta)))

        iterationLimit = self.loop == 200
        self.lastIteration = iterationLimit
Example #9
def variance(array):
    return np.var(array)  # np.var is the NumPy name; np.variance does not exist
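
The wrapper just renames NumPy's var; for example:

    import numpy as np

    print(variance(np.array([0, 1, 2, 3, 4])))  # 2.0 (population variance, ddof=0)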
Example #10
import numpy as np
import matplotlib.pyplot as plt

mean = np.zeros(10)
index = np.arange(10)
for i in range(len(index)):
    s = np.random.poisson(3, 1000)
    mean[i] = np.mean(s)

print(mean)
a = np.mean(mean)
a_v = np.var(mean)  # np.var, not np.variance
print(a, a_v)

plt.figure()
plt.title('10 Times')
plt.hist(mean, density=True)  # Matplotlib's `normed` argument was removed; use `density`
plt.show()

#############

import numpy as np
import matplotlib.pyplot as plt

mean = np.zeros(100)
index = np.arange(100)
for i in range(len(index)):
    s = np.random.poisson(3, 1000)
    mean[i] = np.mean(s)

print(mean)
b = np.mean(mean)
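
As a sanity check on the printed a_v: the variance of the sample mean of n = 1000 draws from Poisson(3) should be close to lambda / n:

    expected = 3.0 / 1000
    print(expected)  # 0.003; a_v above should land near this value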
Example #11
File: rvm.py  Project: aasensio/pyiacsun
	def oneIteration(self):

#*****************
# Decision phase
#*****************

# Compute change in likelihood
		deltaML = np.zeros((self.MFull, 1))
		action = self.actionReestimate * np.ones((self.MFull,1))
		usedFactor = self.factor[self.Used]
		N, M = self.PHI.shape
		
		
# Reestimation
		iu = np.where(usedFactor > self.controls['ZeroFactor'])[0]
		index = self.Used[iu]
		newAlpha = self.SOut[index]**2 / self.factor[index]
		delta = 1.0 / newAlpha - 1.0 / self.Alpha[iu]
		
# Quick computation of change in log-likelihood given all re-estimations
		deltaML[index] = 0.5 * (delta * self.QIn[index]**2 / (delta * self.SIn[index]+1.0) - np.log(1.0+self.SIn[index]*delta))
		
# Deletion
		iu = np.where(usedFactor <= self.controls['ZeroFactor'])[0]
		index = self.Used[iu]
		anyToDelete = (not index.size == 0) and M > 1
		
		if (anyToDelete):
			deltaML[index] = -0.5 * (self.QOut[index]**2 / (self.SOut[index] + self.Alpha[iu]) - np.log(1.0+self.SOut[index] / self.Alpha[iu]))
			action[index] = self.actionDelete
			
# Addition
		GoodFactor = self.factor > self.controls['ZeroFactor']
		GoodFactor[self.Used] = 0
		GoodFactor[self.alignedOut] = 0
		index = np.where(GoodFactor)[0]
		anyToAdd = (not index.size == 0)
		
		if (anyToAdd):
# Quick computation of change in log-likelihood
			quot = self.QIn[index]**2 / self.SIn[index]
			deltaML[index] = 0.5 * (quot - 1.0 - np.log(quot))
			action[index] = self.actionAdd
			
		if (anyToAdd and self.controls['PriorityAddition']) or (anyToDelete and self.controls['PriorityDeletion']):
			
# We won't perform re-estimation this iteration
			deltaML[action == self.actionReestimate] = 0
			
# We should enforce add if preferred and delete
			if (anyToAdd and self.controls['PriorityAddition'] and (not self.controls['PriorityDeletion'])):
				deltaML[action == self.actionDelete] = 0
				
			if (anyToDelete and self.controls['PriorityDeletion'] and (not self.controls['PriorityAddition'])):
				deltaML[action == self.actionAdd] = 0

# Finally, choose the action that results in the greatest change in likelihood
		nu = np.atleast_1d(np.argmax(deltaML))	
		self.deltaLogMarginal = deltaML[nu]
		self.selectedAction = action[nu]
		anyWorthWhileAction = self.deltaLogMarginal > 0
		
# If basis nu is already in the model, find its index
		j = []
		if (self.selectedAction == self.actionReestimate) or (self.selectedAction == self.actionDelete):
			j = np.where(self.Used == nu)[0]
			
		self.Phi = np.atleast_2d(self.basis[:,nu])
		newAlpha = self.SOut[nu]**2 / self.factor[nu]
		
		change = np.abs(np.log(newAlpha) - np.log(self.Alpha[j]))
							
		if (not anyWorthWhileAction) or ((self.selectedAction == self.actionReestimate) and (change < self.controls['MinDeltaLogAlpha']) and (not anyToDelete)):
			self.selectedAction = self.actionTerminate
			
# Alignment checks for addition		
		if (self.selectedAction == self.actionAdd):
			p = np.dot(self.Phi.T, self.PHI)
			findAligned = np.where(p > self.controls['AlignmentMax'])[0]
			numAligned = findAligned.size
			
			if (numAligned > 0):
# The added basis function is effectively indistinguishable from one present already
				self.selectedAction = self.actionAlignmentSkip
				self.alignDeferCount += 1
				
# Take note not to try this again
				self.alignedOut = np.append(self.alignedOut, nu*np.ones((numAligned,1))).astype(int)
				self.alignedIn = np.append(self.alignedIn, self.Used[findAligned])
		
# Alignment checks for deletion
		if (self.selectedAction == self.actionDelete):
			findAligned = np.where(self.alignedIn == nu)[0]
			numAligned = findAligned.size
			if (numAligned > 0):
				reinstated = self.alignedOut[findAligned]
				self.alignedIn = np.delete(self.alignedIn, findAligned, 0)
				self.alignedOut = np.delete(self.alignedOut, findAligned, 0)
				
#*****************
# Action phase
#*****************
		updateRequired = False
		
		if (self.selectedAction == self.actionReestimate):			
# Basis function nu is already in the model and we're re-estimating its alpha
			oldAlpha = self.Alpha[j]
			self.Alpha[j] = newAlpha
			S_j = self.Sigma[:,j]
			deltaInv = 1.0 / (newAlpha - oldAlpha)
			kappa = 1.0 / (self.Sigma[j,j] + deltaInv)
			tmp = kappa * S_j
			newSigma = self.Sigma - np.dot(tmp, S_j.T)
			deltaMu = -self.Mu[j] * tmp
			self.Mu += deltaMu
			
			self.SIn += kappa * np.dot(self.betaBasisPHI, S_j)**2
			self.QIn -= np.dot(self.betaBasisPHI, deltaMu)
			
			self.updateCount += 1
			updateRequired = True
			
		elif (self.selectedAction == self.actionAdd):						
			self.basisPhi = np.dot(self.basis.T, self.Phi)
			self.basisPHI = np.hstack((self.basisPHI, self.basisPhi))
			self.BPhi = self.beta * self.Phi
			self.BASISBPhi = self.beta * self.basisPhi
			
			tmp = np.dot(np.dot(self.BPhi.T, self.PHI), self.Sigma).T
			
			self.Alpha = np.vstack((self.Alpha,newAlpha))
			self.PHI = np.hstack((self.PHI, self.Phi))
			s_ii = 1.0 / (newAlpha + self.SIn[nu])
			s_i = -s_ii * tmp
			TAU = -np.dot(s_i, tmp.T)
			
			t1 = np.hstack((self.Sigma+TAU, s_i))
			t2 = np.hstack((s_i.T, s_ii))
			newSigma = np.vstack((t1,t2))
			mu_i = s_ii * self.QIn[nu]
			deltaMu = np.vstack((-mu_i*tmp, mu_i))
			self.Mu = np.vstack((self.Mu, 0)) + deltaMu
			
			mCi = self.BASISBPhi - np.dot(self.betaBasisPHI, tmp)
			self.SIn -= s_ii * mCi**2
			self.QIn -= mu_i * mCi
			
			self.Used = np.hstack((self.Used, nu))
			self.addCount += 1
			updateRequired = True						
			
		elif (self.selectedAction == self.actionDelete):			
			self.basisPHI = np.delete(self.basisPHI, j, 1)
			self.PHI = np.delete(self.PHI, j, 1)
			self.Alpha = np.delete(self.Alpha, j, 0)
			s_jj = self.Sigma[j,j]
			s_j = self.Sigma[:,j]
			tmp = s_j / s_jj
			newSigma = self.Sigma - np.dot(tmp, s_j.T)
			newSigma = np.delete(newSigma, j, 0)
			newSigma = np.delete(newSigma, j, 1)
			deltaMu = -self.Mu[j] * tmp
			mu_j = self.Mu[j]
			self.Mu += deltaMu
			self.Mu = np.delete(self.Mu, j, 0)
			
			jPm = np.dot(self.betaBasisPHI, s_j)
			self.SIn += jPm**2 / s_jj
			self.QIn += jPm * mu_j / s_jj
			
			self.Used = np.delete(self.Used, j, 0)
			self.deleteCount += 1
			updateRequired = True
			
		M = len(self.Used)
		
		if (updateRequired):
			self.SOut[:] = self.SIn
			self.QOut[:] = self.QIn
			
			tmp = self.Alpha / (self.Alpha - self.SIn[self.Used])
			
			self.SOut[self.Used] = tmp * self.SIn[self.Used]
			self.QOut[self.Used] = tmp * self.QIn[self.Used]
						
			self.factor = (self.QOut * self.QOut - self.SOut)
			self.Sigma = newSigma
			self.Gamm = 1.0 - self.Alpha * np.atleast_2d(np.diag(self.Sigma)).T
			
			self.betaBasisPHI = self.beta * self.basisPHI
			
			self.logML += self.deltaLogMarginal[0,0]
			self.countLoop += 1
			self.logMarginalLog = np.append(self.logMarginalLog, self.logML)
									
# Something went wrong. Recompute statistics
		if (np.sum(self.Gamm) < 0):
			self.fullStatistics()
		
# Recompute noise if not given
		if (self.noise is None) and ((self.selectedAction == self.actionTerminate) or (self.loop <= self.controls['BetaUpdateStart']) or 
			(self.loop % self.controls['BetaUpdateFrequency'] == 0)):
# Gaussian noise estimate
			betaZ1 = self.beta  # previous value; the bare name `beta` would be unbound here
			y = np.dot(self.PHI, self.Mu)
			e = self.targets - y
			beta = (N - np.sum(self.Gamm)) / np.dot(e.T, e)

# Work around the zero-noise issue
			beta = np.amin([beta, 1.e6 / np.var(self.targets)])  # np.var, not np.variance
			self.beta = beta
			deltaLogBeta = np.log(beta) - np.log(betaZ1)
			
# Full re-computation of statistics after beta update
			if (np.abs(deltaLogBeta) > 1.e-6):
				self.fullStatistics()
				self.countLoop += 1
				self.logMarginalLog = np.append(self.logMarginalLog, self.logML)
				
				if (self.selectedAction == self.actionTerminate):
					self.selectedAction = self.actionNoiseOnly
					print("Noise update. Termination deferred")
		
		#self.AInv = np.diag(1.0/self.Alpha[:,0])
		#Sigma = 1.0/self.beta * np.identity(self.N) + np.dot(np.dot(self.PHI, self.AInv), self.PHI.T)
		#CInv, logD = cholInvert(Sigma)
		#logL = -0.5*logD - 0.5*np.dot(np.dot(self.targets.T,CInv),self.targets)
		
		print("{0:4d} - L={1:10.7f} - Gamma={2:10.7f} (M={3:4d}) - s={4:6.4f}".format(self.loop,self.logML[0]/N, np.sum(self.Gamm), M, np.sqrt(1.0/self.beta)))
				
		
		if (self.selectedAction == self.actionTerminate):
			print("Stopping at iteration {0} - max_delta_ml={1}".format(self.loop, self.deltaLogMarginal[0,0]))
			print("L={0} - Gamma={1} (M={2}) - s={3}".format(self.logML[0]/N, np.sum(self.Gamm), M, np.sqrt(1.0/self.beta)))
		
		iterationLimit = self.loop == 200
		self.lastIteration = iterationLimit
Example #12
def StartingValue(self):
    # Starting point: [mean, variance] of the data, wrapped in a list so np.array
    # builds a 2-vector (np.var, not the nonexistent np.variance)
    return np.array([np.mean(self._data), np.var(self._data)])
Example #13
import numpy as np
import matplotlib.pyplot as plt

mean = np.zeros(10)
index = np.arange(10)
for i in range(len(index)):
    s = np.random.poisson(3, 1000)
    mean[i] = np.mean(s)

print(mean)
a = np.mean(mean)
a_v = np.var(mean)  # np.var, not np.variance
print(a, a_v)

plt.figure()
plt.title('10 Times')
plt.hist(mean, density=True)  # `normed` was removed from Matplotlib; use `density`
plt.show()

#############

import numpy as np
import matplotlib.pyplot as plt

mean = np.zeros(100)
index = np.arange(100)
for i in range(len(index)):
    s = np.random.poisson(3, 1000)
    mean[i] = np.mean(s)

print(mean)
b = np.mean(mean)
b_v = np.var(mean)  # np.var, not np.variance