def test_gauss():
    # Three weighted Gaussian components.
    mu0 = [0, 0]
    cov0 = (0.8**2) * np.identity(2)
    g0 = 50 * Gauss(numN=(mu0, cov0))

    mu1 = [1, 0]
    cov1 = (0.1**2) * np.array([[2, 1], [1, 1]])
    g1 = Gauss(numN=(mu1, cov1))

    mu2 = [0, 1.5]
    cov2 = (0.3**2) * np.identity(2)
    g2 = 8 * Gauss(numN=(mu2, cov2))

    # Sum the weighted components into a mixture.
    g3 = g2 + g1 + g0

    # Drop into an interactive shell to inspect the mixture.
    import code; code.interact(local=locals())
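# What the overloaded arithmetic in test_gauss builds, sketched directly
# with scipy.stats under the assumption that scalar multiplication scales
# the density and `+` sums components (consistent with how g[x] is
# evaluated in test_gradient below). Illustration only, not the Gauss API.
import numpy as np
from scipy.stats import multivariate_normal


def g3_pdf(x):
    """g3(x) = 50*N(x|mu0,cov0) + N(x|mu1,cov1) + 8*N(x|mu2,cov2)."""
    return (50 * multivariate_normal([0, 0], 0.8**2 * np.identity(2)).pdf(x)
            + multivariate_normal([1, 0], 0.1**2 * np.array([[2, 1], [1, 1]])).pdf(x)
            + 8 * multivariate_normal([0, 1.5], 0.3**2 * np.identity(2)).pdf(x))


print(g3_pdf(np.array([0.5, 0.5])))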
def plot_fit(series, filename):
    y = series.tolist()
    x = np.array(range(len(y)))

    # Fit a Gaussian curve to the series.
    gauss = Gauss(x, y)
    y_pred = gauss.fit()
    fit_series = pd.Series(y_pred, series.index, name="Fitted Curve")

    _, m, s = gauss.par
    current = int(sum(y))
    estimate = gauss.estimate_total()

    # Overlay the fitted curve on a bar chart of the raw data.
    fig1 = series.iplot(kind="bar", asFigure=True)
    fig2 = fit_series.iplot(asFigure=True, colors=['blue'], width=2,
                            dash="dashdot")
    fig = cf.tools.merge_figures([fig1, fig2])
    fig = go.Figure(fig)
    fig.update_layout(
        title_text="Total Infection Estimate<br>-------------------------------"
                   "<br>Current: {} people,"
                   " Estimate: {} people".format(current, estimate),
        yaxis_title="Capita [-]")
    cf.iplot(figure=fig, asUrl=True, filename=filename)
    return
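# Hypothetical usage of plot_fit, assuming a pandas Series of daily case
# counts indexed by date; "cases.csv" and its column names are illustrative
# placeholders, not part of the project.
import pandas as pd

df = pd.read_csv("cases.csv", parse_dates=["date"], index_col="date")
plot_fit(df["new_cases"], filename="infection_estimate")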
def test_gradient_implementation():
    cov = np.array([[2, 1], [1, 1]])
    mu = np.array([0.1, 0.2])
    g = 2 * Gauss(numN=[(mu, cov)])
    x = np.array([0.2, 0.4])

    print(g.grad(x))
    g.single_gaussian = False
    print(g.grad(x))
    print(g.grad())
def test_gradient():
    cov = np.array([[2, 1], [1, 1]])
    mu = np.array([0.1, 0.2])
    g = 2 * Gauss(numN=[(mu, cov)])
    x = np.array([0.2, 0.4])
    dx = 0.000001

    # Finite-difference approximation of the gradient.
    dgdx = np.array([
        (g[x + np.array([dx, 0])] - g[x]) / dx,
        (g[x + np.array([0, dx])] - g[x]) / dx
    ])

    # Analytic gradient: dN/dx = -N(x) * inv(cov) @ (x - mu).
    dgdx2 = -g[x] * np.linalg.inv(g.numN[0].cov) @ (x.reshape((-1,)) - g.numN[0].mean)

    print(np.linalg.inv(g.numN[0].cov))
    print("%s - %s" % (x.reshape((-1,)), g.numN[0].mean))
    print("computed dgdx: %s" % dgdx)
    print("calculated dgdx: %s" % dgdx2)

    import code; code.interact(local=locals())
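# A self-contained version of the same check, using scipy.stats instead of
# the Gauss class: the gradient of a multivariate normal density satisfies
# grad p(x) = -p(x) * inv(cov) @ (x - mu), which is what test_gradient
# compares against finite differences.
import numpy as np
from scipy.stats import multivariate_normal

mu = np.array([0.1, 0.2])
cov = np.array([[2.0, 1.0], [1.0, 1.0]])
rv = multivariate_normal(mean=mu, cov=cov)

x = np.array([0.2, 0.4])
dx = 1e-6

# Forward finite differences, one coordinate at a time.
fd = np.array([(rv.pdf(x + np.array([dx, 0])) - rv.pdf(x)) / dx,
               (rv.pdf(x + np.array([0, dx])) - rv.pdf(x)) / dx])

# Analytic gradient of the density.
analytic = -rv.pdf(x) * np.linalg.inv(cov) @ (x - mu)

assert np.allclose(fd, analytic, rtol=1e-4)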
def Run(e):
    global powerFlow
    if admittance is None:
        return
    frm.SetStatusText('Running Gauss-Seidel Power Flow')

    # Load the known PQV/delta values at each bus.
    with wx.FileDialog(frm, 'PQVd Information') as fileDialog:
        if fileDialog.ShowModal() == wx.ID_CANCEL:
            return
        fname = fileDialog.GetPath()
    pqvd = pd.read_csv(fname, header=None)

    # Solve the power flow from the admittance matrix and bus data.
    powerFlow = Gauss(admittance.admittance, pqvd[0].values, pqvd[1].values,
                      pqvd[2].values, pqvd[3].values)
    powerFlow.solve()

    wx.StaticText(
        frm, 2,
        'V = ' + np.array2string(np.array(powerFlow.V)) +
        '\n S = ' + np.array2string(np.array(powerFlow.S)) + '\n')
    frm.SetStatusText('')
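# A minimal sketch of the Gauss-Seidel update the power-flow solver above
# is assumed to perform for PQ buses, with bus 0 as the fixed slack bus:
# V_i <- (conj(S_i)/conj(V_i) - sum_{j!=i} Y_ij V_j) / Y_ii.
# Illustration only, not the project's actual solver.
import numpy as np


def gauss_seidel_pq(Y, S, V, iterations=100):
    """Y: (n,n) bus admittance matrix, S: (n,) complex power injections,
    V: (n,) initial complex bus voltages (V[0] held fixed)."""
    V = V.astype(complex).copy()
    for _ in range(iterations):
        for i in range(1, len(V)):  # skip the slack bus
            coupling = Y[i] @ V - Y[i, i] * V[i]  # sum over j != i
            V[i] = (np.conj(S[i]) / np.conj(V[i]) - coupling) / Y[i, i]
    return V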
def soft_assign(self, x, mixmean=None, mixcoef=None, mixcov=None):
    """
    Calculate the probability of x for each of the k classes.

    Typically the sample is assigned to class n with
    n = arg max(resp).

    Parameters
    ----------
    x : (d,n) ndarray
        The observations that need to be classified.
    mixmean : (d,k) ndarray, or None
        The means of the k mixture components.
        If `None` use the values of the class.
    mixcoef : (k,) ndarray, or None
        The k mixture coefficients.
        If `None` use the values of the class.
    mixcov : (k,d,d) ndarray, or None
        The covariances of the k mixture components.
        If `None` use the values of the class.

    Return
    ------
    prob : (k,n) ndarray
        The probabilities for each of the k classes.
    """
    if len(x.shape) == 1:
        x = x[:, None]
    d, n = x.shape

    # Fall back to the class parameters when none are given
    # (the docstring promises this; overwriting unconditionally
    # would silently ignore caller-supplied parameters).
    if mixmean is None:
        mixmean = self.mixmean
    if mixcoef is None:
        mixcoef = self.mixcoef
    if mixcov is None:
        mixcov = self.mixcov

    k = len(mixcoef)
    prob = np.zeros((k, n))
    for j in range(n):
        for i in range(k):
            g = Gauss(mixmean[:, i], mixcov[i])
            prob[i, j] = g.f(x[:, j]) * mixcoef[i]
    return prob / np.sum(prob, axis=0)[None, :]
def _respon(self, mixmean, mixcoef, mixcov):
    """
    Calculate the responsibilities of each data point for each class k.

    Parameters
    ----------
    mixmean : (d,k) ndarray
        The means of the k mixture components.
    mixcoef : (k,) ndarray
        The mixture coefficient for each component.
    mixcov : (k,d,d) ndarray
        The covariance matrices of the k mixture components.

    Return
    ------
    gam : (k,n) ndarray
        The responsibilities of data point x_n to the mixture
        components k.
    """
    data = self.data
    d, n = data.shape
    k = self.k

    # Log joint density of each point under each component:
    # log N(x_n | mu_k, cov_k) + log pi_k.
    gaussian = np.zeros((k, n))
    for j in range(n):
        for i in range(k):
            g = Gauss(mixmean[:, i], mixcov[i])
            gaussian[i, j] = np.log(g.f(data[:, j])) + np.log(mixcoef[i])

    # Normalize in the log domain, then exponentiate.
    gaussian = gaussian - np.log(np.sum(np.exp(gaussian), axis=0)[None, :])
    gam = np.exp(gaussian)
    return gam
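# The normalization above exponentiates before summing, which underflows
# when all log-densities are very negative. A numerically safer sketch of
# the same step, using scipy.special.logsumexp:
import numpy as np
from scipy.special import logsumexp


def normalize_log_responsibilities(log_joint):
    """log_joint: (k,n) array of log N(x_n|mu_k, cov_k) + log pi_k."""
    return np.exp(log_joint - logsumexp(log_joint, axis=0, keepdims=True))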
def Nt(T, P):
    X = [-1, 3, -1, -10, -20, -35]
    while True:
        gamma = dichotomy(0, 3, EPS,
                          lambda cur_gamma: gamma_func(cur_gamma, T, X))
        d_e = find_d_e(T, gamma)
        K = find_K(T, d_e)
        alpha = 0.285 * pow(10, -11) * pow(gamma * T, 3)

        # Augmented matrix [A | b] of the linearized (Newton) system
        # in the log concentrations X.
        system = [
            [1, -1, 1, 0, 0, 0, log(K[0]) + X[1] - X[2] - X[0]],
            [1, 0, -1, 1, 0, 0, log(K[1]) + X[2] - X[3] - X[0]],
            [1, 0, 0, -1, 1, 0, log(K[2]) + X[3] - X[4] - X[0]],
            [1, 0, 0, 0, -1, 1, log(K[3]) + X[4] - X[5] - X[0]],
            [-exp(X[0]), -exp(X[1]), -exp(X[2]),
             -exp(X[3]), -exp(X[4]), -exp(X[5]),
             exp(X[0]) + exp(X[1]) + exp(X[2]) + exp(X[3]) +
             exp(X[4]) + exp(X[5]) - alpha - P * 7243 / T],
            [exp(X[0]), 0, -Z_c[1] * exp(X[2]), -Z_c[2] * exp(X[3]),
             -Z_c[3] * exp(X[4]), -Z_c[4] * exp(X[5]),
             Z_c[1] * exp(X[2]) + Z_c[2] * exp(X[3]) +
             Z_c[3] * exp(X[4]) + Z_c[4] * exp(X[5]) - exp(X[0])],
        ]
        d_X = Gauss(system)

        # Stop when the largest relative correction is small enough
        # (abs() so that negative corrections cannot pass spuriously).
        if max(abs(d_X[i] / X[i]) for i in range(len(X))) < 1e-4:
            break
        for i in range(len(X)):
            X[i] += d_X[i]
    return sum(exp(i) for i in X)
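# A minimal sketch of what Gauss(system) is assumed to do here: solve the
# Newton step from its augmented matrix [A | b] by Gaussian elimination
# with partial pivoting. Illustration only, not the project's solver.
import numpy as np


def gauss_solve(system):
    aug = np.array(system, dtype=float)  # (n, n+1) augmented matrix
    n = aug.shape[0]
    for col in range(n):
        # Partial pivoting: bring the largest remaining pivot up.
        pivot = col + np.argmax(np.abs(aug[col:, col]))
        aug[[col, pivot]] = aug[[pivot, col]]
        # Eliminate the entries below the pivot.
        for row in range(col + 1, n):
            aug[row] -= aug[col] * (aug[row, col] / aug[col, col])
    # Back substitution on the upper-triangular system.
    x = np.zeros(n)
    for row in range(n - 1, -1, -1):
        x[row] = (aug[row, -1] - aug[row, row + 1:n] @ x[row + 1:]) / aug[row, row]
    return x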
def f(self, x, mixmean=None, mixcoef=None, mixcov=None):
    """
    Evaluate the gmm at x.

    Parameters
    ----------
    x : (d,) ndarray
        A single d-dimensional observation.
    mixmean : (d,k) ndarray, or None
        The means of the k mixture components.
        If `None` use the values of the class.
    mixcoef : (k,) ndarray, or None
        The k mixture coefficients.
        If `None` use the values of the class.
    mixcov : (k,d,d) ndarray, or None
        The covariances of the k mixture components.
        If `None` use the values of the class.

    Return
    ------
    val : float
        The value of the gmm at the given x.
    """
    # `is None` rather than `== None`: the latter is ambiguous
    # (element-wise) for ndarrays.
    if mixmean is None:
        mixmean = self.mixmean
    if mixcoef is None:
        mixcoef = self.mixcoef
    if mixcov is None:
        mixcov = self.mixcov

    # Weighted sum of the component densities at x.
    k = len(mixcoef)
    comp = np.zeros((k,))
    for j in range(k):
        g = Gauss(mixmean[:, j], mixcov[j])
        comp[j] = g.f(x)
    return mixcoef.dot(comp)
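# An equivalent way to evaluate the mixture density, sketched with
# scipy.stats instead of the Gauss class:
# p(x) = sum_k mixcoef_k * N(x | mixmean_k, mixcov_k).
import numpy as np
from scipy.stats import multivariate_normal


def gmm_pdf(x, mixmean, mixcoef, mixcov):
    """x: (d,), mixmean: (d,k), mixcoef: (k,), mixcov: (k,d,d)."""
    comp = np.array([multivariate_normal(mixmean[:, j], mixcov[j]).pdf(x)
                     for j in range(len(mixcoef))])
    return mixcoef @ comp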
import numpy as np

import inference
from gauss import Gauss
# `cpds` is used below and is assumed to come from the same package
# as `inference`.

# Handles for readability.
inf = np.inf

# HMM parameters
# prior
pi = np.array([0.6, 0.4])
# state transition matrix
A = np.array([[0.7, 0.3],
              [0.2, 0.8]])
# state emission probabilities
B = np.array([Gauss(mean=np.array([1.0, 2.0]), cov=np.eye(2)),
              Gauss(mean=np.array([0.0, -1.0]), cov=np.eye(2))])

# DBN
intra = np.array([[0, 1], [0, 0]])  # Intra-slice dependencies
inter = np.array([[1, 0], [0, 0]])  # Inter-slice dependencies
node_sizes = np.array([2, inf])
discrete_nodes = [0]
continuous_nodes = [1]
node_cpds = [cpds.TabularCPD(pi), cpds.GaussianCPD(B), cpds.TabularCPD(A)]
def setUp(self):
    self.gauss = Gauss([[10, 2, 1],
                        [1, 5, 1],
                        [2, 3, 10]])
def test_ghmm():
    """
    Testing: GHMM
    """
    # Handles for readability.
    inf = np.inf

    # HMM parameters
    # prior
    pi = np.array([0.6, 0.4])
    # state transition matrix
    A = np.array([[0.7, 0.3],
                  [0.2, 0.8]])
    # state emission probabilities
    B = np.array([Gauss(mean=np.array([1.0, 2.0]), cov=np.eye(2)),
                  Gauss(mean=np.array([0.0, -1.0]), cov=np.eye(2))])

    # DBN
    intra = np.array([[0, 1], [0, 0]])  # Intra-slice dependencies
    inter = np.array([[1, 0], [0, 0]])  # Inter-slice dependencies
    node_sizes = np.array([2, inf])
    discrete_nodes = [0]
    continuous_nodes = [1]
    node_cpds = [cpds.TabularCPD(pi), cpds.GaussianCPD(B), cpds.TabularCPD(A)]
    dbn = models.DBN(intra, inter, node_sizes, discrete_nodes,
                     continuous_nodes, node_cpds)

    inference_engine = inference.JTreeUnrolledDBNInferenceEngine()
    inference_engine.model = dbn
    inference_engine.initialize(T=5)
    dbn.inference_engine = inference_engine

    # INFERENCE
    evidence = [[None, [1.0, 2.0]],
                [None, [3.0, 4.0]],
                [None, [5.0, 6.0]],
                [None, [7.0, 8.0]],
                [None, [9.0, 10.0]]]
    dbn.enter_evidence(evidence)
    print("Likelihood of single sample: %f" % dbn.sum_product())

    # LEARNING
    samples = [[[None, [-0.9094, -3.3056]],
                [None, [2.7887, 2.3908]],
                [None, [1.0203, 1.5940]],
                [None, [-0.5349, 2.2214]],
                [None, [-0.3745, 1.1607]]],
               [[None, [0.7914, 2.7559]],
                [None, [0.3757, -2.3454]],
                [None, [2.4819, 2.0327]],
                [None, [2.8705, 0.7910]],
                [None, [0.2174, 1.2327]]]]
    print("\nEM parameter learning:")
    dbn.learn_params_EM(samples, max_iter=10)
    print("\nPrior (pi):")
    print(dbn.node_cpds[0])
    print("\nTransition matrix (A):")
    print(dbn.node_cpds[2])
    print("\nEmission probabilities (B):")
    print(dbn.node_cpds[1])