Example #1
	def create(self, mean, cov):
		try:
			self.pdf = mn(mean=mean, cov=cov)
		except Exception:
			# If cov is singular / not positive definite, add a small
			# jitter to the diagonal so the density is well defined.
			cov[np.diag_indices_from(cov)] += 1e-4
			self.pdf = mn(mean=mean, cov=cov)
		self.mean = mean
		self.cov = cov
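These snippets assume the alias from scipy.stats import multivariate_normal as mn (it appears explicitly in Example #10). A minimal sketch of the diagonal-jitter fallback above, using a deliberately singular covariance:

import numpy as np
from scipy.stats import multivariate_normal as mn

cov = np.ones((2, 2))       # rank-1, hence singular
mean = np.zeros(2)
try:
    pdf = mn(mean=mean, cov=cov)
except Exception:
    cov[np.diag_indices_from(cov)] += 1e-4   # tiny diagonal jitter
    pdf = mn(mean=mean, cov=cov)
print(pdf.pdf(mean))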
Example #2
    def update(self):
        # Streaming (Welford-style) update of the running mean and
        # covariance with the new record self.rec.
        tempmean = self.mean + (self.rec - self.mean) / float(self.counter)
        self.cov = (self.cov * (self.counter - 1.0) +
                    np.outer(self.rec - tempmean, self.rec - self.mean)) / self.counter
        self.mean = tempmean
        try:
            self.pdf = mn(mean=self.mean, cov=self.cov)
        except Exception:
            # Jitter the diagonal if the updated covariance is singular.
            self.cov[np.diag_indices_from(self.cov)] += 1e-4
            self.pdf = mn(mean=self.mean, cov=self.cov)
        self.counter += 1.0
        return
Example #3
    def predict(self, X_test):

        # Define the prediction vector
        y_pred = np.zeros(len(X_test))

        # Predict the label for each test sample
        for i, test_sample in enumerate(X_test):

            # Define the numerator vector of the Bayes theorem
            numerator = np.zeros(self.n_labels)

            # Compute the numerator for each label
            for j in range(self.n_labels):

                # Compute the likelihood
                likelihood = mn(self.mean_labels[j], self.cov).pdf(test_sample)

                # Compute the numerator
                numerator[j] = self.prior[j] * likelihood

            # Compute the evidence of the Bayes theorem
            evidence = numerator.sum()

            # Compute the posterior using Bayes theorem
            posterior = numerator / evidence

            # Choose the label which maximizes the posterior probability
            y_pred[i] = np.argmax(posterior)

        return y_pred
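A small self-contained sketch of the same Bayes rule outside the class, with hypothetical per-label means, a shared covariance and uniform priors:

import numpy as np
from scipy.stats import multivariate_normal as mn

mean_labels = [np.array([0.0, 0.0]), np.array([3.0, 3.0])]  # hypothetical
cov = np.eye(2)                                             # shared covariance
prior = np.array([0.5, 0.5])                                # uniform priors

x = np.array([2.5, 2.8])
numerator = np.array([p * mn(m, cov).pdf(x)
                      for p, m in zip(prior, mean_labels)])
posterior = numerator / numerator.sum()
print(posterior.argmax())  # predicts label 1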
Example #4
def bisrv(yijs, beta0, beta1, xijs, tau, sigma):
    # Full conditional of the n group intercepts b_i in a random-intercept
    # model y_ij = beta0 + beta1 * x_ij + b_i + eps_ij, with b_i ~ N(0, tau^2)
    # and eps_ij ~ N(0, sigma^2).
    n, J = yijs.shape
    tau2, sigma2 = tau**2, sigma**2
    varr = sigma2 / (sigma2 / tau2 + J)   # conditional variance of each b_i
    cov = np.diag(np.ones(n) * varr)      # conditionally independent across groups
    means = (yijs - beta0 - beta1 * xijs).sum(axis=1) / (sigma2 / tau2 + J)
    return mn(mean=means, cov=cov)
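A hedged usage sketch with hypothetical toy data (4 groups of 3 observations each), assuming the random-intercept model described in the comments:

import numpy as np

rng = np.random.default_rng(0)
xijs = rng.normal(size=(4, 3))
yijs = 2.0 + 0.5 * xijs + rng.normal(scale=0.3, size=(4, 3))

cond = bisrv(yijs, beta0=2.0, beta1=0.5, xijs=xijs, tau=1.0, sigma=0.3)
b_draw = cond.rvs()  # one joint draw of the 4 group intercepts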
Example #5
	def update(self):
		# Same streaming mean/covariance recurrence as Example #2,
		# written with explicit loops.
		tempmean = np.zeros(len(self.mean))
		for i in range(len(self.mean)):
			tempmean[i] = self.mean[i] + (self.rec[i] - self.mean[i]) / float(self.counter)
		for i in range(len(self.mean)):
			for j in range(len(self.mean)):
				self.cov[i][j] = (self.cov[i][j] * (self.counter - 1) +
					(self.rec[i] - tempmean[i]) * (self.rec[j] - self.mean[j])) / self.counter
		self.mean = tempmean
		try:
			self.pdf = mn(mean=self.mean, cov=self.cov)
		except Exception:
			# Jitter the diagonal if the updated covariance is singular.
			self.cov[np.diag_indices_from(self.cov)] += 1e-4
			self.pdf = mn(mean=self.mean, cov=self.cov)
		self.counter = self.counter + 1
		return
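The loops above implement the same recurrence as Example #2. A minimal, self-contained sketch of the textbook Welford update, checked against NumPy's batch estimates:

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(500, 3))

mean = np.zeros(3)
M2 = np.zeros((3, 3))                 # running sum of outer products
for n, x in enumerate(data, start=1):
    delta = x - mean                  # deviation from the old mean
    mean += delta / n
    M2 += np.outer(x - mean, delta)   # new mean on the left, old on the right

assert np.allclose(mean, data.mean(axis=0))
assert np.allclose(M2 / (len(data) - 1), np.cov(data.T))  # sample covariance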
Example #6
def gaussian_multi(mean, cov, color, plot_location, class_name):
    # Build a dense grid over the plotting window
    x, y = np.mgrid[-0.1:0.1:0.001, -0.1:0.1:0.001]
    pos = np.dstack((x, y))
    rv = mn(mean, cov)
    # Plot the density contours
    plot = plt.subplot(plot_location)
    plot.contour(x, y, rv.pdf(pos))
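A hedged usage sketch with hypothetical inputs; the grid above spans only [-0.1, 0.1], so the covariance must be small for the contours to be visible (color and class_name are accepted but unused):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as mn

gaussian_multi(mean=[0.0, 0.0],
               cov=[[1e-3, 0.0], [0.0, 1e-3]],   # small enough for the grid
               color='C0', plot_location=111, class_name='class A')
plt.show()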
Example #7
def plot_fig(x, M):
    # Unnormalised posterior plot:
    zPost = postDist(x, priorDist=zPrior)

    countour_plot(zPost, zmin + 1, zmax + 1)
    # distribution of inverse-mapped points
    noise = mn(mean=[0, 0, 0], cov=np.eye(3)).rvs(size=M)
    x_vec = np.reshape(np.repeat(x, M), (M, 1))
    x_g = sess.run(G, {gen_noise_node: noise, gen_x_node: x_vec})
    plt.scatter(x_g[:, 0], x_g[:, 1], label='p_g', alpha=0.3)
    plt.title('Contour plot of Unnormalised Posterior density for x = ' +
              str(x))
Example #8
def plot_fig(r, M):
    # Unnormalised posterior plot:
    countour_plot(zPost, zmin + 1, zmax + 1)
    plt.title('Posterior Contour Plot')
    # distribution of inverse-mapped points
    xd = np.reshape(np.repeat(zPost.x, M), (M, 1))
    noise = mn(mean=[0, 0, 0], cov=np.eye(3)).rvs(size=M)

    sess.run(G, {gen_noise_node: noise, gen_x_node: xd})
    gs = np.zeros((r, 2))  # generator outputs
    for i in range(int(r / M)):
        z = zDist.rvs(M)
        gs[M * i:M * (i + 1), :] = sess.run(G, {z_node: z})
    plt.scatter(gs[:, 0], gs[:, 1], label='p_g', alpha=0.3)
Example #9
File: nodes.py Project: AmurG/spn
    def update(self):

        tempmean = np.zeros(len(self.mean))
        for i in range(len(self.mean)):
            tempmean[i] = self.mean[i] + (
                (self.rec[i] - self.mean[i]) / float(self.counter))
        for i in range(len(self.mean)):
            for j in range(len(self.mean)):
                self.cov[i][j] = (
                    (self.cov[i][j] * (self.counter - 1) +
                     (self.rec[i] - tempmean[i]) *
                     (self.rec[j] - self.mean[j])) / self.counter)
        self.mean = tempmean
        self.pdf = mn(mean=self.mean, cov=self.cov)
        self.counter = self.counter + 1

        return
Example #10
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

print(
    f"z-score of 3 corresponds to a prob of {100 * 2 * norm.sf(threshold):0.2f}%"
)
visual_scatter = np.random.normal(size=d1.size)
plt.scatter(d1[good], visual_scatter[good], s=2, label="Good", color="#4CAF50")
plt.scatter(d1[~good],
            visual_scatter[~good],
            s=2,
            label="Bad",
            color="#F44336")
plt.legend()

from scipy.stats import multivariate_normal as mn

mean, cov = np.mean(d2, axis=0), np.cov(d2.T)
good = mn(mean, cov).pdf(d2) > 0.01 / 100

plt.scatter(d2[good, 0], d2[good, 1], s=2, label="Good", color="#4CAF50")
plt.scatter(d2[~good, 0], d2[~good, 1], s=2, label="Bad", color="#F44336")
plt.legend()

xs, ys = d3.T
p = np.polyfit(xs, ys, deg=5)
ps = np.polyval(p, xs)
plt.plot(xs, ys, '.', label="Data")
plt.plot(xs, ps, label="Bad poly fit")
plt.legend()

x, y = xs.copy(), ys.copy()

for i in range(5):
Example #11
File: nodes.py Project: AmurG/spn
    def create(self, mean, cov):
        self.pdf = mn(mean=mean, cov=cov)
        self.mean = mean
        self.cov = cov
Example #12
    print("Sample Size: ", i, "\n")
    cl1t = sp1.sample(i).iloc[:, 0:2]
    cl2t = sp2.sample(i).iloc[:, 0:2]
    cov1 = cl1t.cov()
    cov2 = cl2t.cov()
    m1 = cl1t.mean()
    m2 = cl2t.mean()
    """
 print("Class 1 mean: \n", m1.to_numpy(),"\n")
 print("Class 1 covariance: \n", cov1.to_numpy(),"\n")
 print("class 2 mean: \n", m2.to_numpy(),"\n")
 print("class 2 covariance: \n", cov2.to_numpy(),"\n")
 """
    res = []
    for x in testset:
        if mn(m1, cov1).pdf(x[:2]) > mn(m2, cov2).pdf(x[:2]):
            res.append(1)
        else:
            res.append(-1)
    neigh = KNeighborsClassifier(n_neighbors=1)
    neigh.fit(pd.concat([cl1t, cl2t]), [1] * i + [-1] * i)
    predicted_values = neigh.predict(testset[:, 0:2])
    print("Bayes Report:")
    print(classification_report(testset[:, 2], res, labels=[-1, 1]))
    """
 print("Nearest Neighbour Report:")
 print(classification_report(testset[:,2], predicted_values, labels=[-1,1]))
 print("\n\n")
 """
    clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
    clf.fit(pd.concat([cl1t, cl2t]))
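The decision rule above compares class-conditional Gaussian densities. A minimal, self-contained sketch of the same rule on hypothetical synthetic data:

import numpy as np
from scipy.stats import multivariate_normal as mn

rng = np.random.default_rng(42)
cl1 = rng.multivariate_normal([0, 0], np.eye(2), size=100)
cl2 = rng.multivariate_normal([2, 2], np.eye(2), size=100)
m1, c1 = cl1.mean(axis=0), np.cov(cl1.T)
m2, c2 = cl2.mean(axis=0), np.cov(cl2.T)

x = np.array([1.8, 1.9])
label = 1 if mn(m1, c1).pdf(x) > mn(m2, c2).pdf(x) else -1
print(label)  # -1 here: x lies near the second class's mean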
Example #13
sm1 = np.array([0] * 20)
sm2 = np.array([1] * 20)

ns = [50, 100, 300]
for i in ns:
    cl1t = cl1.sample(i)
    cl2t = cl2.sample(i)
    cov1 = cl1t.cov()
    cov2 = cl2t.cov()
    m1 = cl1t.mean()
    m2 = cl2t.mean()
    """
 print(np.linalg.det(cov1))
 print(np.linalg.det(cov2))
 """
    res = []

    for x in testset:
        if mn(m1, cov1).pdf(x[:20]) > mn(m2, cov2).pdf(x[:20]):
            res.append(1)
        else:
            res.append(-1)
    print("Sample size:", i)
    print("Bayes Report")
    print(classification_report(testset[:, 20], res, labels=[-1, 1]))
    neigh = KNeighborsClassifier(n_neighbors=1)
    neigh.fit(pd.concat([cl1t, cl2t]), [1] * i + [-1] * i)
    predicted_values = neigh.predict(testset[:, 0:20])
    print("Nearest Neighbour Report:")
    print(classification_report(testset[:, 20], res, labels=[-1, 1]))
Example #14
#Plots from generated data

from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D

# 2-dimensional distribution over variables X and Y
N = 60
X = np.linspace(-15, 15, N)
Y = np.linspace(-15, 15, N)
X, Y = np.meshgrid(X, Y)
# Pack X and Y into a single 3-dimensional array
pos = np.empty(X.shape + (2, ))
pos[:, :, 0] = X
pos[:, :, 1] = Y

F1 = mn(mu1, sig1)
F2 = mn(mu2, sig2)
F3 = mn(mu3, sig3)
F4 = mn(mu4, sig4)
F5 = mn(mu5, sig5)
Z = F1.pdf(pos) + F2.pdf(pos) + F3.pdf(pos) + F4.pdf(pos) + F5.pdf(pos)
# Create a surface plot and projected filled contour plot under it.
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
ax.plot_surface(X,
                Y,
                Z,
                rstride=3,
                cstride=3,
                linewidth=1,
                antialiased=True)
Example #15
    def logLik(self, z):
        return -np.log(self.beta_par(z.T)) - self.x / self.beta_par(z.T)

    def pdf(self, z):  # Unnormalised posterior density.
        post_pdf = np.exp(self.logLik(z) + self.priorDist.logpdf(z))
        return post_pdf


# Create a regular grid in weight space for visualisation
np.random.seed(seed=1234)
zmin = -5
zmax = 5

prior_var = 2
zPrior = mn([0, 0], np.eye(2) * prior_var)
#let the likelihood be a simple exponential distribution where the
#mean is parametrised by the two hidden variables x_1 and x_2
zPost = postDist(x=10, priorDist=zPrior)

# Prior contour plot:
countour_plot(zPrior, zmin, zmax, label=True)
plt.title('Prior Contour Plot')
# Unnormalised posterior plot:
countour_plot(zPost, zmin, zmax, label=False)
plt.title('Posterior Contour Plot')
plt.show()


#Neural Network
# MLP - used for D_pre, D1, D2, G networks
Example #16
    return optimizer


##Network architecture:

seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)

mu = [1, 1]
Rho = np.diag([1.0, 1.0])
Rho[0, 1] = Rho[1, 0] = 0.5
Sd = np.diag([0.7, 0.7])
Sigma = Sd @ Rho @ Sd

xDist = mn(mu, Sigma)
zDist = mn([0, 0], np.eye(2))
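A quick sanity check of this construction (a sketch, separate from the network code): composing the covariance as Sd @ Rho @ Sd turns standard deviations and a correlation matrix into a covariance matrix.

import numpy as np

Sd = np.diag([0.7, 0.7])
Rho = np.array([[1.0, 0.5], [0.5, 1.0]])
print(Sd @ Rho @ Sd)  # [[0.49, 0.245], [0.245, 0.49]]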


# MLP - used for D_pre, D1, D2, G networks
def linear(input, output_dim, scope=None, stddev=1.0):
    norm = tf.random_normal_initializer(stddev=stddev)
    const = tf.constant_initializer(0.0)
    with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable('w', [input.get_shape()[1], output_dim],
                            initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input, w) + b


def generator(input, h_dim):
Example #17
test = pd.DataFrame(testset)
sp1 = train[train[1] > 0]
sp2 = train[train[1] < 0]
sp1t = sp1.iloc[:, 0:1].values
sp2t = sp2.iloc[:, 0:1].values
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(sp1t)
clf2 = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf2.fit(sp2t)

res = []

for x in testset:
    a = b = 0
    for i in range(2):
        a += clf.weights_[i] * mn(clf.means_[i], clf.covariances_[i]).pdf(x[0])
    for i in range(2):
        b += clf2.weights_[i] * mn(clf2.means_[i], clf2.covariances_[i]).pdf(
            x[0])
    if a > b:
        res.append(1)
    else:
        res.append(-1)
print("EM Bayes:")
print("Class 1:\n", "Mixing Coefficient: ", clf.weights_[0], "Mean:",
      clf.means_[0], "Variance:", clf.covariances_[0])
print("Mixing Coefficient: ", clf.weights_[1], "Mean:", clf.means_[1],
      "Variance:", clf.covariances_[1])
print("Class -1:\n", "Mixing Coefficient: ", clf2.weights_[0], "Mean:",
      clf2.means_[0], "Variance:", clf2.covariances_[0])
print("Mixing Coefficient: ", clf2.weights_[1], "Mean:", clf2.means_[1],