Example #1
class PolyBasisLinearModelRNG(object):
	"""Polynomial-basis linear model y = sum_i w_i * x^i plus Gaussian noise."""

	def __init__(self, basis, noise_amplitude, model_parameters):
		assert isinstance(model_parameters, np.ndarray)
		assert model_parameters.ndim == 1 and model_parameters.shape[0] >= basis

		self.n = basis              # number of polynomial basis functions
		self.a = noise_amplitude    # spread of the additive Gaussian noise
		self.w = model_parameters   # polynomial coefficients

		self.r = UnivariateRNG()

	def setw(self, model_parameters):
		assert isinstance(model_parameters, np.ndarray)
		assert model_parameters.ndim == 1 and model_parameters.shape[0] >= self.n
		self.w = model_parameters

	def __call__(self):
		# Draw x uniformly from the open interval (-10, 10).
		x = 10.0
		while x >= 10.0 or x <= -10.0:
			x = self.r.rand_uniform(-10.0, 10.0)

		# Evaluate the polynomial at x and add zero-mean Gaussian noise.
		y = 0.0
		for i in range(self.n):
			y += self.w[i] * x ** i
		y += self.r.rand_normal(0, self.a)

		return x, y
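
A minimal usage sketch for the class above. It assumes numpy is imported as np and that a UnivariateRNG implementation providing rand_uniform and rand_normal is available, as the class already requires; the coefficients and noise level below are arbitrary placeholders.

import numpy as np

# Coefficients for 1, x and x^2; any 1-D array of length >= basis works.
w = np.array([1.0, -0.5, 0.25])
model = PolyBasisLinearModelRNG(basis=3, noise_amplitude=0.5, model_parameters=w)

# Each call draws x uniformly from (-10, 10) and returns a noisy (x, y) sample.
for _ in range(5):
	x, y = model()
	print("x = %6.3f, y = %8.3f" % (x, y))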
Example #2
class ExpectationMaximization(object):
    def __init__(self, imgs, lbls):
        self.imgs = imgs
        self.lbls = lbls
        self.rng = UnivariateRNG()

        # Number of classes inferred from the label range; imgs is expected to
        # be an (img_l, img_l, n_imgs) array of square images.
        self.n_classes = int(np.amax(lbls) - np.amin(lbls)) + 1
        self.n_imgs = self.imgs.shape[2]
        self.img_l = self.imgs.shape[0]

        # Random initialization of the per-class, per-pixel means, normalized
        # so that each class's values sum to 1 over the image.
        self.mu = np.reshape([
            self.rng.rand_uniform(0.25, 0.75)
            for i in range(self.img_l**2 * self.n_classes)
        ], (self.img_l, self.img_l, self.n_classes))
        self.mu[:, :] /= np.sum(self.mu, axis=(0, 1))

        # Diagnostic output: report and display the initial mean image of each class.
        for c in range(self.n_classes):
            print("min %f, max %f, mean %f" %
                  (np.min(self.mu[:, :, c]), np.max(self.mu[:, :, c]),
                   np.mean(self.mu[:, :, c])))
            plt.imshow(self.mu[:, :, c])
            plt.colorbar()
            plt.show()

        # Uniform initial mixing proportions.
        self.pi = np.array([1.0 / self.n_classes for i in range(self.n_classes)])

    def dumbify(self, p):
        # Fold out-of-range values back into [0, 1]: negatives are mirrored
        # at 0, values above 1 are mirrored at 1.
        p[p < 0] = np.abs(p[p < 0])
        p[p > 1] = 2 - p[p > 1]
        return p

    def expectation(self, imgs):
        # E-step: responsibility of each class c for each image n, i.e.
        # E[z_nc] proportional to pi_c * prod_ij mu_ijc^x_ij * (1 - mu_ijc)^(1 - x_ij).
        E_z_nc = np.ndarray((imgs.shape[2], self.n_classes))
        for n in range(imgs.shape[2]):
            for c in range(self.n_classes):
                E_z_nc[n, c] = np.nanprod(
                    np.multiply(
                        np.power(self.mu[:, :, c], imgs[:, :, n]),
                        np.power((1 - self.mu[:, :, c]), (1 - imgs[:, :, n]))))
                E_z_nc[n, c] *= self.pi[c]

        # Normalize each row so the responsibilities of one image sum to 1.
        # The row sums must be taken once, before any column is rescaled.
        totals = np.sum(E_z_nc, axis=1)
        for c in range(self.n_classes):
            E_z_nc[:, c] /= totals

        return np.nan_to_num(E_z_nc)

    def maximization(self, E_z_nc):
        # M-step: N_c is the effective number of images assigned to each class.
        N_c = np.sum(E_z_nc, axis=0)
        print(N_c)

        # Responsibility-weighted sum of the images for each class.
        img_mean_c = np.ndarray((self.img_l, self.img_l, self.n_classes))
        for c in range(self.n_classes):
            img_mean_c[:, :, c] = E_z_nc[0, c] * self.imgs[:, :, 0]
            for n in range(1, self.n_imgs):
                img_mean_c[:, :, c] += E_z_nc[n, c] * self.imgs[:, :, n]

        # Divide each class's sum by its own effective count
        # (broadcast over the class axis).
        img_mean_c /= N_c

        self.mu = self.dumbify(np.nan_to_num(img_mean_c))
        self.pi = self.dumbify(np.nan_to_num(N_c / self.n_imgs))
        print(self.pi)
        print(np.sum(N_c))

    def run_once(self):
        E_z_nc = self.expectation(self.imgs)
        self.maximization(E_z_nc)

    def __call__(self, imgs):
        return self.expectation(imgs)
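
A minimal usage sketch for the class above, assuming imgs is an (H, W, N) array of binarized pixel values in {0, 1}, lbls is a length-N integer label vector, and numpy, matplotlib.pyplot and UnivariateRNG are importable as the class requires. Note that the constructor prints and plots the randomly initialized class means before any EM step.

import numpy as np

em = ExpectationMaximization(imgs, lbls)

# Alternate E- and M-steps a few times.
for step in range(10):
    em.run_once()

# Posterior responsibilities for each image, shape (N, n_classes); the argmax
# gives cluster indices, which are not necessarily aligned with the labels.
posteriors = em(imgs)
clusters = np.argmax(posteriors, axis=1)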
Example #3
class EMAlgorithm(object):
    def __init__(self, imgs, lbls, lbda_0=None, proba_0=None):
        self.imgs = binarize_imgs(imgs)
        self.lbls = lbls

        # MNIST-sized data: 10 digit classes, 28x28-pixel images.
        self.n_classes = 10
        self.n_imgs = self.lbls.shape[0]
        self.img_x = 28
        self.img_y = 28

        self.rng = UnivariateRNG()

        if proba_0 is not None:
            assert proba_0.shape == (self.img_x, self.img_y, self.n_classes)
        else:
            # Random initialization of the per-class, per-pixel Bernoulli
            # parameters, normalized over the pixels of each class.
            proba_0 = np.array([
                self.rng.rand_uniform(0.25, 0.75)
                for i in range(self.img_x * self.img_y * self.n_classes)
            ]).reshape((self.img_x, self.img_y, self.n_classes))
            proba_0[:, :] = proba_0[:, :] / np.sum(proba_0, axis=(0, 1))
        self.proba = proba_0

        if lbda_0 is not None:
            assert lbda_0.shape[0] == self.n_classes
        else:
            # Uniform initial mixing proportions.
            lbda_0 = [1. / self.n_classes for i in range(self.n_classes)]
        self.lbda = lbda_0

    def class_proba(self, imgs):
        # Joint probability of each image under each class:
        # lambda_c * prod_ij p_ijc^x_ij * (1 - p_ijc)^(1 - x_ij).
        n_imgs = imgs.shape[2]
        p_c = np.ones((n_imgs, self.n_classes))
        for n in range(n_imgs):
            for c in range(self.n_classes):
                p_c[n, c] = np.nanprod(
                    np.multiply(
                        np.power(self.proba[:, :, c], imgs[:, :, n]),
                        np.power(1 - self.proba[:, :, c],
                                 1 - imgs[:, :, n]))) * self.lbda[c]
        return p_c

    def responsability(self, p_c):
        # Normalize each row so the class responsibilities of one image sum
        # to 1 (p_c is modified in place).
        w = p_c
        marginal = np.sum(w, axis=1)
        for c in range(self.n_classes):
            w[:, c] /= marginal
        return np.nan_to_num(w)

    def update_params(self, w):
        # M-step: the mixing proportions are the mean responsibilities.
        self.lbda = np.sum(w, axis=0) / self.n_imgs

        # Responsibility-weighted sum of the images for each class ...
        for c in range(self.n_classes):
            self.proba[:, :, c] = w[0, c] * self.imgs[:, :, 0]
            for n in range(1, self.n_imgs):
                self.proba[:, :, c] += w[n, c] * self.imgs[:, :, n]

        # ... divided by the effective class counts N_c = lbda_c * n_imgs,
        # broadcast over the class axis.
        self.proba /= self.lbda * self.n_imgs

    def run_once(self):
        p_c = self.class_proba(self.imgs)
        w = self.responsability(p_c)
        self.update_params(w)
        return np.copy(self.lbda), np.copy(self.proba)

    def __call__(self, imgs):
        p_c = self.class_proba(imgs)
        s = np.sum(p_c, axis=1)
        for c in range(self.n_classes):
            p_c[:, c] /= s
        return np.nan_to_num(p_c)
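
A minimal usage sketch for the class above, assuming raw_imgs is a (28, 28, N) MNIST-style image array, lbls is a length-N label vector, and binarize_imgs and UnivariateRNG are defined elsewhere as the class requires; the iteration budget is an arbitrary placeholder.

import numpy as np

em = EMAlgorithm(raw_imgs, lbls)

# Run a fixed number of EM iterations; each run_once returns copies of the
# current mixing weights and per-class Bernoulli parameters.
for step in range(20):
    lbda, proba = em.run_once()

# Class posteriors for the (already binarized) training images, shape (N, 10);
# the argmax gives cluster indices rather than true digit labels.
posteriors = em(em.imgs)
clusters = np.argmax(posteriors, axis=1)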
Example #4
		y.append(s)
	return np.array(y)

if __name__ == "__main__": 
	n = 4
	nsteps = 750
	param_range = 10
	
	plot_interval = 1
	blocking = False

	rng = UnivariateRNG()

	# Prior precision matrix, observation noise amplitude, and ground-truth
	# polynomial coefficients drawn uniformly from [-param_range, param_range].
	initial_prior_precision = 1e12 * np.eye(n)
	noise_amplitude = 1e1
	model_parameters = np.array(
		[rng.rand_uniform(-param_range, param_range) for i in range(n)])

	b = BayesianLinearRegression(initial_prior_precision, n, noise_amplitude, model_parameters)


	wmu_acc = []
	pred_acc = []

	mean_error = []
	mse = []

	plotting = True

	for i in range(nsteps): 

		wmu, ws, X, y = b()