Example #1
    def __init__(self, imgs, lbls, lbda_0=None, proba_0=None):
        self.imgs = binarize_imgs(imgs)
        self.lbls = lbls

        self.n_classes = 10
        self.n_imgs = self.lbls.shape[0]
        self.img_x = 28
        self.img_y = 28

        self.rng = UnivariateRNG()

        if proba_0 is not None:
            assert (proba_0.shape == (self.img_x, self.img_y, self.n_classes))
        else:
            proba_0 = np.array([
                self.rng.rand_uniform(0.25, 0.75)
                for i in range(self.img_x * self.img_y * self.n_classes)
            ]).reshape((self.img_x, self.img_y, self.n_classes))

            # normalize each class map so its pixel probabilities sum to 1
            proba_0 /= np.sum(proba_0, axis=(0, 1))
        self.proba = proba_0

        if lbda_0 is not None:
            assert (lbda_0.shape[0] == self.n_classes)
        else:
            # uniform prior over the classes
            lbda_0 = [1. / self.n_classes for i in range(self.n_classes)]
        self.lbda = lbda_0
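
Every example on this page assumes a UnivariateRNG helper exposing rand_uniform and rand_normal. Its implementation is not shown here; the following is a minimal stand-in sketch, not the original class (my assumption: rand_normal takes a variance, as Example #5 suggests), so the listings can be run on their own:

import math
import random

class UnivariateRNG(object):
    # minimal stand-in, not the original implementation
    def rand_uniform(self, low, high):
        # uniform draw from [low, high)
        return low + (high - low) * random.random()

    def rand_normal(self, mean, var):
        # Box-Muller transform: two uniforms -> one standard normal draw
        u1 = 1.0 - random.random()  # keep u1 > 0 so log(u1) is defined
        u2 = random.random()
        z = math.sqrt(-2.0 * math.log(u1)) * math.cos(2.0 * math.pi * u2)
        return mean + math.sqrt(var) * z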
Example #2
class PolyBasisLinearModelRNG(object): 
	def __init__(self, basis, noise_amplitude, model_parameters):
		assert(isinstance(model_parameters, np.ndarray)) 
		assert(len(model_parameters.shape) == 1 and model_parameters.shape[0] >= basis)

		self.n = basis
		self.a = noise_amplitude 
		self.w = model_parameters

		self.r = UnivariateRNG()

	def setw(self,model_parameters): 
		assert(isinstance(model_parameters, np.ndarray)) 
		assert(len(model_parameters.shape) == 1 and model_parameters.shape[0] >= self.n)
		self.w = model_parameters

	def __call__(self): 
		# rejection-sample x from the open interval (-10, 10)
		x = 10.0
		while x >= 10.0 or x <= -10.0:
			x = self.r.rand_uniform(-10.0, 10.0)

		# evaluate the degree-(n - 1) polynomial at x, then add Gaussian noise
		y = 0
		for i in range(self.n):
			y += self.w[i] * x ** i 
		y += self.r.rand_normal(0, self.a)

		return x, y
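
A short usage sketch (the coefficient and noise values here are illustrative, not from the original): sampling noisy points from y = 1 + 2x + 3x^2.

model = PolyBasisLinearModelRNG(3, 1.0, np.array([1.0, 2.0, 3.0]))
for _ in range(5):
    x, y = model()
    print("x = %.3f, y = %.3f" % (x, y))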
Example #3
	def __init__(self, basis, noise_amplitude, model_parameters):
		assert(isinstance(model_parameters, np.ndarray)) 
		assert(len(model_parameters.shape) == 1 and model_parameters.shape[0] >= basis)

		self.n = basis
		self.a = noise_amplitude 
		self.w = model_parameters

		self.r = UnivariateRNG()
Example #4
    def __init__(self, mean, var):
        self.mean = mean
        self.var = var

        self.mean_est = 0
        self.var_est = 0

        self.n = 0

        self.M2 = 0

        self.r = UnivariateRNG()
Example #5
class SequentialEstimator(object):
    def __init__(self, mean, var):
        self.mean = mean
        self.var = var

        self.mean_est = 0
        self.var_est = 0

        self.n = 0

        self.M2 = 0

        self.r = UnivariateRNG()

    def __call__(self):
        # draw one sample and update the running estimates with
        # Welford's online algorithm
        x = self.r.rand_normal(self.mean, self.var)

        self.n += 1

        old_mean_est = float(self.mean_est)
        self.mean_est += (x - old_mean_est) / self.n

        # M2 accumulates the sum of squared deviations from the mean
        self.M2 = self.M2 + (x - self.mean_est) * (x - old_mean_est)

        # population variance estimate; use M2 / (n - 1) for the sample variance
        self.var_est = self.M2 / self.n

        return self.mean_est, self.var_est, x, self.n
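
A usage sketch (the parameter values are illustrative): the running estimates converge to the true mean and variance as samples accumulate.

est = SequentialEstimator(3.0, 5.0)
for _ in range(10000):
    mean_est, var_est, x, n = est()
print("after %d samples: mean ~ %.3f, var ~ %.3f" % (n, mean_est, var_est))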
Example #6
	def __init__(self, initial_prior_precision, basis, noise_amplitude, model_parameters): 
		assert(isinstance(initial_prior_precision, np.ndarray)) 

		self.rng = UnivariateRNG()

		self.n = basis

		self.initial_precision = initial_prior_precision

		self.a = np.max(initial_prior_precision) * 1e3 / noise_amplitude
		self.y_mean = 0 
		self.step = 0
		
		self.m_prior = 0
		self.b_prior = initial_prior_precision

		self.Y_c = []
		self.X_c = []

		# `n` was undefined here in the original (the constructor argument is
		# `basis`), and the (3, 3) shapes were hard-coded; `basis` appears to
		# be the intended dimension throughout
		self.W_mean = np.zeros((basis, 1))
		self.W_var = np.zeros((basis, basis))

		self.W_s = np.zeros((basis, basis))
		self.W_mu = np.zeros(basis)

		self.Y_s = 0
		self.Y_mu = 0

		self.linear_model = PolyBasisLinearModelRNG(basis, noise_amplitude, model_parameters)
Example #7
	def __init__(self, n, mx1, vx1, mx2, vx2, my1, vy1, my2, vy2, basis=3, learning_rate=1e-2): 
		self.n = n
		self.rng = UnivariateRNG()
		self.basis = basis 
		self.params = basis * 2
		self.lr = learning_rate

		rand_points = lambda nb, m, v : \
			np.array([self.rng.rand_normal(m, v) for i in range(nb)])
		
		self.x1 = rand_points(n, mx1, vx1)
		self.y1 = rand_points(n, my1, vy1)

		self.x2 = rand_points(n, mx2, vx2)
		self.y2 = rand_points(n, my2, vy2)

		self.phi = self.make_poly_design_matrix((self.x1, self.y1), (self.x2, self.y2))
		self.w = np.array([self.rng.rand_normal(0., 0.1) for i in range(self.params)])
		self.d = np.array([0. for i in range(n)] + [1. for i in range(n)])
		shuf = np.append(self.phi, np.vstack(self.d), axis=1)
		np.random.shuffle(shuf)
		self.phi = shuf[:, :self.params]
		self.y = shuf[:, self.params]
Example #8
    def __init__(self, imgs, lbls):
        self.imgs = imgs
        self.lbls = lbls
        self.rng = UnivariateRNG()

        self.n_classes = int(np.amax(lbls) - np.amin(lbls)) + 1
        self.n_imgs = self.imgs.shape[2]
        self.img_l = self.imgs.shape[0]

        self.mu = np.reshape([
            self.rng.rand_uniform(0.25, 0.75)
            for i in range(self.img_l**2 * self.n_classes)
        ], (self.img_l, self.img_l, self.n_classes))
        self.mu[:, :] /= np.sum(self.mu, axis=(0, 1))
        # debug visualization of the initial per-class probability maps
        for i in range(self.n_classes):
            print("min %f, max %f, mean %f" %
                  (np.min(self.mu[:, :, i]), np.max(
                      self.mu[:, :, i]), np.mean(self.mu[:, :, i])))
            plt.imshow(self.mu[:, :, i])
            plt.colorbar()
            plt.show()

        self.pi = np.array([1 / self.n_classes for i in range(self.n_classes)])
Example #9
class LogisticRegression(object): 
	def __init__(self, n, mx1, vx1, mx2, vx2, my1, vy1, my2, vy2, basis=3, learning_rate=1e-2): 
		self.n = n
		self.rng = UnivariateRNG()
		self.basis = basis 
		self.params = basis * 2
		self.lr = learning_rate

		rand_points = lambda nb, m, v : \
			np.array([self.rng.rand_normal(m, v) for i in range(nb)])
		
		self.x1 = rand_points(n, mx1, vx1)
		self.y1 = rand_points(n, my1, vy1)

		self.x2 = rand_points(n, mx2, vx2)
		self.y2 = rand_points(n, my2, vy2)

		self.phi = self.make_poly_design_matrix((self.x1, self.y1), (self.x2, self.y2))
		self.w = np.array([self.rng.rand_normal(0., 0.1) for i in range(self.params)])
		self.d = np.array([0. for i in range(n)] + [1. for i in range(n)])
		shuf = np.append(self.phi, np.vstack(self.d), axis=1)
		np.random.shuffle(shuf)
		self.phi = shuf[:, :self.params]
		self.y = shuf[:, self.params]

	def make_poly_design_matrix(self, xy1, xy2):
		# rows: the n points of class 0 followed by the n points of class 1;
		# columns: [x^0 .. x^(basis-1), y^0 .. y^(basis-1)]
		phi = np.zeros((2 * self.n, self.params))

		for ti in range(2):
			v = xy1[ti] 
			for i in range(self.n): 
				for j in range(self.basis): 
					phi[i, ti * self.basis + j] = v[i]**j
		for ti in range(2):
			v = xy2[ti] 
			for i in range(self.n): 
				for j in range(self.basis): 
					phi[i + self.n, ti * self.basis + j] = v[i]**j
		return phi 

	def logistic(self, x): 
		return 1 / (1 + np.exp(-x))

	def __call__(self, x, y): 
		features = np.array([x**i for i in range(self.basis)] +
		                    [y**i for i in range(self.basis)])
		return self.logistic(np.dot(features, self.w))

	def compute_gradients(self): 
		# mean cross-entropy gradient; the design matrix holds 2 * n points,
		# so iterate over all of them (the original only visited the first n)
		grad = np.zeros(self.w.shape)
		for j in range(self.params): 
			for i in range(2 * self.n): 
				grad[j] += self.phi[i, j] * \
					(self.logistic(np.dot(self.phi[i, :], self.w)) - self.y[i]) 
		return grad / (2 * self.n)

	def compute_hessian(self): 
		hess = np.zeros((self.params, self.params))
		for i in range(2 * self.n):
			# sigma * (1 - sigma) is the same for every (j, k) pair, so
			# compute it once per sample instead of inside the inner loops
			p = self.logistic(np.dot(self.phi[i, :], self.w))
			for j in range(self.params): 
				for k in range(self.params):
					hess[j, k] += self.phi[i, j] * self.phi[i, k] * p * (1 - p)
		return hess

	def newton_descent(self, grad_w, hess_w): 
		# the Hessian is assumed to be invertible here
		self.w -= self.lr * np.dot(np.linalg.inv(hess_w), grad_w) 

	def gradient_descent(self, grad_w): 
		self.w -= self.lr * grad_w 

	def optimize_once(self): 
		grad_w = self.compute_gradients() 
		hess_w = self.compute_hessian() 
		# Newton's method when the Hessian is (numerically) invertible,
		# otherwise fall back to plain gradient descent
		if np.abs(np.linalg.det(hess_w)) > 1e-5: 
			self.newton_descent(grad_w, hess_w) 
			print("used newton_descent")
			return np.linalg.norm(hess_w)
		else: 
			self.gradient_descent(grad_w)
			return np.linalg.norm(grad_w)
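
A usage sketch (all cluster parameters here are illustrative): fit the classifier by repeatedly calling optimize_once, then query the class-1 probability at a point.

clf = LogisticRegression(50, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 3.0, 1.0)
for _ in range(100):
    clf.optimize_once()
print("P(class 1 | x=3, y=3) ~ %.3f" % clf(3.0, 3.0))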
Example #10
class EMAlgorithm(object):
    def __init__(self, imgs, lbls, lbda_0=None, proba_0=None):
        self.imgs = binarize_imgs(imgs)
        self.lbls = lbls

        self.n_classes = 10
        self.n_imgs = self.lbls.shape[0]
        self.img_x = 28
        self.img_y = 28

        self.rng = UnivariateRNG()

        if proba_0 is not None:
            assert (proba_0.shape == (self.img_x, self.img_y, self.n_classes))
        else:
            proba_0 = np.array([
                self.rng.rand_uniform(0.25, 0.75)
                for i in range(self.img_x * self.img_y * self.n_classes)
            ]).reshape((self.img_x, self.img_y, self.n_classes))

            # normalize each class map so its pixel probabilities sum to 1
            proba_0 /= np.sum(proba_0, axis=(0, 1))
        self.proba = proba_0

        if lbda_0 is not None:
            assert (lbda_0.shape[0] == self.n_classes)
        else:
            # uniform prior over the classes
            lbda_0 = [1. / self.n_classes for i in range(self.n_classes)]
        self.lbda = lbda_0

    def class_proba(self, imgs):
        n_imgs = imgs.shape[2]
        p_c = np.ones((n_imgs, self.n_classes))
        for n in range(n_imgs):
            for c in range(self.n_classes):
                # Bernoulli likelihood of image n under class c,
                # prod_ij p^x * (1 - p)^(1 - x), times the class prior
                p_c[n, c] = np.nanprod(
                    np.multiply(
                        np.power(self.proba[:, :, c], imgs[:, :, n]),
                        np.power(1 - self.proba[:, :, c],
                                 1 - imgs[:, :, n]))) * self.lbda[c]
        return p_c

    def responsibility(self, p_c):
        # normalize the joint likelihoods into per-image posteriors
        # (note: this modifies p_c in place)
        w = p_c
        marginal = np.sum(w, axis=1)
        for c in range(self.n_classes):
            w[:, c] /= marginal
        return np.nan_to_num(w)

    def update_params(self, w):
        # M step: class priors are the average responsibilities; pixel
        # probabilities are responsibility-weighted means of the images
        self.lbda = np.sum(w, axis=0) / self.n_imgs

        for c in range(self.n_classes):
            self.proba[:, :, c] = w[0, c] * self.imgs[:, :, 0]
            for n in range(1, self.n_imgs):
                self.proba[:, :, c] += w[n, c] * self.imgs[:, :, n]

        self.proba /= self.lbda * self.n_imgs

    def run_once(self):
        p_c = self.class_proba(self.imgs)
        w = self.responsibility(p_c)
        self.update_params(w)
        return np.copy(self.lbda), np.copy(self.proba)

    def __call__(self, imgs):
        p_c = self.class_proba(imgs)
        s = np.sum(p_c, axis=1)
        for c in range(self.n_classes):
            p_c[:, c] /= s
        return np.nan_to_num(p_c)
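
A usage sketch for the EM loop (train_imgs, train_lbls, and test_imgs are hypothetical MNIST-style arrays of shape (28, 28, n_imgs); binarize_imgs is the helper the constructor already relies on):

em = EMAlgorithm(train_imgs, train_lbls)      # hypothetical array names
for _ in range(20):
    lbda, proba = em.run_once()               # one E step + one M step
posteriors = em(binarize_imgs(test_imgs))     # (n_imgs, 10) class posteriors
predictions = np.argmax(posteriors, axis=1)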
Example #11
class ExpectationMaximization(object):
    def __init__(self, imgs, lbls):
        self.imgs = imgs
        self.lbls = lbls
        self.rng = UnivariateRNG()

        self.n_classes = int(np.amax(lbls) - np.amin(lbls)) + 1
        self.n_imgs = self.imgs.shape[2]
        self.img_l = self.imgs.shape[0]

        self.mu = np.reshape([
            self.rng.rand_uniform(0.25, 0.75)
            for i in range(self.img_l**2 * self.n_classes)
        ], (self.img_l, self.img_l, self.n_classes))
        self.mu[:, :] /= np.sum(self.mu, axis=(0, 1))
        # debug visualization of the initial per-class probability maps
        for i in range(self.n_classes):
            print("min %f, max %f, mean %f" %
                  (np.min(self.mu[:, :, i]), np.max(
                      self.mu[:, :, i]), np.mean(self.mu[:, :, i])))
            plt.imshow(self.mu[:, :, i])
            plt.colorbar()
            plt.show()

        self.pi = np.array([1 / self.n_classes for i in range(self.n_classes)])

    def dumbify(self, p):
        # reflect out-of-range values back into [0, 1]
        p[p < 0] = np.abs(p[p < 0])
        p[p > 1] = 2 - p[p > 1]
        return p

    def expectation(self, imgs):
        E_z_nc = np.ndarray((imgs.shape[2], self.n_classes))
        for n in range(imgs.shape[2]):
            for c in range(self.n_classes):
                # Bernoulli likelihood of image n under class c, times the prior
                E_z_nc[n, c] = np.nanprod(
                    np.multiply(
                        np.power(self.mu[:, :, c], imgs[:, :, n]),
                        np.power((1 - self.mu[:, :, c]), (1 - imgs[:, :, n]))))
                E_z_nc[n, c] *= self.pi[c]

        # normalize each row into a posterior; take the row sums before the
        # loop (the original divided in place, so every column after the
        # first was normalized against already-modified sums)
        totals = np.sum(E_z_nc, axis=1)
        for c in range(self.n_classes):
            E_z_nc[:, c] /= totals

        return np.nan_to_num(E_z_nc)

    def maximization(self, E_z_nc):
        N_c = np.sum(E_z_nc, axis=0)
        print(N_c)

        img_mean_c = np.ndarray((self.img_l, self.img_l, self.n_classes))
        for c in range(self.n_classes):
            img_mean_c[:, :, c] = E_z_nc[0, c] * self.imgs[:, :, 0]
            for n in range(1, self.n_imgs):
                img_mean_c[:, :, c] += E_z_nc[n, c] * self.imgs[:, :, n]

        # divide each class map by its own effective count N_c (the original
        # divided everything by N_c[c] with c left over from the loop above,
        # i.e. by the last class's count only)
        img_mean_c[:, :] /= N_c

        self.mu = self.dumbify(np.nan_to_num(img_mean_c))
        self.pi = self.dumbify(np.nan_to_num(N_c / self.n_imgs))
        print(self.pi)
        print(np.sum(N_c))

    def run_once(self):
        E_z_nc = self.expectation(self.imgs)
        self.maximization(E_z_nc)

    def __call__(self, imgs):
        return self.expectation(imgs)
Example #12
def eval_polynomial(x, w):
	# reconstructed header (the listing cut off the start of this function;
	# the name is my guess): evaluate the polynomial with coefficients w at
	# each point of x
	y = []
	for i in range(x.shape[0]):
		s = 0
		for j in range(w.shape[0]):
			s += w[j] * x[i] ** j
		y.append(s)
	return np.array(y)

if __name__ == "__main__": 
	n = 4
	nsteps = 750
	param_range = 10
	
	plot_interval = 1
	blocking = False

	rng = UnivariateRNG()

	initial_prior_precision = 1e12 * np.eye(n)
	noise_amplitude = 1e1
	model_parameters = np.array([rng.rand_uniform(-param_range, param_range) for i in range(n)])

	b = BayesianLinearRegression(initial_prior_precision, n, noise_amplitude, model_parameters)


	wmu_acc = []
	pred_acc = []

	mean_error = []
	mse = []

	plotting = True