Example #1
import numpy as np

# UnivariateRNG is assumed to be defined elsewhere in the same project.
class PolyBasisLinearModelRNG(object):
	"""Generates (x, y) samples from a noisy polynomial-basis linear model."""

	def __init__(self, basis, noise_amplitude, model_parameters):
		assert(isinstance(model_parameters, np.ndarray))
		assert(len(model_parameters.shape) == 1 and model_parameters.shape[0] >= basis)

		self.n = basis              # number of basis functions (degree + 1)
		self.a = noise_amplitude    # scale of the additive Gaussian noise (second argument to rand_normal)
		self.w = model_parameters   # polynomial coefficients

		self.r = UnivariateRNG()

	def setw(self,model_parameters): 
		assert(isinstance(model_parameters, np.ndarray)) 
		assert(len(model_parameters.shape) == 1 and model_parameters.shape[0] >= self.n)
		self.w = model_parameters

	def __call__(self):
		# Rejection-sample x strictly inside the open interval (-10, 10).
		x = 10.0
		while x >= 10.0 or x <= -10.0:
			x = self.r.rand_uniform(-10.0, 10.0)

		# Evaluate the polynomial sum_i w[i] * x^i and add Gaussian noise.
		y = 0.0
		for i in range(self.n):
			y += self.w[i] * x ** i
		y += self.r.rand_normal(0, self.a)

		return x, y
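
A minimal usage sketch for the generator above, assuming UnivariateRNG comes from the same project and reusing the numpy import at the top of the example; the coefficients and sample count are illustrative choices, not part of the original code:

# Hypothetical usage: draw noisy samples of y = 1 + 2x + 3x^2 for x in (-10, 10).
gen = PolyBasisLinearModelRNG(basis=3, noise_amplitude=1.0, model_parameters=np.array([1.0, 2.0, 3.0]))
for _ in range(5):
	x, y = gen()    # one (x, y) sample per call
	print("x = %+.3f, y = %+.3f" % (x, y))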
Example #2
import numpy as np

# UnivariateRNG is assumed to be defined elsewhere in the same project.
class SequentialEstimator(object):
    """Online mean/variance estimation using Welford's algorithm."""

    def __init__(self, mean, var):
        # True parameters of the Gaussian the samples are drawn from.
        self.mean = mean
        self.var = var

        # Running estimates, updated one sample at a time.
        self.mean_est = 0
        self.var_est = 0

        # Number of samples seen so far.
        self.n = 0

        # Welford accumulator: sum of squared deviations from the running mean.
        self.M2 = 0

        self.r = UnivariateRNG()

    def __call__(self):
        # Draw one new sample from the target Gaussian.
        x = self.r.rand_normal(self.mean, self.var)

        self.n += 1

        # Welford update: new_mean = old_mean + (x - old_mean) / n.
        old_mean_est = float(self.mean_est)
        self.mean_est += (x - old_mean_est) / self.n

        # Accumulate the sum of squared deviations.
        self.M2 = self.M2 + (x - self.mean_est) * (x - old_mean_est)

        # Population variance estimate (divides by n, not n - 1).
        self.var_est = self.M2 / self.n

        return self.mean_est, self.var_est, x, self.n
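
A sketch of how the estimator above might be driven until the running estimates stop moving; the target parameters and the stopping threshold are illustrative assumptions:

# Hypothetical driver: update the estimates one sample at a time until they stabilize.
est = SequentialEstimator(mean=3.0, var=5.0)
prev_mean, prev_var = float("inf"), float("inf")
while True:
    mean_est, var_est, x, n = est()
    if abs(mean_est - prev_mean) < 1e-4 and abs(var_est - prev_var) < 1e-4:
        break
    prev_mean, prev_var = mean_est, var_est
print("after %d samples: mean ~ %.4f, variance ~ %.4f" % (n, mean_est, var_est))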
class LogisticRegression(object):
	"""Binary logistic regression on two Gaussian point clouds, fit with
	Newton's method (falling back to gradient descent)."""

	def __init__(self, n, mx1, vx1, mx2, vx2, my1, vy1, my2, vy2, basis=3, learning_rate=1e-2):
		self.n = n                  # points per class
		self.rng = UnivariateRNG()
		self.basis = basis          # polynomial basis size per coordinate
		self.params = basis * 2     # features: powers of x plus powers of y
		self.lr = learning_rate

		# Draw nb points from N(m, v) using the project's univariate RNG.
		rand_points = lambda nb, m, v : \
			np.array([self.rng.rand_normal(m, v) for i in range(nb)])

		# Class 0 cloud.
		self.x1 = rand_points(n, mx1, vx1)
		self.y1 = rand_points(n, my1, vy1)

		# Class 1 cloud.
		self.x2 = rand_points(n, mx2, vx2)
		self.y2 = rand_points(n, my2, vy2)

		# Design matrix, initial weights and labels; np.vstack turns the 1-D
		# label vector into a column so rows can be shuffled jointly.
		self.phi = self.make_poly_design_matrix((self.x1, self.y1), (self.x2, self.y2))
		self.w = np.array([self.rng.rand_normal(0., 0.1) for i in range(self.params)])
		self.d = np.array([0. for i in range(n)] + [1. for i in range(n)])
		shuf = np.append(self.phi, np.vstack(self.d), axis=1)
		np.random.shuffle(shuf)
		self.phi = shuf[:, :self.params]
		self.y = shuf[:, self.params]

	def make_poly_design_matrix(self, xy1, xy2):
		# Rows 0..n-1 hold class 0 points, rows n..2n-1 hold class 1 points;
		# the columns are the polynomial features [1, x, x^2, ..., 1, y, y^2, ...].
		phi = np.zeros((2 * self.n, self.params))

		for ti in range(2):
			v = xy1[ti] 
			for i in range(self.n): 
				for j in range(self.basis): 
					phi[i, ti * self.basis + j] = v[i]**j
		for ti in range(2):
			v = xy2[ti] 
			for i in range(self.n): 
				for j in range(self.basis): 
					phi[i + self.n, ti * self.basis + j] = v[i]**j
		return phi 

	def logistic(self, x): 
		return 1 / (1 + np.exp(-x))

	def __call__(self, x, y):
		# Predicted probability of class 1 for a single (x, y) point.
		features = np.array([x**i for i in range(self.basis)] + [y**i for i in range(self.basis)])
		return self.logistic(np.dot(features, self.w))

	def compute_gradients(self): 
		grad = np.zeros(self.w.shape)
		for j in range(self.params): 
			for i in range(self.n): 
				grad[j] += self.phi[i, j] * \
					(self.logistic(np.dot(self.phi[i, :], self.w)) - self.y[i]) 
		return grad / self.n

	def compute_hessian(self): 
		hess = np.zeros((self.params, self.params))
		for j in range(self.params): 
			for k in range(self.params):
				for i in range(self.n):
					# hess[j, k] += self.phi[i, j]*self.phi[i, k] *\
					# 	(1 / (1 + np.exp(-np.dot(self.phi[i,:], self.w)))**2 - self.y[i]) 
					hess[j, k] += self.phi[i, j] * self.phi[i, k] * (self.logistic(np.dot(self.phi[i, :], self.w)) * (1 - self.logistic(np.dot(self.phi[i, :], self.w))))
		return hess

	def newton_descent(self, grad_w, hess_w):
		# The Hessian is assumed to be invertible here.
		self.w -= self.lr * np.dot(np.linalg.inv(hess_w), grad_w)

	def gradient_descent(self, grad_w): 
		self.w -= self.lr * grad_w 

	def optimize_once(self): 
		grad_w = self.compute_gradients() 
		hess_w = self.compute_hessian() 
		# # print(hess_w)
		# self.gradient_descent(grad_w)
		if np.abs(np.linalg.det(hess_w))> 1e-5: 
			self.newton_descent(grad_w, hess_w) 
		# print(np.mean(grad_w))
			print("used newton_descent")
			return np.linalg.norm(hess_w)
		else: 
			# print(grad_w)self
			self.gradient_descent(grad_w)
			return np.linalg.norm(grad_w)
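
A sketch of a training loop around the class above; the cluster parameters and iteration count are illustrative assumptions, and UnivariateRNG is again assumed to come from the same project:

# Hypothetical training run: two well-separated Gaussian clouds, quadratic basis.
model = LogisticRegression(n=50, mx1=1.0, vx1=2.0, mx2=10.0, vx2=2.0, my1=1.0, vy1=2.0, my2=10.0, vy2=2.0)
for step in range(100):
	model.optimize_once()
# Probability of belonging to class 1: high near (10, 10), low near (1, 1).
print(model(10.0, 10.0), model(1.0, 1.0))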