def calcN(classKernels, trainLabels):
    N = zeros((len(trainLabels), len(trainLabels)))
    for i, l in enumerate(unique(trainLabels)):
        numExamplesWithLabel = len(where(trainLabels == l)[0])
        Idiff = identity(numExamplesWithLabel, Float64) - (1.0 / numExamplesWithLabel) * ones(numExamplesWithLabel, Float64)
        firstDot = dot(classKernels[i], Idiff)
        labelTerm = dot(firstDot, transpose(classKernels[i]))
        N += labelTerm
    N = nan_to_num(N)
    #make N more numerically stable
    #if I had more time, I would train this parameter, but I don't
    additionToN = ((mean(diag(N)) + 1) / 100.0) * identity(N.shape[0], Float64)
    N += additionToN
    #make sure N is invertible
    for i in range(1000):
        try:
            inv(N)
            break  #stop once N inverts cleanly
        except LinAlgError:
            #doing this to make sure the matrix is invertible
            #large value supported by section titled
            #"numerical issues and regularization" in the paper
            N += additionToN
    return N
def fit(self, func, params_dist, pre_X = None, pre_y = None):
    time_start = time.time()
    if self.random_state is not None:
        np.random.seed(self.random_state)
    grid, grid_scaled = self.get_grid(params_dist)
    mu = np.zeros(self.n_grid) + self.mu_prior
    sigma = np.ones(self.n_grid)*self.sigma_prior
    X = np.zeros((self.max_iter, self.n_params))
    X_scaled = np.matrix(np.zeros((self.max_iter, self.n_params)))
    y = np.zeros(self.max_iter)
    if (pre_X is not None) and (pre_y is not None):
        pre_X_mat, pre_X_scaled = self.scale(pre_X, pre_y, params_dist)
        X = np.vstack([pre_X_mat, X])
        X_scaled = np.vstack([pre_X_scaled, X_scaled])
        y = np.concatenate([pre_y, y])
        pre_len = len(pre_y)
    else:
        pre_len = 0
    if self.verbose:
        params_name = [i[:9] for i in self.params_name]
        logger.info('%4s|%9s|%9s|%9s', 'Iter', 'Func', 'Max',
                    '|'.join(['{:9s}'.format(i) for i in params_name]))
    for i in xrange(pre_len, pre_len + self.max_iter):
        if self.beta_mode == 'log':
            d = len(self.params_name)
            beta = 2*np.log(2*(i + 1)**2 * np.pi**2 / .3) + \
                   2*d*np.log((i + 1)**2 * d * 2)
        elif self.beta_mode == 'linear':
            beta = i + 1
        elif self.beta_mode == 'square':
            beta = (i + 1)**2
        else:
            logger.error("Unknown beta_mode %r; expected 'log', 'linear' or 'square'",
                         self.beta_mode)
        idx = np.argmax(mu + np.sqrt(beta)*sigma)
        X[i, :] = grid[idx]
        X_scaled[i] = grid_scaled[idx]
        y[i] = func(**dict(zip(self.params_name, X[i])))
        #scale the kernel by the prior variance (sigma_prior**2), consistent with kT below
        KT = self.kernel(X_scaled[:(i + 1)], X_scaled[:(i + 1)])*\
             self.sigma_prior**2
        invKT = inv(KT + self.sig**2*identity(i + 1))
        grid, grid_scaled = self.get_grid(params_dist)
        kT = self.kernel(X_scaled[:(i + 1)], grid_scaled)*\
             self.sigma_prior**2
        mu = self.mu_prior + \
             kT.T.dot(invKT).dot(y[:(i + 1)] - self.mu_prior)
        sigma2 = np.ones(self.n_grid)*self.sigma_prior**2 - \
                 diag(kT.T.dot(invKT).dot(kT))
        sigma = np.sqrt(sigma2)
        ### Save Data
        if self.verbose:
            logger.info('%4d|%9.4g|%9.4g|%s', i, y[i], np.max(y[:(i + 1)]),
                        '|'.join(['{:9.4g}'.format(ii) for ii in X[i]]))
        if time.time() - time_start > self.time_budget:
            break
    self.X = X[:(i + 1)]
    self.y = y[:(i + 1)]
    self.mu = mu
    self.beta = beta
    self.sigma = sigma
    self.grid = grid
def doubleU(phi, l, tVector):
    #can't call lambda by its name, because that's a reserved word in python
    #so I'm calling it l
    lIdentity = l*identity(phi.shape[1])
    phiDotPhi = dot(phi.transpose(), phi)
    firstTerm = inv(lIdentity + phiDotPhi)
    phiDotT = dot(phi.transpose(), tVector)
    return squeeze(dot(firstTerm, phiDotT))
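# Usage sketch (editor's addition, not part of the original source). It assumes
# numpy is installed and that doubleU above is in scope; the data below are
# made up purely for illustration. doubleU returns the regularized
# least-squares weights w = (l*I + Phi^T Phi)^-1 Phi^T t.
import numpy as np

x = np.linspace(0.0, 1.0, 10)
t = np.sin(2.0 * np.pi * x)                 # toy targets
phi = np.vander(x, 4, increasing=True)      # polynomial basis: 1, x, x^2, x^3
w = doubleU(phi, 0.01, t)                   # l (lambda) = 0.01
print(w.shape)                              # -> (4,)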
def __gmmEm__(self):
    self.mean = kmeans2(self.data, self.K)[0]
    self.c = asarray([1.0/self.K]*self.K)
    self.covm = asarray([identity(self.K)]*self.K)
    self.p = ndarray((self.N, self.K), dtype='float32')
    while self.it > 0:
        self.it -= 1
        self.__calculateP__()
        #self.__Estep__()
        self.__Mstep__()
def trainKFD(trainKernel, trainLabels):
    classKernels = getClassKernels(trainKernel, trainLabels)
    M = calcM(classKernels, trainLabels)
    N = calcN(classKernels, trainLabels)
    '''
    print "train kernel:", trainKernel
    print "Class kernels:", classKernels
    print "M", M
    print "N", N
    '''
    try:
        solutionMatrix = dot(inv(N), M)
    except LinAlgError:
        #if we get a singular matrix here, there isn't much we can do about it
        #just skip this configuration
        solutionMatrix = identity(N.shape[0], Float64)
    solutionMatrix = nan_to_num(solutionMatrix)
    eVals, eVects = eig(solutionMatrix)
    #find the 'leading' term, i.e. the eigenvector with the largest-magnitude eigenvalue
    alphaVect = eVects[:, absolute(eVals).argmax()].real.astype(Float64)
    trainProjections = dot(trainKernel, alphaVect)
    '''
    print 'alpha = ', alphaVect
    print 'train kernel = ', trainKernel
    print 'train projection = ', trainProjections
    '''
    #train sigmoid based on evaluation accuracy
    accuracyError = lambda x: 100.0 - evaluations(trainLabels, classifyKFDValues(trainProjections, *x))[0]
    rc = LSFAIL
    niter = 0
    i = 0
    while rc in (LSFAIL, INFEASIBLE, CONSTANT, NOPROGRESS, USERABORT, MAXFUN) or niter <= 1:
        if i == 10:
            break
        #get an initial guess by brute force, widening the search range each attempt
        ranges = ((-10**(i + 1), 10**(i + 1), 10**i),) * 2
        x0 = brute(accuracyError, ranges)
        (popt, niter, rc) = fmin_tnc(accuracyError, x0, approx_grad=True)
        i += 1
    return (alphaVect, popt)
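# Usage sketch (editor's addition). trainKFD only needs a precomputed kernel
# matrix and the matching labels; the helpers it calls (getClassKernels, calcM,
# calcN, classifyKFDValues, evaluations) are assumed to live in the same
# module. The toy data and RBF kernel below are illustrative only.
import numpy as np

pts = np.vstack([np.random.randn(10, 2), np.random.randn(10, 2) + 3.0])
labels = np.array([0] * 10 + [1] * 10)
sqDists = ((pts[:, None, :] - pts[None, :, :]) ** 2).sum(axis=-1)
trainKernel = np.exp(-sqDists / 2.0)        # RBF kernel on the training points
alphaVect, sigmoidParams = trainKFD(trainKernel, labels)
print(alphaVect.shape)                      # -> (20,)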
def __init__(self, name: str, pose: Pose = Pose(), init_pose: Pose = Pose(),
             vel: ndarray = array([0.0, 0.0, 0.0]), ang_vel: ndarray = array([0.0, 0.0, 0.0]),
             ref_frame: str = None, mass: float = 1.0, inertia: matrix = identity(3),
             ambient_color: list = None, diffuse_color: list = None):
    self.name = name
    self.pose = pose
    # the init pose is deep copied to be sure it won't be modified involuntarily
    self.init_pose = cp.deepcopy(init_pose)
    self.vel = vel.astype(dtype=float).reshape(3, 1)
    self.ang_vel = ang_vel.astype(dtype=float).reshape(3, 1)
    if ref_frame is None:
        self.ref_frame = None
        self.frame = None
    else:
        self.ref_frame = ref_frame
        self.frame = Frame("frame_{0}".format(self.name), self.pose, self.ref_frame)
    self.mass = mass
    self.inertia = inertia
    if ambient_color is None:
        self.ambient_color = [0.0, 0.0, 0.0, 0.0]
    else:
        self.ambient_color = ambient_color
    if diffuse_color is None:
        self.diffuse_color = [0.0, 0.0, 0.0, 0.0]
    else:
        self.diffuse_color = diffuse_color
def matrix_power(M, n, mod_val):
    # Implementation shadows numpy's matrix_power, but with modulo included
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    #if not issubdtype(type(n), int):
    #    raise TypeError("exponent must be an integer")
    from numpy.linalg import inv

    if n == 0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n < 0:
        M = inv(M)
        n *= -1

    result = M % mod_val
    if n <= 3:
        for _ in range(n - 1):
            result = dot(result, M) % mod_val
        return result

    # binary decomposition to reduce the number of matrix
    # multiplications for n > 3
    beta = binary_repr(n)
    Z, q, t = M, 0, len(beta)
    while beta[t - q - 1] == '0':
        Z = dot(Z, Z) % mod_val
        q += 1
    result = Z
    for k in range(q + 1, t):
        Z = dot(Z, Z) % mod_val
        if beta[t - k - 1] == '1':
            result = dot(result, Z) % mod_val
    return result % mod_val
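# Usage sketch (editor's addition). Fast Fibonacci numbers modulo a prime via
# the 2x2 companion matrix; it assumes numpy and the modular matrix_power
# defined above are both in scope.
import numpy as np

F = np.array([[1, 1],
              [1, 0]], dtype=np.int64)
mod = 10**9 + 7
print(matrix_power(F, 10, mod)[0, 1])       # -> 55, the 10th Fibonacci number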
def matrix_power(M, n):
    """
    Raise a square matrix to the (integer) power `n`.

    For positive integers `n`, the power is computed by repeated matrix
    squarings and matrix multiplications. If ``n == 0``, the identity matrix
    of the same shape as M is returned. If ``n < 0``, the inverse
    is computed and then raised to the ``abs(n)``.

    Parameters
    ----------
    M : ndarray or matrix object
        Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
        with `m` a positive integer.
    n : int
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    M**n : ndarray or matrix object
        The return value is the same shape and type as `M`;
        if the exponent is positive or zero then the type of the
        elements is the same as those of `M`. If the exponent is
        negative the elements are floating-point.

    Raises
    ------
    LinAlgError
        If the matrix is not numerically invertible.

    See Also
    --------
    matrix
        Provides an equivalent function as the exponentiation operator
        (``**``, not ``^``).

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
    >>> LA.matrix_power(i, 3) # should = -i
    array([[ 0, -1],
           [ 1,  0]])
    >>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
    matrix([[ 0, -1],
            [ 1,  0]])
    >>> LA.matrix_power(i, 0)
    array([[1, 0],
           [0, 1]])
    >>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
    array([[ 0.,  1.],
           [-1.,  0.]])

    Somewhat more sophisticated example

    >>> q = np.zeros((4, 4))
    >>> q[0:2, 0:2] = -i
    >>> q[2:4, 2:4] = i
    >>> q # one of the three quaternion units not equal to 1
    array([[ 0., -1.,  0.,  0.],
           [ 1.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  1.],
           [ 0.,  0., -1.,  0.]])
    >>> LA.matrix_power(q, 2) # = -np.eye(4)
    array([[-1.,  0.,  0.,  0.],
           [ 0., -1.,  0.,  0.],
           [ 0.,  0., -1.,  0.],
           [ 0.,  0.,  0., -1.]])

    """
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if not issubdtype(type(n), int):
        raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n == 0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n < 0:
        M = inv(M)
        n *= -1

    result = M
    if n <= 3:
        for _ in range(n - 1):
            result = N.dot(result, M)
        return result

    # binary decomposition to reduce the number of matrix
    # multiplications for n > 3.
    beta = binary_repr(n)
    Z, q, t = M, 0, len(beta)
    while beta[t - q - 1] == '0':
        Z = N.dot(Z, Z)
        q += 1
    result = Z
    for k in range(q + 1, t):
        Z = N.dot(Z, Z)
        if beta[t - k - 1] == '1':
            result = N.dot(result, Z)
    return result
def __init__(self, mean):
    self.mean = mean
    self.N, self.dim = mean.shape
    self.covm = asarray([identity(self.dim)] * self.N)
    self.c = asarray([1.0 / self.N] * self.N)
def GPUCB(func = f, kernel = DoubleExponential,
          params_dist = {'x': Uniform(start = 0, end = 5)},
          prev_X = None, prev_y = None,
          sig = .1, mu_prior = 0, sigma_prior = 1,
          n_grid = 100, n_iter = 10, seed = 2, time_budget = 36000):
    time_start = time.time()
    np.random.seed(seed)
    n_params = len(params_dist)
    params_name = params_dist.keys()
    grid, grid_scaled = GetRandGrid(n_grid, params_dist)
    mu = np.zeros(n_grid) + mu_prior
    sigma = np.ones(n_grid)*sigma_prior
    X = np.empty((n_iter, n_params))
    X_scaled = np.matrix(np.empty((n_iter, n_params)))
    y = np.empty(n_iter)
    logger.info("%4s |%9s |%9s |%s", "Iter", "Func", "Max",
                '|'.join(['{:6s}'.format(i) for i in params_name]))
    for i in xrange(n_iter):
        #beta = 2*np.log((i+1)**2*2*np.pi**2/3/.1) + \
        #       2*n_params*np.log((i+1)**2*n_params)
        beta = (i+1)**2
        idx = np.argmax(mu + np.sqrt(beta)*sigma)
        X[i,:] = grid[idx]
        X_scaled[i] = grid_scaled[idx]
        y[i] = func(**dict(zip(params_name, X[i])))
        invKT = inv(kernel(X_scaled[:i+1], X_scaled[:i+1])*sigma_prior**2 +
                    sig**2*identity(i + 1))
        grid, grid_scaled = GetRandGrid(n_grid, params_dist)
        kT = kernel(X_scaled[:i+1], grid_scaled)*sigma_prior**2
        mu = mu_prior + kT.T.dot(invKT).dot(y[:i+1] - mu_prior)
        sigma2 = np.ones(n_grid)*sigma_prior**2 - diag(kT.T.dot(invKT).dot(kT))
        sigma = np.sqrt(sigma2)
        #use a separate name (ii) so the comprehension doesn't clobber the loop index
        logger.info("%4d |%9.4g |%9.4g |%s", i, y[i], np.max(y[:i+1]),
                    '|'.join(['{:6.2g}'.format(ii) for ii in X[i]]))
        if time.time() - time_start > time_budget:
            break
    #ipdb.set_trace()  # optional debugging breakpoint
    if True:
        figure(1)
        plt.clf()
        xlim((0, 5))
        ylim(-4, 10)
        index = np.argsort(grid[:,0])
        gr = grid[:,0]
        plot(gr[index], mu[index], color = 'red', label = "Mean")
        plot(gr[index], mu[index] + sigma[index], color = 'blue', label = "Mean + Sigma")
        plot(gr[index], mu[index] - sigma[index], color = 'blue', label = "Mean - Sigma")
        plot(X[:i+1,0], y[:i+1], 'o', color = 'green', label = "Eval Points")
        plot(np.linspace(0, 5, num = 500), func(np.linspace(0, 5, num = 500)),
             color = 'green', label = "True Func")
        plot(gr[index], mu[index] + sqrt(beta)*sigma[index],
             color = 'yellow', label = "Mean + sqrt(B)*Sigma")
        plt.grid()
        legend(loc = 2)
        show()
    return {'X': X, 'y': y, 'mu': mu, 'beta': beta, 'sigma': sigma, 'grid': grid}
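# Usage sketch (editor's addition). The defaults already wire in the module's
# own `f`, `DoubleExponential` kernel and `Uniform` prior, so a short run only
# needs an iteration budget; note that the plotting block at the end opens a
# matplotlib window. The argument values here are illustrative only.
result = GPUCB(n_iter=20, seed=0)
best_idx = result['y'].argmax()
print(result['X'][best_idx], result['y'][best_idx])   # best point found and its value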
def identity(n, typecode='l', dtype=None):
    """identity(n) returns the identity 2-d array of shape n x n.
    """
    dtype = convtypecode(typecode, dtype)
    return nn.identity(n, dtype)
        gluSphere(quad, self.radius, 20, 20)


class Parallepiped(Solid):
    def __init__(self, length: int = 1, width: int = 1, height: int = 1, *args, **kwargs):
        Solid.__init__(self, *args, **kwargs)
        self.length = length
        self.width = width
        self.height = height

    def draw(self, from_fixed_frame: bool = True):
        self.opgl_move_to_pose(from_fixed_frame)
        self.apply_material()
        # draw parallelepiped
        quad = gluNewQuadric()
        gluQuadricDrawStyle(quad, GLU_FILL)
        gluQuadricTexture(quad, True)
        gluQuadricNormals(quad, GLU_SMOOTH)
        glScalef(self.length, self.width, self.height)
        glutSolidCube(1)


if __name__ == "__main__":
    print(identity(3))
def SN(alpha, beta, phi):
    betaPhiTphi = beta * dot(phi.transpose(), phi)
    alphaI = alpha * identity(betaPhiTphi.shape[0])
    SNinverse = alphaI + betaPhiTphi
    return inv(SNinverse)
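# Usage sketch (editor's addition), assuming numpy and the SN helper above are
# in scope. SN returns the posterior covariance of Bayesian linear regression,
# S_N = (alpha*I + beta*Phi^T Phi)^-1; the alpha/beta values and basis below
# are made up for illustration.
import numpy as np

phi = np.vander(np.linspace(0.0, 1.0, 20), 3, increasing=True)
S_N = SN(alpha=2.0, beta=25.0, phi=phi)
print(S_N.shape)                            # -> (3, 3)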
def __init__(self, mass: float = 1.0, inertia: matrix = identity(3)):
    self.mass = mass
    self.inertia = inertia
def matrix_power(M, n):
    """
    Raise a square matrix to the (integer) power n.

    For positive integers n, the power is computed by repeated matrix
    squarings and matrix multiplications. If n=0, the identity matrix of the
    same type as M is returned. If n<0, the inverse is computed and raised to
    the exponent.

    Parameters
    ----------
    M : array_like
        Must be a square array (that is, of dimension two and with
        equal sizes).
    n : integer
        The exponent can be any integer or long integer, positive,
        negative, or zero.

    Returns
    -------
    M to the power n
        The return value is an array the same shape and size as M;
        if the exponent was positive or zero then the type of the
        elements is the same as those of M. If the exponent was negative
        the elements are floating-point.

    Raises
    ------
    LinAlgError
        If the matrix is not numerically invertible, an exception is raised.

    See Also
    --------
    The matrix() class provides an equivalent function as the exponentiation
    operator.

    Examples
    --------
    >>> np.linalg.matrix_power(np.array([[0,1],[-1,0]]),10)
    array([[-1,  0],
           [ 0, -1]])

    """
    M = asanyarray(M)
    if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
        raise ValueError("input must be a square array")
    if not issubdtype(type(n), int):
        raise TypeError("exponent must be an integer")

    from numpy.linalg import inv

    if n == 0:
        M = M.copy()
        M[:] = identity(M.shape[0])
        return M
    elif n < 0:
        M = inv(M)
        n *= -1

    result = M
    if n <= 3:
        for _ in range(n - 1):
            result = N.dot(result, M)
        return result

    # binary decomposition to reduce the number of matrix
    # multiplications for n > 3.
    beta = binary_repr(n)
    Z, q, t = M, 0, len(beta)
    while beta[t - q - 1] == '0':
        Z = N.dot(Z, Z)
        q += 1
    result = Z
    for k in range(q + 1, t):
        Z = N.dot(Z, Z)
        if beta[t - k - 1] == '1':
            result = N.dot(result, Z)
    return result