import numpy as np
from scipy.sparse import coo_matrix


def load_graph_laplacian(coo, wfunc=np.ones_like):
    """Build a (negated) graph Laplacian A - D from (weight, row, col) triplets."""
    if len(coo.shape) < 2:
        coo = np.reshape(coo, (-1, 3))
    if coo.shape[0] <= 1:
        return np.array([[1.0]])
    # drop zero-weight edges
    coo = coo[coo[:, 0] != 0.0, :]
    row = coo[:, 1].astype('int')
    col = coo[:, 2].astype('int')
    data = wfunc(coo[:, 0].astype('float'))
    aapr = coo_matrix((data, (row, col))).toarray()
    # keep the strict upper triangle, then symmetrize to get the adjacency matrix
    aa = np.triu(aapr, k=1)
    aa = aa + aa.T
    # degree matrix from absolute row sums
    dd = np.diagflat(np.sum(np.abs(aa), axis=-1))
    ll = aa - dd
    return ll
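# Usage sketch (the input convention below is an assumption: each row of the
# array is (weight, row_index, col_index) for one edge):
edges = np.array([[1.0, 0, 1],
                  [2.0, 1, 2],
                  [1.0, 0, 2]])
lap = load_graph_laplacian(edges)
# with the default wfunc=np.ones_like every kept edge gets unit weight, so
# lap is A - D for the unweighted triangle graph on 3 nodes
print(lap)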
import numpy as np
from scipy.stats import ortho_group


def random_posdef(d, mu, L):
    """Random d x d symmetric positive-definite matrix with eigenvalues
    drawn uniformly from [mu, L]."""
    Ort = ortho_group.rvs(d)
    D = np.diagflat(np.random.uniform(low=mu, high=L, size=d))
    return Ort @ D @ Ort.T
def random_mat(d, mu, L):
    """Random d x d matrix with singular values drawn uniformly from [mu, L]."""
    Ort1 = ortho_group.rvs(d)
    Ort2 = ortho_group.rvs(d)
    D = np.diagflat(np.random.uniform(low=mu, high=L, size=d))
    return Ort1 @ D @ Ort2.T
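# Quick sanity check of the two constructors above: the eigenvalues of the
# symmetric matrix and the singular values of the general one should both
# lie in [mu, L]
A = random_posdef(5, mu=0.1, L=1.0)
print(np.allclose(A, A.T))                 # True: A is symmetric
print(np.linalg.eigvalsh(A))               # eigenvalues in [0.1, 1.0]
B = random_mat(5, mu=0.1, L=1.0)
print(np.linalg.svd(B, compute_uv=False))  # singular values in [0.1, 1.0]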
import hmc
import autograd.numpy as np

# Generate random precision and mean parameters for a Gaussian
n_dim = 50
rng = np.random.RandomState(seed=1234)
rnd_eigvec, _ = np.linalg.qr(rng.normal(size=(n_dim, n_dim)))
rnd_eigval = np.exp(rng.normal(size=n_dim) * 2)
prec = (rnd_eigvec / rnd_eigval) @ rnd_eigvec.T
mean = rng.normal(size=n_dim)

# Eigenvalue decomposition: prec is symmetric, so use eigh (eig may return
# spurious complex values here); replacing prec by the diagonal matrix of
# its eigenvalues gives an axis-aligned target with the same spectrum
w, v = np.linalg.eigh(prec)
prec = np.diagflat(w)

# Define potential energy (negative log density) for the Gaussian target
# distribution (gradient will be automatically calculated using autograd)
def pot_energy(pos):
    pos_minus_mean = pos - mean
    return 0.5 * pos_minus_mean @ prec @ pos_minus_mean

# Specify Hamiltonian system with isotropic Gaussian kinetic energy
system = hmc.systems.EuclideanMetricSystem(pot_energy)
# Alternative (commented out): dense Euclidean metric, giving
# H = 0.5 * mom @ inv(prec) @ mom + 0.5 * pos_minus_mean @ prec @ pos_minus_mean
# metric = hmc.metrics.DenseEuclideanMetric(prec)
# system = hmc.systems.EuclideanMetricSystem(pot_energy, metric)

# Hamiltonian is separable, therefore use explicit leapfrog integrator
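# A possible continuation, sketched under the assumption that this `hmc`
# package exposes integrator/sampler names like those of its successor mici
# (https://github.com/matt-graham/mici); none of the calls below appear in
# the snippet above, so treat them as illustrative rather than definitive
integrator = hmc.integrators.LeapfrogIntegrator(system, step_size=0.15)
sampler = hmc.samplers.StaticMetropolisHMC(system, integrator, rng, n_step=20)
init_pos = rng.normal(size=n_dim)  # arbitrary starting state
chains, chain_stats = sampler.sample_chain(1000, init_pos)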
import copy

import numpy as np


def boosting(self, F, y, its):
    '''boosting for classification: greedy coordinate-wise Newton steps on
    the softmax cost (relies on self.softmax to evaluate the cost)'''
    # settings
    N = np.shape(F)[1]              # number of weights
    w = np.zeros((N, 1))            # initialization
    epsilon = 10**(-8)
    w_history = [copy.deepcopy(w)]  # record each weight for plotting

    # pre-computations for a more efficient run
    y_diag = np.diagflat(y)
    M = np.dot(y_diag, F)
    F_2 = F**2
    c = np.dot(M, w)

    # outer loop - each iteration is a sweep through every variable once
    for i in range(its):
        # inner loop - try a Newton step in each coordinate
        cost_vals = []
        w_vals = []
        for t in range(N):
            w_temp = copy.deepcopy(w)
            w_t = copy.deepcopy(w[t])

            # create 'a' vector for this update
            m_t = M[:, t]
            m_t.shape = (len(m_t), 1)
            temp_t = m_t * w_t
            c = c - temp_t
            a_t = np.exp(-c)

            # create first derivative via components
            exp_t = np.exp(temp_t)
            num = a_t * m_t
            den = exp_t + a_t
            dgdw = -np.sum(num / den)

            # create second derivative via components
            f_t = F_2[:, t]
            f_t.shape = (len(f_t), 1)
            num = a_t * f_t * exp_t
            den = den**2
            dgdw2 = np.sum(num / den)

            # take Newton step
            w_t = w_t - dgdw / (dgdw2 + epsilon)

            # temp history
            w_temp[t] = w_t
            val = self.softmax(w_temp)
            cost_vals.append(val)
            w_vals.append(w_t)

            # update computation
            temp_t = M[:, t] * w_t
            temp_t.shape = (len(temp_t), 1)
            c = c + temp_t

        # determine the biggest winner (coordinate with the lowest cost)
        ind_win = np.argmin(cost_vals)
        w_win = w_vals[ind_win]
        w[ind_win] += copy.deepcopy(w_win)

        # update computation
        temp_t = M[:, ind_win] * w_win
        temp_t.shape = (len(temp_t), 1)
        c = c + temp_t

        # record weights at each step for kicks
        w_history.append(copy.deepcopy(w))
    return w_history
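# A minimal, hypothetical harness for the boosting routine above, assuming
# it is meant to live on a class providing the softmax (logistic) cost
# g(w) = sum_p log(1 + exp(-y_p * f_p^T w)). The class name and the toy
# data below are illustrative assumptions, not part of the original.
class BoostedClassifier:
    def __init__(self, F, y):
        self.M = np.diagflat(y) @ F  # same M = diag(y) F used in boosting

    def softmax(self, w):
        # softmax/logistic cost evaluated at the weight vector w
        return float(np.sum(np.log(1 + np.exp(-self.M @ w))))

BoostedClassifier.boosting = boosting  # attach the method defined above

# toy problem: P points, N features, labels in {-1, +1}
rng = np.random.default_rng(0)
P, N = 100, 5
F = rng.normal(size=(P, N))
y = np.sign(F @ rng.normal(size=(N, 1)))
model = BoostedClassifier(F, y)
w_history = model.boosting(F, y, its=5)
print(w_history[-1].ravel())  # final weights after 5 boosting sweeps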
import numpy as np


def initial_momentum(state):
    """Draw an initial momentum from a standard Gaussian of the same
    dimension as state."""
    n = len(state)
    mu = np.zeros(n)
    cov = np.diagflat(np.ones(n))  # identity covariance
    new = np.random.multivariate_normal(mu, cov)
    return new
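# Usage sketch (the concrete state below is illustrative): since the
# covariance is the identity, this is just a standard-normal draw per
# dimension of the state
state = np.zeros(3)
mom = initial_momentum(state)
print(mom.shape)  # (3,)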