Example 1
    def test_kernel_values(self):
        self.assertEqual(kernel.linear_kernel(np.array([1, 0]), np.array([0, 0])), 0)
        self.assertEqual(kernel.linear_kernel(np.ones(10), np.ones(10)), 10)

        desc = np.zeros(2).reshape(2, 1)
        self.assertEqual(kernel.gaussian_kernel(desc, desc, 10)[0, 0], 1)
        desc1 = np.array([sqrt(2)*3, 0]).reshape(2, 1)
        desc2 = np.zeros(2).reshape(2, 1)
        self.assertAlmostEqual(kernel.gaussian_kernel(desc1, desc2, 3)[0, 0], exp(-1), 7)
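For reference, here is a minimal sketch of the `kernel` module these tests exercise. The actual implementation is not shown in the source, so the bodies below are assumptions consistent with the asserted values: a plain dot product for `linear_kernel`, and a pairwise RBF matrix for `gaussian_kernel`.

import numpy as np

def linear_kernel(x1, x2):
    # Plain dot product: linear_kernel(np.ones(10), np.ones(10)) == 10.
    return np.dot(x1, x2)

def gaussian_kernel(X1, X2, sigma):
    # Pairwise RBF matrix of shape (n1, n2):
    # K[i, j] = exp(-||X1[i] - X2[j]||^2 / (2 * sigma^2)).
    # E.g. ||[sqrt(2)*3] - [0]||^2 = 18 with sigma = 3 gives exp(-1).
    sq_dists = np.sum((X1[:, None, :] - X2[None, :, :]) ** 2, axis=2)
    return np.exp(-sq_dists / (2 * sigma ** 2))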
Example 2
    def predict(self, X):
        '''
        Parameters
        ----------
        X : shape (n_samples, n_features)
            Predicting data

        Returns
        -------
        y : shape (n_samples,)
            Predicted value per sample.
        '''
        X = np.insert(X, 0, 1, axis=1)  # prepend a bias column
        n_samples, n_features = X.shape

        W = np.zeros((n_samples, n_features))

        # Locally weighted least squares: solve a separate weighted normal
        # equation for each query point, weighting training samples by their
        # Gaussian-kernel similarity to that point.
        for i in range(n_samples):
            weights = np.diag(
                kernel.gaussian_kernel(self.__X, X[i].reshape(1, -1),
                                       self.__sigma).ravel())
            W[i] = (np.linalg.pinv(self.__X.T.dot(weights).dot(self.__X)).dot(
                self.__X.T).dot(weights).dot(self.__y)).ravel()

        return np.sum(X * W, axis=1)
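A hypothetical usage sketch: the class name and `fit` signature below are assumptions inferred from the private attributes `predict` reads (`self.__X`, `self.__y`, `self.__sigma`); only `predict` appears in the source.

import numpy as np

X_train = np.random.randn(100, 3)
y_train = X_train @ np.array([1.0, -2.0, 0.5])

model = LocallyWeightedRegression()     # assumed class name
model.fit(X_train, y_train, sigma=1.0)  # assumed to store bias-augmented X, y, sigma
y_pred = model.predict(np.random.randn(5, 3))  # shape (5,)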
Example 3
    def fit(self, X, n_clusters, sigma):
        '''
        Parameters
        ----------
        X : shape (n_samples, n_features)
            Training data
        n_clusters : The number of clusters
        sigma : Bandwidth of the Gaussian (RBF) affinity kernel

        Returns
        -------
        y : shape (n_samples,)
            Predicted cluster label per sample.
        '''
        n_samples = X.shape[0]

        # Affinity matrix with zeroed self-similarities
        W = kernel.gaussian_kernel(X, X, sigma)
        W[range(n_samples), range(n_samples)] = 0

        # Symmetric normalized Laplacian L = I - D^-1/2 W D^-1/2.
        # np.linalg.eigh assumes a symmetric matrix, which the random-walk
        # form D^-1 (D - W) is not, so that variant would silently return
        # wrong eigenvectors.
        d_inv_sqrt = np.diag(1 / np.sqrt(np.sum(W, axis=1)))
        L = np.eye(n_samples) - d_inv_sqrt.dot(W).dot(d_inv_sqrt)

        # eigh returns eigenvalues in ascending order, so the first
        # n_clusters eigenvectors span the low-frequency embedding.
        eig_values, eig_vectors = np.linalg.eigh(L)
        U = eig_vectors[:, :n_clusters]

        return k_means.KMeans().fit(U, n_clusters, 100)
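A quick usage sketch. The enclosing class name is hypothetical; the `KMeans().fit(U, n_clusters, n_iterations)` signature is taken from the call above.

import numpy as np

# Two well-separated blobs; the spectral embedding should recover them.
X = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 10])
labels = SpectralClustering().fit(X, n_clusters=2, sigma=1.0)  # assumed class name
assert labels.shape == (100,)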
Example 4
def weighted_Hessian_SVGD(x, Dlogp, A, w):
    n, d = x.shape
    invA = np.linalg.inv(A)
    # K, grad_K = gaussian_kernel(A).calculate_kernel(x)
    K, grad_K = gaussian_kernel(A, adaptive=True).calculate_kernel(x)
    # Weighted SVGD update: for each particle i,
    #   v_i = (sum_j w_j K_ij Dlogp_j + sum_j w_j grad_xj K_ji) A^-1,
    # a kernel-smoothed gradient term plus a repulsive term, preconditioned
    # by the inverse of the (average Hessian) metric A.
    velocity = np.sum(w[None, :, None] * K[:, :, None] * Dlogp[None, :, :],
                      axis=1) + np.sum(w[:, None, None] * grad_K, axis=0)
    velocity = np.matmul(velocity, invA)
    return velocity
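A minimal sanity check on a standard-normal target. This hypothetical harness assumes `gaussian_kernel(A, adaptive=True).calculate_kernel(x)` returns the kernel matrix `K` of shape (n, n) and its gradients `grad_K` of shape (n, n, d), as consumed above; none of these values come from the source.

import numpy as np

n, d = 50, 2
x = np.random.randn(n, d)
Dlogp = -x                    # grad log-density of N(0, I)
A = np.eye(d)                 # identity metric, so invA is a no-op
w = np.full(n, 1.0 / n)       # uniform particle weights
v = weighted_Hessian_SVGD(x, Dlogp, A, w)
x = x + 0.1 * v               # one transport step toward the target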
Example 5
def compute_alpha_with_fairness(theta, bias, Xtr, Ytr, Sen, Xref, sigma, B,
                                tau):
    Kt_ref = gaussian_kernel(Xtr, Xref, sigma)

    # Compute alpha: build the linear and quadratic coefficients of the QP.
    loss_vector, XT_W = logistic_regression_loss(
        theta, bias, Xtr, Ytr)  # per-sample loss
    # Linear coefficient
    p_matrix = Kt_ref * loss_vector.reshape(-1, 1)
    p = p_matrix.sum(axis=0)
    p = p.astype('double')

    # Quadratic coefficient (identically zero, so the QP is effectively an LP)
    Q = np.zeros((len(Xref), len(Xref)))
    Q = Q.astype('double')

    # Equality constraint A alpha = b: the induced weights sum to len(Ytr)
    A = np.sum(Kt_ref, axis=0).reshape(1, -1)
    b = [1.0 * len(Ytr)]
    A = A.astype('double')

    # Fairness constraint
    # First term of the fairness constraint
    Sen_XT_W = np.multiply(Sen, XT_W)
    fair1_matrix = Kt_ref * Sen_XT_W.reshape(-1, 1)
    fair1 = fair1_matrix.sum(axis=0)
    # Second term of the fairness constraint
    fair2_matrix = Kt_ref * Sen.reshape(-1, 1)
    fair2 = fair2_matrix.sum(axis=0) / len(Sen)
    distance_sum = XT_W.sum()
    fair2 = fair2 * distance_sum
    fair = (fair1 - fair2).reshape(-1, 1)
    # Two-sided bound |G(w, alpha)| <= tau, split into two linear inequalities
    fair_G = np.concatenate((fair.transpose(), -fair.transpose()))
    fair_h = np.full((2, 1), tau)

    # Box constraints 0 <= alpha <= B, stacked with the fairness inequalities
    lower = -np.identity(len(Xref))
    upper = np.identity(len(Xref))
    G = np.concatenate((lower, upper), axis=0)
    G = np.concatenate((G, fair_G))
    lower_h = np.full((len(Xref), 1), 0)
    upper_h = np.full((len(Xref), 1), B)

    h = np.concatenate((lower_h, upper_h), axis=0)
    h = np.concatenate((h, fair_h), axis=0)
    h = h.astype('double')
    # cvxopt interface (requires: from cvxopt import matrix, solvers);
    # solvers.qp minimizes (1/2) a'Qa + p'a, so Q and p are negated to maximize.
    Q, p, G, h, A, b = -matrix(Q), -matrix(p), matrix(G), matrix(h), matrix(
        A), matrix(b)

    sol = solvers.qp(Q, p, G, h, A, b)
    alpha = np.array(sol['x'])
    weight = np.matmul(Kt_ref, alpha)
    ratio = np.sum(weight * Sen) / len(Sen)
    return weight, ratio
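Reading of the program just built (my summary; not stated in the source): since Q is identically zero, solvers.qp is effectively solving a linear program in alpha,

    max_alpha   p . alpha
    s.t.        sum(Kt_ref @ alpha) = len(Ytr)
                0 <= alpha <= B          (element-wise)
                |fair . alpha| <= tau

where p is the kernel-weighted per-sample loss and the returned sample weights are weight = Kt_ref @ alpha.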
Example 6
def Stein_sampler(model, n_particles, max_iter, step_size, seed = 44, 
				adagrad = True, kernel_type = 'mixture'):
	np.random.seed(seed)
	d = model.dimension
	sig = 1.5

	x_initial = sig * np.random.randn(n_particles, d)	# initial particles drawn from N(0, sig^2 * I)
	
	x = x_initial

	EVOLVEX = np.zeros([max_iter+1, n_particles, d])
	EVOLVEX[0,:,:] = x

	adag = np.zeros([n_particles, d])	# for adagrad
	for i in range(max_iter):
		grad_logp = model.grad_log_p(x)		# gradient of log p for each particle: n*d
		Hs = model.Hessian_log_p(x)			# Hessian of log p for each particle: n*d*d
		A = np.mean(Hs, axis = 0)			# average Hessian: d*d

		if kernel_type == 'newton':			# SVN
			v = SVN(x, grad_logp, Hs)
		elif kernel_type == 'mixture':		# matrix SVGD(mixture)
			v = mixture_hessian_SVGD(x, grad_logp, Hs)
		else:
			if kernel_type == 'gaussian':	# matrix SVGD(average)
				kernel = gaussian_kernel(A)
				B = model.inv_avg_Hessian(A)
			else:							# vanilla SVGD
				if i > 30:
					kernel = rbf_kernel(d, decay = True)
				else:
					kernel = rbf_kernel(d)
				B = np.eye(d)
			v = matrix_SVGD(x, grad_logp, kernel, B)

		adag += v ** 2 		# accumulate squared updates for adagrad
		if adagrad:
			x = x + step_size * v / np.sqrt(adag + 1e-12)
		else:
			x = x + step_size * v
		EVOLVEX[i+1,:,:] = x
	
	return EVOLVEX
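A hypothetical driver. The `model` interface (`dimension`, `grad_log_p`, `Hessian_log_p`, `inv_avg_Hessian`) is inferred from the calls inside Stein_sampler and is not documented in the source.

trajectory = Stein_sampler(model, n_particles=100, max_iter=500,
                           step_size=0.05, kernel_type='mixture')
final_particles = trajectory[-1]   # shape (n_particles, d)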
Example 7
def compute_alpha_no_fairness(theta, bias, Xtr, Ytr, Sen, Xref, sigma, B):
    Kt_ref = gaussian_kernel(Xtr, Xref, sigma)

    # Compute alpha: build the linear and quadratic coefficients of the QP.
    loss_vector, XT_W = logistic_regression_loss(
        theta, bias, Xtr, Ytr)  # per-sample loss
    # Linear coefficient
    p_matrix = Kt_ref * loss_vector.reshape(-1, 1)
    p = p_matrix.sum(axis=0)
    p = p.astype('double')

    # Quadratic coefficient (identically zero, so the QP is effectively an LP)
    Q = np.zeros((len(Xref), len(Xref)))
    Q = Q.astype('double')

    # Equality constraint A alpha = b: the induced weights sum to len(Ytr)
    A = np.sum(Kt_ref, axis=0).reshape(1, -1)
    b = [1.0 * len(Ytr)]
    A = A.astype('double')

    # Box constraints 0 <= alpha <= B (no fairness rows in this variant)
    lower = -np.identity(len(Xref))
    upper = np.identity(len(Xref))
    G = np.concatenate((lower, upper), axis=0)
    lower_h = np.full((len(Xref), 1), 0)
    upper_h = np.full((len(Xref), 1), B)

    h = np.concatenate((lower_h, upper_h), axis=0)
    h = h.astype('double')
    # cvxopt interface (requires: from cvxopt import matrix, solvers);
    # solvers.qp minimizes (1/2) a'Qa + p'a, so Q and p are negated to maximize.
    Q, p, G, h, A, b = -matrix(Q), -matrix(p), matrix(G), matrix(h), matrix(
        A), matrix(b)

    sol = solvers.qp(Q, p, G, h, A, b)
    alpha = np.array(sol['x'])
    weight = np.matmul(Kt_ref, alpha)
    ratio = np.sum(weight * Sen) / len(Sen)
    return weight, ratio
Example 8
def main():
    # `kernel = kernel.gaussian_kernel(...)` would make `kernel` a local name
    # and raise UnboundLocalError; bind the result to a different name.
    gauss = kernel.gaussian_kernel(size=21, sigma=3)
Example 9
    def test_gaussian_energy_matrix_shape(self):
        desc1 = np.ones((50, 5))
        desc2 = np.ones((400, 5))

        shape = np.shape(kernel.gaussian_kernel(desc1, desc2, 1))
        self.assertEqual(shape, (50, 400))