Example #1
0
File: ksd.py  Project: sweetice/stein_ksd
    def __init__(self, name, params=None):
        """Select a kernel implementation by name and cache its callables.

        Parameters
        ----------
        name : str
            Kernel family to use: 'rbf' or 'imq'.
        params : dict, optional
            Kernel hyper-parameters; defaults to {'beta': 1e-2}. Must also
            contain keys 'p' and 'q' (stored as-is on the instance —
            presumably score/density callables; TODO confirm with callers).

        Raises
        ------
        ValueError
            If `name` is not a supported kernel family.
        KeyError
            If `params` lacks 'p' or 'q'.
        """
        # Fix: the original default `params=dict(beta=1e-2)` was a mutable
        # default argument shared across all calls; build it per-call instead.
        if params is None:
            params = dict(beta=1e-2)
        self.params = params

        if name == 'rbf':
            self.k_method = rbf_kernel(params)
        elif name == 'imq':
            self.k_method = imq_kernel(params)
        # elif name == 'poly':
        #     self.k_method = poly_kernel(params)
        else:
            # Previously an unknown name surfaced later as an AttributeError
            # on self.k_method; fail fast with a clear message instead.
            raise ValueError(f"unknown kernel name: {name!r}")

        # Cache the kernel's value/gradient callables for cheap access.
        self.k = self.k_method.value
        self.grad_kx = self.k_method.grad_x
        self.grad_ky = self.k_method.grad_y
        self.grad_kxy = self.k_method.grad_xy

        self.p = params['p']
        self.q = params['q']
Example #2
0
        kernel_mat = np.zeros((self.n, self.n))
        for i in range(self.n):
            for j in range(self.n):
                kernel_mat[i, j] = self.kernel(self.X[i], self.X[j])
        return kernel_mat

    def predict(self, X_new):
        """Project new sample(s) onto the fitted components.

        Centers `X_new` by the training-data mean, evaluates the kernel
        against the stored training set, and applies the projection
        weights `self.W`.
        """
        train_mean = np.mean(self.X, 0)
        kernel_row = self.kernel(X_new - train_mean, self.X)
        return kernel_row.dot(self.W)


if __name__ == '__main__':
    # 3-D swiss-roll dataset: 1500 points plus a per-point colour value.
    X, color = datasets.make_swiss_roll(n_samples=1500)
    # X, color = datasets.make_s_curve(1500, random_state=0)

    # NOTE(review): despite its name, `pca` is a KPCA with the default
    # kernel — presumably meant as the linear-PCA baseline (see the
    # commented-out PCA() line); confirm this is intentional.
    # pca = PCA()
    pca = KPCA()
    kpca = KPCA(kernel=rbf_kernel(0.01))
    X_pca = pca.fit(X, 2)
    X_kpca = kpca.fit(X, 2)

    figure = plt.figure()

    # Top-left panel: the original 3-D data.
    axes3d = figure.add_subplot(221, projection='3d')
    axes3d.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
    axes3d.set_title("Original data")

    # Remaining panels: the two 2-D embeddings, drawn identically.
    for position, embedding, label in (
            (222, X_pca, 'Projected data-PCA'),
            (223, X_kpca, 'Projected data-KPCA')):
        panel = figure.add_subplot(position)
        panel.scatter(embedding[:, 0], embedding[:, 1],
                      c=color, cmap=plt.cm.Spectral)
        plt.title(label)

    plt.show()
Example #3
0
 def compute_rbf_kernel(self, gamma: "float | None" = None):
     """Precompute the RBF (Gaussian) Gram matrix for self.X.

     Stores the kernel matrix in self.K, remembers the bandwidth in
     self.gamma, and tags the kernel as 'global'.

     gamma: RBF bandwidth; None defers to rbf_kernel's own default
         (presumably 1/n_features — confirm against the rbf_kernel used).
     """
     self.K = rbf_kernel(self.X, gamma)
     self.gamma = gamma
     self.kernel_type = 'global'
Example #4
0
    }
    # Synthetic input: 10000 random 784-dim vectors (MNIST-sized), float32.
    data = torch.randn(10000, 784).type(torch.FloatTensor)

    # OPU Tests:
    # opu_pytorch, _ = project_data(data, device_config, projection='opu', num_features=10000, scale=np.sqrt(0.5), degree=4, bias=0)

    # from kernels import opu_kernel
    # true_kernel = opu_kernel(device_config, data, var=0.5, bias=0, degree=4)

    # kernel_pytorch = torch.matmul(opu_pytorch, opu_pytorch.t())

    # print(torch.mean(torch.abs(kernel_pytorch.to('cpu') - true_kernel) / true_kernel))

    # RBF Tests:
    # Random-feature projection intended to approximate an RBF kernel
    # (presumably random Fourier features — confirm in project_data).
    rbf_pytorch, _ = project_data(data,
                                  device_config,
                                  projection='rbf',
                                  num_features=10000,
                                  scale=np.sqrt(0.5),
                                  lengthscale='auto')

    from kernels import rbf_kernel
    # Exact kernel matrix to compare the approximation against.
    true_kernel = rbf_kernel(device_config, data, var=0.5, lengthscale='auto')

    # Approximate kernel: inner products of the projected features.
    kernel_pytorch = torch.matmul(rbf_pytorch, rbf_pytorch.t())

    # Report the mean element-wise relative error of the approximation.
    print(
        torch.mean(
            torch.abs(kernel_pytorch.to('cpu') - true_kernel) / true_kernel))

    print('Done!')
Example #5
0
# Linear-kernel logistic regression: use the precomputed Gram matrix as
# the feature matrix and score with 5-fold cross-validation.
bc_linear = AJ_kernels.linear_kernel(bc_x_std)
linear_reg_score = cross_val_score(LogisticRegression(),
                                   bc_linear,
                                   bc_y,
                                   scoring='accuracy',
                                   cv=5)
linear_reg_results = linear_reg_score.mean()
print('Linear kernelized log reg results:')
print(linear_reg_results)

# Gaussian RBF kernel logistic regression: sweep the bandwidth gamma over
# 0..99 and keep each mean CV accuracy.
# Fix: the original accumulated results with `results = results + [x]`,
# which copies the list every iteration (accidental O(n^2)); build the
# list with a comprehension instead.
# NOTE(review): gamma=0 is a degenerate bandwidth for most RBF kernels —
# confirm AJ_kernels.rbf_kernel handles it meaningfully.
rbf_reg_results = [
    cross_val_score(LogisticRegression(),
                    AJ_kernels.rbf_kernel(bc_x_std, gamma),
                    bc_y,
                    scoring='accuracy',
                    cv=5).mean()
    for gamma in range(100)
]
print('Best Gaussian RBF kernelized log reg results:')
print(max(rbf_reg_results))

#poly kernel logistic regression
poly_reg_results = []
for p in range(10):
    bc_poly = AJ_kernels.poly_kernel(bc_x_std, p)
    poly_reg_score = cross_val_score(LogisticRegression(),
                                     bc_poly,