def __init__(self):
    super(Kerception_blockC, self).__init__()
    self.kernel_fn1 = LinearKernel()
    self.kconv1 = KernelConv2D(filters=1, kernel_size=3, padding='same',
                               kernel_function=self.kernel_fn1)
    self.kernel_fn2 = SigmoidKernel()
    self.kconv2 = KernelConv2D(filters=1, kernel_size=3, padding='same',
                               kernel_function=self.kernel_fn2)
    self.kernel_fn3 = GaussianKernel(gamma=1.0, trainable_gamma=True, initializer='he_normal')
    self.kconv3 = KernelConv2D(filters=4, kernel_size=3, padding='same',
                               kernel_function=self.kernel_fn3)
    self.kernel_fn4 = PolynomialKernel(p=3, trainable_c=True)
    self.kconv4 = KernelConv2D(filters=5, kernel_size=3, padding='same',
                               kernel_function=self.kernel_fn4)
    self.kernel_fn5 = PolynomialKernel(p=5, trainable_c=True)
    self.kconv5 = KernelConv2D(filters=5, kernel_size=3, padding='same',
                               kernel_function=self.kernel_fn5)
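# A minimal sketch of a forward pass for this inception-style block. The
# original does not show it, so the structure here is an assumption: the five
# kernel-convolution branches are applied to the same input and concatenated
# along the channel axis, as in standard Inception modules. Assumes a
# TensorFlow/Keras backend.
import tensorflow as tf

def call(self, x):  # hypothetical method of Kerception_blockC
    branches = [self.kconv1(x), self.kconv2(x), self.kconv3(x),
                self.kconv4(x), self.kconv5(x)]
    # 'same' padding keeps spatial dims aligned, so the 1+1+4+5+5 = 16
    # branch filters can be stacked along the channel axis.
    return tf.concat(branches, axis=-1)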
import numpy as np
from sklearn.metrics.pairwise import linear_kernel as sk_linear
from kernels import LinearKernel

def test_linear_kernel():
    # Compare against scikit-learn on randomly sized inputs; a bounded loop
    # replaces the original `while True` so the test terminates.
    for _ in range(100):
        N = np.random.randint(1, 100)
        M = np.random.randint(1, 100)
        C = np.random.randint(1, 1000)
        X = np.random.rand(N, C)
        Y = np.random.rand(M, C)
        mine = LinearKernel()(X, Y)
        gold = sk_linear(X, Y)
        np.testing.assert_almost_equal(mine, gold)
    print("PASSED")
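# For reference, a minimal LinearKernel consistent with the test above: the
# linear kernel is simply the Gram matrix of inner products, K(X, Y) = X Y^T.
# This sketch is an assumption; the project's actual class may differ.
class LinearKernel:
    def __call__(self, X, Y):
        # (N, C) @ (C, M) -> (N, M) matrix of pairwise dot products
        return X @ Y.T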
best_lambda = {i: 0 for i in range(len_files)}
best_gamma = {i: 0 for i in range(len_files)}
best_sigma = {i: 0 for i in range(len_files)}
best_window_size = {i: 0 for i in range(len_files)}

# Main loop over hyperparameter settings
for params in settings:
    gamma, _lambda = params
    if kernel_name == "Gaussian":
        kernel = GaussianKernel(gamma)
    elif kernel_name == "Linear":
        kernel = LinearKernel()
    if model_name == "SVM":
        clf = SVM(_lambda=_lambda, kernel=kernel)
    elif model_name == "SPR":
        clf = SPR(kernel=kernel)
    # Loop over pre-computed embeddings
    # for filename in os.listdir(EMBEDDING_DIR)[:1]:  # small test
    for filename in os.listdir(EMBEDDING_DIR):
        # Full path
        file_path = os.path.join(EMBEDDING_DIR, filename)
        # Parsing
        dataset_idx, sigma, window_size = filename_parser(filename)
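# A minimal sketch of the GaussianKernel constructed above, under the common
# parameterization K(x, y) = exp(-gamma * ||x - y||^2). This is an assumption;
# the project's class may use a sigma-based parameterization instead.
import numpy as np

class GaussianKernel:
    def __init__(self, gamma):
        self.gamma = gamma

    def __call__(self, X, Y):
        # Squared Euclidean distances via ||x||^2 + ||y||^2 - 2 x.y
        sq_dists = (np.sum(X**2, axis=1)[:, None]
                    + np.sum(Y**2, axis=1)[None, :]
                    - 2 * X @ Y.T)
        return np.exp(-self.gamma * sq_dists)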
overwrite_kpca = False
kernel_pca = True
kernel_pca_kernel = GaussianKernel(0.6)
cut_percentage = 90
# To change when using small data: n_train and n_test in utils.py,
# n_components in fisher_feature_extractor.py
folder_name = 'data/'
# folder_name = 'data_small/'
nclasses = 10
classifier = 'svm_ovo'
do_validation = True
validation = 0.2
do_prediction = False
svm_kernel = LinearKernel()
# svm_kernel = LaplacianRBFKernel(1.6)
C = 1

Xtrain, Ytrain, Xtest = load_features(feature_extractor, overwrite_features,
                                      overwrite_kpca, kernel_pca,
                                      kernel_pca_kernel, cut_percentage,
                                      folder_name)
# Xtrain = numpy.reshape(Xtrain, (Xtrain.shape[0], -1))
# Xtest = numpy.reshape(Xtest, (Xtest.shape[0], -1))
print(Xtrain.shape)
print(Xtest.shape)
assert Xtrain.ndim == 2 and Xtrain.shape[1] == Xtest.shape[1]

print("Fitting on training data")
if classifier == 'cross_entropy':
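# How cut_percentage is presumably interpreted (an assumption, not confirmed
# by this snippet): keep the smallest number of kernel-PCA components whose
# eigenvalues account for at least cut_percentage percent of the total.
import numpy as np

def n_components_for_cut(eigenvalues, cut_percentage):
    # eigenvalues sorted in decreasing order; cumulative share of variance
    ratios = np.cumsum(eigenvalues) / np.sum(eigenvalues)
    return int(np.searchsorted(ratios, cut_percentage / 100.0) + 1)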
if __name__ == '__main__':
    import numpy
    import matplotlib.pyplot as plt
    from sklearn.datasets import make_circles
    from kernels import LinearKernel, GaussianKernel

    f, axarr = plt.subplots(2, 2, sharex=True)
    X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
    axarr[0, 0].scatter(X[y == 0, 0], X[y == 0, 1], color='red')
    axarr[0, 0].scatter(X[y == 1, 0], X[y == 1, 1], color='blue')

    # Project with a linear kernel first (equivalent to standard PCA).
    kpca = KernelPCA(LinearKernel())
    kpca.fit(X)
    Xproj = kpca.predict(1)
    axarr[0, 1].scatter(Xproj[y == 0, 0], numpy.zeros(500), color='red')
    axarr[0, 1].scatter(Xproj[y == 1, 0], numpy.zeros(500), color='blue')

    # Decrease sigma to improve separation.
    kpca = KernelPCA(GaussianKernel(0.686))
    kpca.fit(X, cut_percentage=95, plot=True)
    print(kpca.alpha.shape[1])
    Xproj = kpca.predict(2)
    axarr[1, 0].scatter(Xproj[y == 0, 0], numpy.zeros(500), color='red')
    axarr[1, 0].scatter(Xproj[y == 1, 0], numpy.zeros(500), color='blue')
    axarr[1, 1].scatter(Xproj[y == 0, 0], Xproj[y == 0, 1], color='red')
    axarr[1, 1].scatter(Xproj[y == 1, 0], Xproj[y == 1, 1], color='blue')
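# A minimal sketch of the KernelPCA class assumed above (the real class also
# supports cut_percentage and plotting, omitted here): double-center the Gram
# matrix, eigendecompose it, and project onto the leading eigenvectors scaled
# by 1/sqrt(eigenvalue).
import numpy

class KernelPCA:
    def __init__(self, kernel):
        self.kernel = kernel

    def fit(self, X):
        n = X.shape[0]
        K = self.kernel(X, X)
        # Double-centering gives zero-mean features in the implicit feature space.
        one_n = numpy.ones((n, n)) / n
        K = K - one_n @ K - K @ one_n + one_n @ K @ one_n
        w, v = numpy.linalg.eigh(K)
        order = numpy.argsort(w)[::-1]
        w, v = w[order], v[:, order]
        pos = w > 1e-10
        # alpha holds eigenvectors normalized so projections have unit scale.
        self.alpha = v[:, pos] / numpy.sqrt(w[pos])
        self.K = K

    def predict(self, n_components):
        # Project the training data onto the first n_components directions.
        return self.K @ self.alpha[:, :n_components]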
print("Coef0:", coef0) print("Degree:", degree) if kernel== 'spectrum': print("K:",k) if kernel == 'sum': print("List of Ks:",list_k) print("List of Ms:",list_m) print("Weights:", weights) print() ##### APPLY SVM ON DATASET 0 ##### print("Applying SVM on dataset 0...") if kernel=='linear': svm = SVM(kernel=LinearKernel(),C=C) elif kernel=='rbf': svm = SVM(kernel=GaussianKernel(sigma=np.sqrt(0.5/gamma),normalize=False),C=C) elif kernel=='poly': svm = SVM(kernel=PolynomialKernel(gamma=gamma,coef0=coef0,degree=degree),C=C) elif kernel=='spectrum': svm = SVM(kernel=SpectrumKernel(k=k),C=C) elif kernel=='mismatch': svm = SVM(kernel=MismatchKernel(k=k, m=m, neighbours=neighbours_0, kmer_set=kmer_set_0,normalize=True), C=C) elif kernel=='sum': dataset_nbr = 0 kernels = [] for k,m in zip(list_k,list_m): neighbours, kmer_set = load_or_compute_neighbors(dataset_nbr, k, m) kernels.append(MismatchKernel(k=k, m=m, neighbours=neighbours, kmer_set=kmer_set, normalize = True)) svm = SVM(kernel=SumKernel(kernels=kernels, weights=weights), C=C)
from kernels import ConstantKernel, ExponentialSquaredKernel, LinearKernel, \
    SumKernel, ProductKernel, Matern12, Matern52, PeriodicKernel
from utils import multiple_formatter
import numpy as np

# Set values for the model parameters.
lengthscale = 1.
signal_variance = 1.
noise_variance = 0.01

# Create the GP kernels.
e_kernel = ExponentialSquaredKernel(lengthscale=lengthscale * 2,
                                    signal_variance=signal_variance)
c_kernel = ConstantKernel(variance=1)
l_kernel = LinearKernel(variance=1)
p_kernel = PeriodicKernel(lengthscale=lengthscale,
                          signal_variance=signal_variance,
                          period=np.pi * 0.25)
e2_kernel = ExponentialSquaredKernel(lengthscale=lengthscale * 1000,
                                     signal_variance=signal_variance)
m5_kernel = Matern52(lengthscale=lengthscale * 10,
                     signal_variance=signal_variance * 10)
m1_kernel = Matern12(lengthscale=lengthscale,
                     signal_variance=signal_variance)

# kernel = SumKernel(m5_kernel, m1_kernel)
# kernel = p_kernel
kernel = m5_kernel
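# For reference, the Matern-5/2 covariance selected above, written out as a
# minimal sketch (the project's Matern52 class presumably implements the same
# standard formula):
#   k(r) = s^2 * (1 + sqrt(5) r / l + 5 r^2 / (3 l^2)) * exp(-sqrt(5) r / l)
import numpy as np

def matern52(r, lengthscale, signal_variance):
    # r is the Euclidean distance between inputs; a = sqrt(5) r / l.
    a = np.sqrt(5.0) * r / lengthscale
    return signal_variance * (1.0 + a + a**2 / 3.0) * np.exp(-a)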