Example #1
    def apply(self,
              referenceSamples=None,
              testSamples=None,
              gaussianCenters=None):
        """
        Computes the alpha-relative density ratio, r_alpha(X), of P(X_ref) and P(X_test)

          r_alpha(X) = P(X_ref) / (alpha * P(X_ref) + (1 - alpha) * P(X_test))

        Returns the density-ratio estimates at X_ref (r_alpha_ref) and at X_test (r_alpha_test).
        """
        # Apply the kernel function to the reference and test samples
        K_ref = GaussianKernel(self.sigmaWidth).apply(referenceSamples,
                                                      gaussianCenters).T
        K_test = GaussianKernel(self.sigmaWidth).apply(testSamples,
                                                       gaussianCenters).T

        # Compute the parameters, theta_hat, of the density ratio estimator
        H_hat = AlphaRelativeDensityRatioEstimator.H_hat(
            self.alphaConstraint, K_ref, K_test)
        h_hat = AlphaRelativeDensityRatioEstimator.h_hat(K_ref)
        theta_hat = AlphaRelativeDensityRatioEstimator.theta_hat(
            H_hat, h_hat, self.lambdaRegularizer, self.kernelBasis)

        # Estimate the density ratio, r_alpha_ref = r_alpha(X_ref)
        r_alpha_ref = AlphaRelativeDensityRatioEstimator.g_of_X_theta(
            K_ref, theta_hat).T
        # Estimate the density ratio, r_alpha_test = r_alpha(X_test)
        r_alpha_test = AlphaRelativeDensityRatioEstimator.g_of_X_theta(
            K_test, theta_hat).T

        return (r_alpha_ref, r_alpha_test)
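The helpers H_hat, h_hat, theta_hat and g_of_X_theta used above are not shown in this fragment. As a rough guide only, here is a minimal NumPy sketch of the closed-form RULSIF solution they presumably implement (shapes assume basis-by-samples kernel matrices, as the .T calls above suggest):

import numpy as np

def rulsif_theta_hat(K_ref, K_test, alpha, lam):
    """Sketch of the closed-form alpha-relative density-ratio fit.

    K_ref  : (b, n_ref)  kernel matrix, basis centers x reference samples
    K_test : (b, n_test) kernel matrix, basis centers x test samples
    alpha  : relative parameter in [0, 1); lam: ridge regularizer
    """
    b, n_ref = K_ref.shape
    n_test = K_test.shape[1]
    # H_hat: alpha-weighted mixture of second moments of the basis responses
    H = (alpha / n_ref) * (K_ref @ K_ref.T) \
        + ((1.0 - alpha) / n_test) * (K_test @ K_test.T)
    # h_hat: mean basis response on the reference samples
    h = K_ref.mean(axis=1)
    # theta_hat: solve the ridge-regularized linear system
    theta = np.linalg.solve(H + lam * np.eye(b), h)
    return theta

# The ratio estimate is then linear in the basis responses, which is what
# g_of_X_theta presumably computes:  r_alpha(X) = theta_hat^T K(X)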
Example #2
def test_1D():
    # 1 dimension
    f = genRandomFunction()
    X, y = genDataFromFunction(f, N=1000)

    kernel = GaussianKernel([1.0, 10.0])
    LGP = LGPCollection(kernel, .50, 100, max_models=3, sigma_n=0.1)
    LGP.train(X=X, y=y, optimize=True)

    x = np.linspace(np.min(X), np.max(X))
    x = np.reshape(x, (1, -1))

    y_expect = [LGP.eval_mean(x[:, i]) for i in range(x.shape[1])]

    y_sub_model = [[m.eval_mean(x[:, i]) for i in range(x.shape[1])]
                   for m in LGP.models]
    y_sub_model = np.array(y_sub_model).T

    plt.figure()
    plt.plot(x[0, :].flatten(), f(x))  # true function
    plt.plot(X[0, :], y, '.')  # noisy data
    for m in LGP.models:
        plt.plot(m.X[0, :], m.y, 'o', mfc='none')
    plt.plot(x[0, :], y_sub_model)
    plt.plot(x[0, :], y_expect, 'k--')  # estimated from the GP
Example #3
    def __init__(self):

        super(Kerception_blockC, self).__init__()
        self.kernel_fn1 = LinearKernel()
        self.kconv1 = KernelConv2D(filters=1,
                                   kernel_size=3,
                                   padding='same',
                                   kernel_function=self.kernel_fn1)
        self.kernel_fn2 = SigmoidKernel()
        self.kconv2 = KernelConv2D(filters=1,
                                   kernel_size=3,
                                   padding='same',
                                   kernel_function=self.kernel_fn2)
        self.kernel_fn3 = GaussianKernel(gamma=1.0,
                                         trainable_gamma=True,
                                         initializer='he_normal')
        self.kconv3 = KernelConv2D(filters=4,
                                   kernel_size=3,
                                   padding='same',
                                   kernel_function=self.kernel_fn3)
        self.kernel_fn4 = PolynomialKernel(p=3, trainable_c=True)
        self.kconv4 = KernelConv2D(filters=5,
                                   kernel_size=3,
                                   padding='same',
                                   kernel_function=self.kernel_fn4)
        self.kernel_fn5 = PolynomialKernel(p=5, trainable_c=True)
        self.kconv5 = KernelConv2D(filters=5,
                                   kernel_size=3,
                                   padding='same',
                                   kernel_function=self.kernel_fn5)
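The block's forward pass is not included in this fragment. If the class follows the usual Keras subclassing pattern, a hypothetical call method (not taken from the original source; it assumes tensorflow is imported as tf) would run the five branches in parallel and concatenate them along the channel axis:

    def call(self, inputs):
        # Hypothetical sketch: evaluate all five kernelized convolution branches
        # and stack their feature maps (1 + 1 + 4 + 5 + 5 = 16 channels in total).
        branches = [self.kconv1(inputs), self.kconv2(inputs),
                    self.kconv3(inputs), self.kconv4(inputs),
                    self.kconv5(inputs)]
        return tf.concat(branches, axis=-1)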
Example #4
def test_2d():
    # two dimensions
    dim = 2
    f = genRandomFunction(dim)
    X, y = genDataFromFunction(f, dim=dim, N=1000)

    x_coord, y_coord = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))
    x_eval = np.vstack(
        [np.reshape(x_coord, (1, -1)),
         np.reshape(y_coord, (1, -1))])
    f_eval = np.reshape(f(x_eval), x_coord.shape)

    mean = lambda x: 0
    params = {'sigma_n': .01, 'sigma_s': 1.0, 'width': 10.0}
    kernel = GaussianKernel([params['sigma_s'], params['width']])
    GP = GaussianProcess(mean, kernel)
    GP.train(X, y)
    GP.optimize_hyperparameters_random_search()

    # evaluate
    y_expect = [GP.eval_mean(x_eval[:, i]) for i in range(x_eval.shape[1])]
    y_expect = np.reshape(y_expect, x_coord.shape)
    v_max = max(np.max(y), -np.min(y))

    # plot
    color_options = {'cmap': 'RdBu', 'vmin': -v_max, 'vmax': v_max}

    plt.figure()
    plt.subplot(4, 1, 1)
    plt.pcolor(x_coord, y_coord, f_eval, **color_options)
    plt.title('Original Function')

    plt.subplot(4, 1, 2)
    plt.scatter(X[0, :], X[1, :], c=y, **color_options)
    plt.title('Data')

    plt.subplot(4, 1, 3)
    plt.title('GP Estimation')
    plt.pcolor(x_coord, y_coord, y_expect, **color_options)

    plt.subplot(4, 1, 4)
    plt.pcolor(x_coord, y_coord, -f_eval + y_expect, **color_options)
    plt.title('Residual')
Example #5
def test_2D():
    # two dimensions
    dim = 2
    f = genRandomFunction(dim)
    X, y = genDataFromFunction(f, dim=dim, N=1000)

    x_coord, y_coord = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))
    x_eval = np.vstack(
        [np.reshape(x_coord, (1, -1)),
         np.reshape(y_coord, (1, -1))])
    f_eval = np.reshape(f(x_eval), x_coord.shape)

    kernel = GaussianKernel([1.0, 10.0])
    LGP = LGPCollection(kernel, .50, 100, max_models=3, sigma_n=0.1)
    LGP.train(X=X, y=y, optimize=True)

    y_expect = [LGP.eval_mean(x_eval[:, i]) for i in range(x_eval.shape[1])]
    y_expect = np.reshape(y_expect, x_coord.shape)
    v_max = max(np.max(y), -np.min(y))

    # plot
    color_options = {'cmap': 'RdBu', 'vmin': -v_max, 'vmax': v_max}

    plt.figure()
    plt.subplot(4, 1, 1)
    plt.pcolor(x_coord, y_coord, f_eval, **color_options)
    plt.title('Original Function')

    plt.subplot(4, 1, 2)
    plt.scatter(X[0, :], X[1, :], c=y, **color_options)
    plt.title('Data')

    plt.subplot(4, 1, 3)
    plt.title('LGP Estimation')
    plt.pcolor(x_coord, y_coord, y_expect, **color_options)

    plt.subplot(4, 1, 4)
    plt.pcolor(x_coord, y_coord, -f_eval + y_expect, **color_options)
    plt.title('Residual')

    plt.show()
Example #6
def test_1d():
    # 1 dimension
    f = genRandomFunction()
    X, y = genDataFromFunction(f, N=100)

    params = {'sigma_n': .01, 'sigma_s': 1.0, 'width': 10.0}
    kernel = GaussianKernel([params['sigma_s'], params['width']])
    mean = lambda x: 0
    GP = GaussianProcess(mean, kernel, sigma_n=params['sigma_n'])
    GP.train(X=X, y=y)
    GP.optimize_hyperparameters_grid_search()

    x = np.linspace(np.min(X), np.max(X))
    x = np.reshape(x, (1, -1))

    y_expect = np.array([GP.eval_mean(x[:, i]) for i in range(x.shape[1])])
    y_var = np.array([GP.eval_var(x[:, i]) for i in range(x.shape[1])])
    y_std = np.sqrt(y_var)

    plt.figure()
    true, = plt.plot(x[0, :].flatten(),
                     f(x),
                     color='black',
                     label='True Function')  # true function
    data, = plt.plot(X[0, :], y, '.', color='orange',
                     label='Data')  # noisy data
    mean, = plt.plot(x[0, :],
                     y_expect,
                     '--',
                     color='blue',
                     label='Estimated Mean')  # estimated from the GP
    plt.fill_between(x[0, :],
                     y_expect + 2 * y_std,
                     y_expect - 2 * y_std,
                     color='gray',
                     linewidth=0.0,
                     alpha=0.5)
    plt.legend(handles=[true, data, mean])
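eval_mean and eval_var belong to the GaussianProcess class used above; the textbook GP posterior they presumably implement can be written directly. A minimal NumPy sketch, assuming a zero prior mean and noise variance sigma_n^2 (the shaded band plotted above is then mean ± 2·sqrt(var)):

import numpy as np

def gp_posterior(K, k_star, k_star_star, y, sigma_n):
    """Sketch of the standard GP posterior at a single test point.

    K           : (n, n) kernel matrix of the training inputs
    k_star      : (n,)   kernel vector between training inputs and the test point
    k_star_star : scalar kernel value at the test point
    """
    A = K + sigma_n ** 2 * np.eye(K.shape[0])
    alpha = np.linalg.solve(A, y)                             # (K + sigma_n^2 I)^-1 y
    mean = k_star @ alpha                                     # posterior mean
    var = k_star_star - k_star @ np.linalg.solve(A, k_star)   # posterior variance
    return mean, var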
Example #7
if kernel == 'spectrum':
    print("K:", k)
if kernel == 'sum':
    print("List of Ks:", list_k)
    print("List of Ms:", list_m)
    print("Weights:", weights)
print()

##### APPLY SVM ON DATASET 0 #####

print("Applying SVM on dataset 0...")

if kernel == 'linear':
    svm = SVM(kernel=LinearKernel(), C=C)
elif kernel == 'rbf':
    svm = SVM(kernel=GaussianKernel(sigma=np.sqrt(0.5 / gamma), normalize=False), C=C)
elif kernel == 'poly':
    svm = SVM(kernel=PolynomialKernel(gamma=gamma, coef0=coef0, degree=degree), C=C)
elif kernel == 'spectrum':
    svm = SVM(kernel=SpectrumKernel(k=k), C=C)
elif kernel == 'mismatch':
    svm = SVM(kernel=MismatchKernel(k=k, m=m, neighbours=neighbours_0, kmer_set=kmer_set_0, normalize=True), C=C)
elif kernel == 'sum':
    dataset_nbr = 0
    kernels = []
    for k, m in zip(list_k, list_m):
        neighbours, kmer_set = load_or_compute_neighbors(dataset_nbr, k, m)
        kernels.append(MismatchKernel(k=k, m=m, neighbours=neighbours, kmer_set=kmer_set, normalize=True))
    svm = SVM(kernel=SumKernel(kernels=kernels, weights=weights), C=C)

if kernel_on_matrices:
        if validation is not None:
            accuracy = self._calc_accuracy(Xval, yval)
            print("Accuracy in validation data is %.3f" % accuracy)

    def predict(self, X):
        n = X.shape[0]
        scores = numpy.zeros((n, self.nclasses))

        print("One vs All prediction")
        for i in tqdm(range(self.nclasses)):
            scores[:, i] = self.SVMova[i].predict(X, confidence=True)

        return numpy.argmax(scores, axis=1)

    def _calc_accuracy(self, X, y):
        ypred = self.predict(X)
        return numpy.sum(ypred == y) * 100.0 / y.shape[0]

if __name__ == '__main__':
    from kernels import GaussianKernel
    #X = numpy.array([[3,4],[1,3],[2,2]])
    X = numpy.array([[-2,0],[-1,0],[1,0]])
    y = numpy.array([-1,-1,1])

    kernel = GaussianKernel(0.5)
    model = KernelSVMBinaryClassifier(kernel)
    model.fit(X, y, 0.5)
    print(model.X)
    print(model.alpha)
Example #9
    def computeModelParameters(self,
                               referenceSamples=None,
                               testSamples=None,
                               gaussianCenters=None):
        """
        Computes the optimal (sigma, lambda) model parameters via a k-fold cross-validation procedure
        """
        (refRows, refCols) = referenceSamples.shape
        (testRows, testCols) = testSamples.shape

        sigmaWidths = self.computeGaussianWidthCandidates(
            referenceSamples, testSamples)
        lambdaCandidates = self.generateRegularizationParams()

        Vector.show("Sigma Candidates", sigmaWidths, self.settings)
        Vector.show("Lambda Candidates", lambdaCandidates, self.settings)

        # Initialize cross validation scoring matrix
        crossValidationScores = numpy.zeros(
            (numpy.size(sigmaWidths), numpy.size(lambdaCandidates)))

        # Initialize a cross validation index assignment list
        referenceSamplesCVIdxs = numpy.random.permutation(refCols)
        referenceSamplesCVSplit = numpy.floor(numpy.r_[0:refCols] *
                                              self.crossFolds / refCols)
        testSamplesCVIdxs = numpy.random.permutation(testCols)
        testSamplesCVSplit = numpy.floor(numpy.r_[0:testCols] *
                                         self.crossFolds / testCols)

        # Initiate k-fold cross-validation procedure. Using variable
        # notation similar to the RULSIF formulas.
        for sigmaIdx in numpy.r_[0:numpy.size(sigmaWidths)]:

            # (re-)Calculate the kernel matrix using the candidate sigma width
            sigma = sigmaWidths[sigmaIdx]
            K_ref = GaussianKernel(sigma).apply(referenceSamples,
                                                gaussianCenters).T
            K_test = GaussianKernel(sigma).apply(testSamples,
                                                 gaussianCenters).T

            # Initialize a new result matrix for the current sigma candidate
            foldResult = numpy.zeros(
                (self.crossFolds, numpy.size(lambdaCandidates)))

            for foldIdx in numpy.r_[0:self.crossFolds]:

                K_ref_trainingSet = K_ref[:, referenceSamplesCVIdxs[
                    referenceSamplesCVSplit != foldIdx]]
                K_test_trainingSet = K_test[:, testSamplesCVIdxs[
                    testSamplesCVSplit != foldIdx]]

                H_h_KthFold = AlphaRelativeDensityRatioEstimator.H_hat(
                    self.alphaConstraint, K_ref_trainingSet,
                    K_test_trainingSet)
                h_h_KthFold = AlphaRelativeDensityRatioEstimator.h_hat(
                    K_ref_trainingSet)

                for lambdaIdx in numpy.r_[0:numpy.size(lambdaCandidates)]:

                    lambdaCandidate = lambdaCandidates[lambdaIdx]

                    theta_h_KthFold = AlphaRelativeDensityRatioEstimator.theta_hat(
                        H_h_KthFold, h_h_KthFold, lambdaCandidate,
                        self.kernelBasis)

                    # Select the subset of the kernel matrix not used in the training set
                    # for use as the test set to validate against
                    K_ref_testSet = K_ref[:, referenceSamplesCVIdxs[
                        referenceSamplesCVSplit == foldIdx]]
                    K_test_testSet = K_test[:, testSamplesCVIdxs[
                        testSamplesCVSplit == foldIdx]]

                    r_alpha_Xref = AlphaRelativeDensityRatioEstimator.g_of_X_theta(
                        K_ref_testSet, theta_h_KthFold)
                    r_alpha_Xtest = AlphaRelativeDensityRatioEstimator.g_of_X_theta(
                        K_test_testSet, theta_h_KthFold)

                    # Calculate the objective function J(theta) under the current parameters
                    J = AlphaRelativeDensityRatioEstimator.J_of_theta(
                        self.alphaConstraint, r_alpha_Xref, r_alpha_Xtest)

                    foldResult[foldIdx, lambdaIdx] = J

            crossValidationScores[sigmaIdx, :] = numpy.mean(foldResult, 0)

        Matrix.show("Cross-Validation Scores", crossValidationScores,
                    self.settings)

        crossValidationMinScores = crossValidationScores.min(1)
        crossValidationMinIdxForLambda = crossValidationScores.argmin(1)
        crossValidationMinIdxForSigma = crossValidationMinScores.argmin()

        optimalSigma = sigmaWidths[crossValidationMinIdxForSigma]
        optimalLambda = lambdaCandidates[
            crossValidationMinIdxForLambda[crossValidationMinIdxForSigma]]

        return (optimalSigma, optimalLambda)
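J_of_theta, the score minimized by this cross-validation loop, is not shown here. In the usual RULSIF formulation it is the sample approximation of the squared-error criterion; a hedged sketch of what it presumably computes:

import numpy as np

def J_of_theta_sketch(alpha, r_alpha_ref, r_alpha_test):
    # J(theta) ~= alpha/2 * E_ref[r^2] + (1 - alpha)/2 * E_test[r^2] - E_ref[r]
    return (alpha / 2.0) * np.mean(r_alpha_ref ** 2) \
        + ((1.0 - alpha) / 2.0) * np.mean(r_alpha_test ** 2) \
        - np.mean(r_alpha_ref)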
Example #10
# generate the center of the gaussian and the grid
x_q = np.random.uniform(-1, 1, 2)
x_p = np.meshgrid(np.linspace(-5, 5, 100), np.linspace(-5, 5, 100))
x_p_0 = np.reshape(x_p[0], (-1, 1))
x_p_1 = np.reshape(x_p[1], (-1, 1))

# generate a random 2x2 positive definite matrix with close to orthogonal eigenvectors
a = np.random.random(2)
a = a / np.linalg.norm(a)
b = np.random.random(2)
b = b / np.linalg.norm(b)
b = b - .8 * b.dot(a) * a  # make b almost orthogonal to a
b = b / np.linalg.norm(b)
W = np.outer(a, a) + np.outer(b, b)

# compute the kernel value over the entire grid
kernel = GaussianKernel([1, W])
K = np.array([
    kernel.eval(x_q, np.hstack([x_0, x_1])) for x_0, x_1 in zip(x_p_0, x_p_1)
])

# test batch evaluation
K_ = kernel.eval_batch(x_q[..., np.newaxis], np.reshape(x_p, (2, -1)))
assert ((K == K_).all())

K = np.reshape(K, x_p[0].shape)

# plot the gaussian
plt.pcolor(x_p[0], x_p[1], K)
plt.plot(x_q[0], x_q[1], '*')
plt.show()
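Since W is the sum of two outer products of linearly independent unit vectors, it is symmetric positive definite and therefore a valid anisotropic bandwidth matrix for the kernel. A quick sanity check (not part of the original script) makes that explicit:

# W = a a^T + b b^T with independent unit vectors a, b is symmetric positive
# definite, so all of its eigenvalues must be strictly positive.
assert np.allclose(W, W.T)
assert (np.linalg.eigvalsh(W) > 0).all()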
Example #11
    '''
    model.train(X, y, optimize=False)
    plt.figure()
    for m in model.models:
        mean = m.center
        cov = (m.X - m.center[..., np.newaxis]).dot((m.X - m.center[..., np.newaxis]).T) / m.X.shape[1]
        plot_cov_ellipse(cov, mean, fill=False)
        color = model.compute_distance(m.center, center) * np.ones(m.X[1, :].shape)
        plt.scatter(m.X[0, :], m.X[1, :], c=color, vmax=1.0, vmin=min(np.min(color), 0.8))
    plt.colorbar()

if __name__ == '__main__':
    N = 1000
    init_params = [1.0, 1.0]

    # test with sequential data
    X, y = get_sequential_data(N)
    kernel = GaussianKernel([1.0, 1.0])
    seq_model = LGPCollection(kernel, .98, 100)
    plot_local_models(X, y, seq_model)

    # test with randomly ordered data
    X, y = get_random_data(N)
    unordered_model = LGPCollection(kernel, .98, 100)
    plot_local_models(X, y, unordered_model)

    # test the model weights
    plot_model_weights(X, y, np.mean(X, 1), unordered_model)

    plt.show()
Example #12
    def __init__(self,
                 gamma_o=5,
                 gamma_c=4,
                 gamma_b=2,
                 gamma_p=3,
                 grid_o_dim=25,
                 grid_c_dims=(5, 5, 5),
                 grid_p_dims=(5, 5),
                 epsilon_g=0.8,
                 epsilon_s=0.2):
        print "basis for orientation"
        k_o = GaussianKernelForAngle(1 / numpy.sqrt(2 * gamma_o))
        self.projector_o = FeatureVectorProjection(k_o)
        X = numpy.linspace(-numpy.pi, numpy.pi, grid_o_dim + 1)[:-1]
        X = X[:, numpy.newaxis]
        self.projector_o.fit(X)

        print "basis for color"
        k_c = GaussianKernel(1 / numpy.sqrt(2 * gamma_c))
        self.projector_c = FeatureVectorProjection(k_c)
        r_step = 1.0 / (grid_c_dims[0] - 1)
        g_step = 1.0 / (grid_c_dims[1] - 1)
        b_step = 1.0 / (grid_c_dims[2] - 1)
        X = numpy.mgrid[0:1 + r_step:r_step, 0:1 + g_step:g_step,
                        0:1 + b_step:b_step].reshape(3, -1).T
        self.projector_c.fit(X)

        print "basis for binary patterns"
        k_b = GaussianKernel(1 / numpy.sqrt(2 * gamma_b))
        self.projector_b = FeatureVectorProjection(k_b)
        X = numpy.mgrid[0:2:1, 0:2:1, 0:2:1, 0:2:1, 0:2:1, 0:2:1, 0:2:1,
                        0:2:1].reshape(8, -1).T
        self.projector_b.fit(X)

        print "basis for positions"
        k_p = GaussianKernel(1 / numpy.sqrt(2 * gamma_p))
        self.projector_p = FeatureVectorProjection(k_p)
        x_step = 1.0 / (grid_p_dims[0] - 1)
        y_step = 1.0 / (grid_p_dims[1] - 1)
        X = numpy.mgrid[0:1 + x_step:x_step,
                        0:1 + y_step:y_step].reshape(2, -1).T
        self.projector_p.fit(X)

        self.epsilon_g = epsilon_g
        self.epsilon_s = epsilon_s

        kpca_kernel = GaussianKernel(0.4)
        X_p = self.projector_p.predict(self.projector_p.basis)

        kdes_dim = self.projector_o.ndim * self.projector_p.ndim
        X_o = self.projector_o.predict(self.projector_o.basis)
        X_op = numpy.zeros((kdes_dim, kdes_dim))
        for i, (x, y) in enumerate(zip(X_o, X_p)):
            X_op[i, :] = numpy.kron(x, y)
        self.kpca_op = KernelPCA(kpca_kernel)
        self.kpca_op.fit(X_op)

        kdes_dim = self.projector_c.ndim * self.projector_p.ndim
        X_c = self.projector_c.predict(self.projector_c.basis)
        X_cp = numpy.zeros((kdes_dim, kdes_dim))
        pos = 0
        for x in X_c:
            for y in X_p:
                X_cp[pos, :] = numpy.kron(x, y)
                pos += 1
        self.kpca_cp = KernelPCA(kpca_kernel)
        self.kpca_cp.fit(X_cp)
Example #13
from cross_entropy_classifier import CrossEntropyClassifier
from kernels import (LinearKernel, GaussianKernel, HistogramIntersectionKernel,
                     LaplacianRBFKernel, SublinearRBFKernel)
from svm import KernelSVMOneVsOneClassifier, KernelSVMOneVsAllClassifier
from load_features import load_features
from utils import plot_history, write_output, concat_bias

output_suffix = 'trial15'

feature_extractor = 'hog_fisher'
overwrite_features = False

overwrite_kpca = False
kernel_pca = True
kernel_pca_kernel = GaussianKernel(0.6)
cut_percentage = 90
# to change when small data: n_train and n_test in utils.py, n_components in fisher_feature_extractor.py
folder_name = 'data/'
# folder_name = 'data_small/'

nclasses = 10
classifier = 'svm_ovo'
do_validation = True
validation = 0.2
do_prediction = False

svm_kernel = LinearKernel()
#svm_kernel = LaplacianRBFKernel(1.6)
C = 1
Example #14
        self.G = G.real

    def predict(self, X):
        assert X.ndim == 2
        n, dX = X.shape
        m, dB = self.basis.shape
        assert dX == dB
        K = self.kernel.build_K(X, self.basis)
        return numpy.dot(K, self.G)


if __name__ == '__main__':
    from kernels import GaussianKernel

    sigma = 1
    kernel = GaussianKernel(sigma)
    projector = FeatureVectorProjection(kernel)
    basis = numpy.linspace(-10, 10, 20)
    basis = basis[:, numpy.newaxis]
    projector.fit(basis)

    x = numpy.linspace(-10, 10, 500)
    x = x[:, numpy.newaxis]

    features = projector.predict(x)
    center = numpy.array([0])
    center = center[:, numpy.newaxis]
    features_center = projector.predict(center)

    ypred = numpy.dot(features, features_center.T)
    y = numpy.exp(-x**2 / (2 * sigma**2))
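The fragment stops right after computing ypred (dot products of projected features) and y (the exact Gaussian kernel evaluated against the center). A short follow-up, assuming matplotlib.pyplot is available, would show how closely the finite basis reproduces the kernel:

    # Compare the exact kernel profile with its finite-basis approximation
    # (this comparison is not part of the original fragment).
    import matplotlib.pyplot as plt
    plt.plot(x[:, 0], y[:, 0], label='exact k(x, 0)')
    plt.plot(x[:, 0], ypred[:, 0], '--', label='projected-feature approximation')
    plt.legend()
    plt.show()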
Example #15
from kernel_pca import KernelPCA
from kmeans import Kmeans

cats = ['sci.med', 'misc.forsale', 'soc.religion.christian']
newsgroups_all = fetch_20newsgroups(subset='all', categories=cats)
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(newsgroups_all.data)
X = vectors.toarray()
y = newsgroups_all.target
# only take 800 for training and 200 for testing
X_train = X[0:800, :]
X_test = X[800:1000, :]
y_train = y[0:800]
y_test = y[800:1000]

kernel = GaussianKernel(sigma=1)  # to change
kpca = KernelPCA(kernel)
kpca.fit(X_train)
n_components = 2  # to change
X_train_proj = kpca.predict(X_train, components=n_components)
X_test_proj = kpca.predict(X_test, components=n_components)

permuts = numpy.array([[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1],
                       [2, 1, 0]])


def find_permut_for_prediction(y_pred, y, permuts):
    y_pred_best = y_pred
    accuracy_best = 0.
    permut_best = permuts[0, :]
    for i in range(0, len(permuts)):
Example #16
# Let's blur the owl
## Define a grid of sample locations
new_shape = np.asarray(image.shape[2:]) // 2
I, J = np.meshgrid(np.linspace(1, image.shape[2], num=new_shape[0]),
                   np.linspace(1, image.shape[3], num=new_shape[1]),
                   indexing='ij')
samples = np.stack((I, J), 0)

# Convert to pytorch tensors
image = torch.FloatTensor(image).type(dtype)
samples = torch.FloatTensor(samples).type(dtype)

# Create an interpolator
#itp = Interpolator(BilinearKernel(), samples)
print('Image shape: {}'.format(image.shape))
print('Sample shape: {}'.format(samples.size()))
#itp = Interpolator(BilinearKernel(), samples)
itp = Interpolator(GaussianKernel(5, 1), samples)

# Run the interpolator
start = time.time()
new_image = itp.forward(image)
duration = time.time() - start
print("Duration: {}".format(duration))

# View
new_shape = np.append(3, new_shape)
new_shape = np.append(1, new_shape)
new_image = torch.reshape(new_image, tuple(new_shape)).cpu().numpy()
view(new_image[0, ...])
Example #17
sigma = .5
scale = np.power(np.power(0.5, 1/3), np.arange(nlevels+1))

H, W = image.shape[2], image.shape[3]
for level in range(nlevels):
    h_old = int(np.floor(H * scale[level]))-1
    w_old = int(np.floor(W * scale[level]))-1
    h = int(np.floor(H * scale[level+1]))
    w = int(np.floor(W * scale[level+1]))

    I, J = np.meshgrid(np.linspace(0, h_old, num=h),
                       np.linspace(0, w_old, num=w), indexing='ij')
    samples = np.stack((I,J), 0)
    samples = torch.FloatTensor(samples).type(dtype)

    scale_space.append(self(GaussianKernel(5, sigma), samples))

scale_space = nn.Sequential(*scale_space)

image = image.cuda()
scale_space = scale_space.cuda()
# Run the interpolator
start = time.time()
image = scale_space.forward(image)
duration = time.time() - start
print("Duration: {}".format(duration / 10.))

# View
image = image.cpu().numpy()
view(image[0,...])
Example #18
    from sklearn.datasets import make_circles
    from kernels import LinearKernel, GaussianKernel

    f, axarr = plt.subplots(2, 2, sharex=True)

    X, y = make_circles(n_samples=1000,
                        random_state=123,
                        noise=0.1,
                        factor=0.2)
    axarr[0, 0].scatter(X[y == 0, 0], X[y == 0, 1], color='red')
    axarr[0, 0].scatter(X[y == 1, 0], X[y == 1, 1], color='blue')

    kpca = KernelPCA(LinearKernel())
    kpca.fit(X)
    Xproj = kpca.predict(1)
    axarr[0, 1].scatter(Xproj[y == 0, 0], numpy.zeros(500), color='red')
    axarr[0, 1].scatter(Xproj[y == 1, 0], numpy.zeros(500), color='blue')

    # decrease sigma to improve separation
    kpca = KernelPCA(GaussianKernel(0.686))
    kpca.fit(X, cut_percentage=95, plot=True)
    print(kpca.alpha.shape[1])
    Xproj = kpca.predict(2)
    axarr[1, 0].scatter(Xproj[y == 0, 0], numpy.zeros(500), color='red')
    axarr[1, 0].scatter(Xproj[y == 1, 0], numpy.zeros(500), color='blue')

    axarr[1, 1].scatter(Xproj[y == 0, 0], Xproj[y == 0, 1], color='red')
    axarr[1, 1].scatter(Xproj[y == 1, 0], Xproj[y == 1, 1], color='blue')

    plt.show()
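As a cross-check (not part of the original script), the same projection can be obtained from scikit-learn, assuming the custom GaussianKernel(sigma) corresponds to the RBF kernel exp(-||x - y||^2 / (2 sigma^2)), i.e. gamma = 1 / (2 sigma^2); component signs and ordering may differ:

    # Hypothetical cross-check against scikit-learn's KernelPCA.
    from sklearn.decomposition import KernelPCA as SkKernelPCA
    sigma = 0.686
    Xproj_sk = SkKernelPCA(n_components=2, kernel='rbf',
                           gamma=1.0 / (2 * sigma ** 2)).fit_transform(X)
    print(Xproj_sk.shape)  # (1000, 2)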
Example #19
    N = len(settings)

    for i, params in enumerate(settings):

        logger.info(f"Run {i + 1} / {N}")

        if args.use_precompute_gram:
            gamma, _lambda = None, None
            sigma, window_size = params
        else:
            gamma, _lambda, sigma, window_size = params
        # convert window_size to int
        window_size = int(window_size)

        if kernel_name == "Gaussian":
            kernel = GaussianKernel(gamma)

        elif kernel_name == "Linear":
            kernel = LinearKernel()

        elif kernel_name == "Conv":
            kernel = ConvKernel(sigma=sigma, k=window_size)

        if model_name == "SVM":
            clf = SVM(_lambda=_lambda, kernel=kernel)

        elif model_name == "SPR":
            clf = SPR(kernel=kernel)

        elif model_name == "SVM_precomputed_gram":
            if args.use_precompute_gram:
Example #20
    def __init__(self, mean=None, kernel=None, sigma_n=0.001):

        # set defaults
        self.sigma_n = sigma_n
        self.kernel = kernel or GaussianKernel([1.0, 1.0])
        self.mean = mean or (lambda x: 0)
Example #21
    def __init__(self, feature_selector, kernel=GaussianKernel(), C=1,
                 cache_size=200):
        super(SVMModel, self).__init__(feature_selector)
        self.C = C
        self.kernel = kernel
        self.cache_size = cache_size
Example #22
    len_files = len(FILES)
    
    best_score = {i: 0 for i in range(len_files)}
    best_lambda = {i: 0 for i in range(len_files)}
    best_gamma = {i: 0 for i in range(len_files)}
    best_sigma = {i: 0 for i in range(len_files)}
    best_window_size = {i: 0 for i in range(len_files)}
    
    
    # Main loop
    for _, params in enumerate(settings):
        
        gamma, _lambda = params
        
        if kernel_name == "Gaussian":
            kernel = GaussianKernel(gamma)

        elif kernel_name == "Linear":
            kernel = LinearKernel()

        if model_name == "SVM":
            clf = SVM(_lambda=_lambda, kernel=kernel)

        elif model_name == "SPR":
            clf = SPR(kernel=kernel)

        # Loop from pre-computed embeddings
        #for filename in os.listdir(EMBEDDING_DIR)[:1]: # small test
        for filename in os.listdir(EMBEDDING_DIR):
        
            # Full path
Example #23
# Do SVM with Gaussian Kernel predictions

results0 = np.zeros(3000)
len_files = len(FILES)

for i in range(len_files):

    γ = gamma_list[i]
    λ = lambda_list[i]

    X_train, Y_train, X_test = load_data(i,
                                         data_dir=DATA_DIR,
                                         files_dict=FILES)

    kernel = GaussianKernel(γ)
    clf = SVM(_lambda=λ, kernel=kernel)
    clf.fit(X_train, Y_train)
    y_pred = clf.predict(X_test)
    results0[i * 1000:i * 1000 + 1000] = y_pred

# SAVE Results
save_results("results_SVM_gaussian.csv", results0, RESULT_DIR)
print("1/3 Ending SVM with Gaussian kernel...")

#####################################
# 2) SVM with Convolutional kernel  #
#####################################
print("2/3 Starting SVM with Convolutional kernel...")
# Define parameters lists
sigma_list = [0.31, 0.31, 0.3]