Example #1
File: kreg.py Project: rikel/cvm
    def __init__(self, kernel, gamma=1.0, nmax=1000):
        super(BaseKernelReg, self).__init__(nmax)

        if kernel == 'rbf':
            self.kernel = lambda X, Y: rbf_kernel(X, Y, gamma)
        else:
            raise NotImplementedError('Kernel not supported')
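
Across these examples, rbf_kernel(X, Y, gamma) is expected to return the Gram matrix with entries exp(-gamma * ||X[i] - Y[j]||^2), which is exactly the formula the test in Example #6 checks against. A minimal NumPy sketch of such a function, assuming dense 2-D inputs (sklearn.metrics.pairwise.rbf_kernel behaves the same way):

import numpy as np

def rbf_kernel(X, Y, gamma):
    """RBF Gram matrix: K[i, j] = exp(-gamma * ||X[i] - Y[j]||^2).

    A minimal sketch matching the call signature used in these examples.
    """
    # Expand ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2 to stay fully vectorized.
    sq_dists = (np.sum(X ** 2, axis=1)[:, None]
                - 2.0 * X @ Y.T
                + np.sum(Y ** 2, axis=1)[None, :])
    # Clamp tiny negatives caused by floating-point cancellation.
    return np.exp(-gamma * np.maximum(sq_dists, 0.0))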
Example #2
def validation(train, train_r, val, val_r, test, test_r, perm_matrices, T):
    hyperparameters_c = [
        0.00009, 0.0004, 0.0005, 0.0006, 0.00077, 0.0008, 0.0009, 0.001
    ]
    hyperparameters_l = [
        0.005, 0.006, 0.007, 0.008, 0.01, 0.02, 0.03, 0.05, 0.06, 0.07, 0.08
    ]
    shape_1 = train.shape

    # Flatten each query's document block into one feature vector; this does
    # not depend on the hyperparameters, so build it once.
    train_reshape = np.reshape(train,
                               (shape_1[0], shape_1[1] * shape_1[2]))

    best_AP = 0
    l_opt = c_opt = None  # avoid a NameError below if no AP ever beats 0
    for c in hyperparameters_c:
        start_time = time.time()
        # The Gram matrix depends only on c, so compute it once per outer loop.
        K = kernel.rbf_kernel(train_reshape, train_reshape, c)
        for l in hyperparameters_l:
            K_inv, gamma_train = training_MAP(train, train_r, K, l)
            AP = test_proc(train, val, val_r, K_inv, gamma_train,
                           perm_matrices, c, T)

            if AP > best_AP:
                best_AP = AP
                l_opt = l
                c_opt = c
                print(best_AP, 'best AP so far', c_opt, l_opt)
        end_time = time.time()
        print("total time taken this loop: ", end_time - start_time)

    print(l_opt, c_opt)
Example #3
    def __init__(self, kernel, gamma=1.0, nmax=1000, max_sv=0.5):
        super(BaseKernelReg, self).__init__(nmax, max_sv)

        if kernel == 'rbf':
            self.kernel = lambda X, Y: rbf_kernel(X, Y, gamma)
        else:
            raise NotImplementedError('Kernel not supported')
Example #4
def Stein_sampler(model, n_particles, max_iter, step_size, seed=44,
                  adagrad=True, kernel_type='mixture'):
    np.random.seed(seed)
    d = model.dimension
    sig = 1.5

    # Initial particles drawn from a scaled standard normal.
    x_initial = sig * np.random.randn(n_particles, d)

    x = x_initial

    EVOLVEX = np.zeros([max_iter + 1, n_particles, d])
    EVOLVEX[0, :, :] = x

    adag = np.zeros([n_particles, d])  # running sum of squared updates for AdaGrad
    for i in range(max_iter):
        grad_logp = model.grad_log_p(x)   # gradient of log p for each particle: n*d
        Hs = model.Hessian_log_p(x)       # Hessian of log p for each particle: n*d*d
        A = np.mean(Hs, axis=0)           # average Hessian: d*d

        if kernel_type == 'newton':       # SVN
            v = SVN(x, grad_logp, Hs)
        elif kernel_type == 'mixture':    # matrix SVGD (mixture)
            v = mixture_hessian_SVGD(x, grad_logp, Hs)
        else:
            if kernel_type == 'gaussian':  # matrix SVGD (average)
                kernel = gaussian_kernel(A)
                B = model.inv_avg_Hessian(A)
            else:                          # vanilla SVGD
                if i > 30:
                    kernel = rbf_kernel(d, decay=True)
                else:
                    kernel = rbf_kernel(d)
                B = np.eye(d)
            v = matrix_SVGD(x, grad_logp, kernel, B)

        adag += v ** 2  # accumulate squared updates for the AdaGrad step size
        if adagrad:
            x = x + step_size * v / np.sqrt(adag + 1e-12)
        else:
            x = x + step_size * v
        EVOLVEX[i + 1, :, :] = x

    return EVOLVEX
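
The sampler only needs a model object exposing dimension, grad_log_p, Hessian_log_p, and (for the 'gaussian' kernel) inv_avg_Hessian. A hypothetical usage sketch with a toy standard-normal target, assuming the project's helpers (mixture_hessian_SVGD etc.) are importable; the toy model below is an illustration, not part of the original project:

import numpy as np

class StandardNormalModel:
    """Toy target p(x) = N(0, I); hypothetical, for illustration only."""
    dimension = 2

    def grad_log_p(self, x):
        return -x                          # grad log p(x) = -x, shape n*d

    def Hessian_log_p(self, x):
        n, d = x.shape
        return -np.tile(np.eye(d), (n, 1, 1))  # -I for every particle, n*d*d

    def inv_avg_Hessian(self, A):
        return np.linalg.inv(-A)           # assumed preconditioner convention

history = Stein_sampler(StandardNormalModel(), n_particles=50,
                        max_iter=200, step_size=0.1, kernel_type='mixture')
print(history.shape)  # (201, 50, 2): particle positions at every iteration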
Example #5
    def __sparse_features(self, X, sigma, rbf_kernel_type='conv'):
        """
        :param X: inputs of size (N, 3)
        :param sigma: bandwidth passed through to the convolution and Wasserstein kernels
        :param rbf_kernel_type: 'conv', 'wass', or anything else for the plain RBF kernel
        :return: hinged features with intercept of size (N, # of features + 1)
        """
        if rbf_kernel_type == 'conv':
            rbf_features = rbf_kernel_conv(X, self.grid, gamma=self.gamma, sigma=sigma, device=self.device)
        elif rbf_kernel_type == 'wass':
            rbf_features = rbf_kernel_wasserstein(X, self.grid, gamma=self.gamma, sigma=sigma, device=self.device)
        else:
            rbf_features = rbf_kernel(X, self.grid, gamma=self.gamma)
        return rbf_features
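
The docstring mentions an intercept column, but this excerpt returns the raw kernel features; presumably the bias column is appended elsewhere in the class. A minimal sketch of that step, assuming the features are torch tensors (the device argument suggests so); add_intercept is a hypothetical helper:

import torch

def add_intercept(features):
    """Append a constant-1 bias column: (N, F) -> (N, F + 1)."""
    ones = torch.ones(features.shape[0], 1,
                      device=features.device, dtype=features.dtype)
    return torch.cat([features, ones], dim=1)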
Example #6
def check_rbf_kernel():
    ex_name = "RBF kernel"
    n, m, d = 3, 5, 7
    gamma = 0.5
    X = np.random.random((n, d))
    Y = np.random.random((m, d))
    try:
        K = kernel.rbf_kernel(X, Y, gamma)
    except NotImplementedError:
        log(red("FAIL"), ex_name, ": not implemented")
        return True
    for i in range(n):
        for j in range(m):
            exp = np.exp(-gamma * (np.linalg.norm(X[i] - Y[j]) ** 2))
            got = K[i][j]
            if not equals(exp, got):
                log(
                    red("FAIL"), ex_name,
                    ": values at ({}, {}) do not match. Expected {}, got {}".
                    format(i, j, exp, got))
                return True  # stop at the first mismatch; otherwise PASS prints below
    log(green("PASS"), ex_name, "")
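
The same comparison can be done without the double loop; a loop-free sketch using NumPy broadcasting, with X, Y, gamma, and K as in the check above:

import numpy as np

# Pairwise squared distances over all (n, m) pairs in one shot.
sq_dists = np.sum((X[:, None, :] - Y[None, :, :]) ** 2, axis=-1)  # (n, m)
expected = np.exp(-gamma * sq_dists)
assert np.allclose(K, expected), "rbf_kernel output does not match the reference"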
Example #7
def test_proc(train, test, test_r, K_inv, gamma_train, perm_matrices, c, T):
    """
    Tests on all test data points for a bias c, and training input K_inv, gamma_train (trained with l)
    :param train: matrix of training data, padded with zeros np_array (nr_queries x nr_documents x dim_feature)
    :param test: matrix of test data, padded with zeros np_array (nr_queries x nr_documents x dim_feature)
    :param test_r: matrix of test rel labels, padded with zeros np_array (nr_queries x nr_documents x dim_feature)
    :param K_inv: (K + nlI)^{-1} (nr_documents x nr_documents)
    :param gamma_train: array of all gamma(r^{i}) (nr_queries x nr_documents x nr_documents)
    :param perm_matrices: all possible permutation matrices with dim nr_documents
    :param c: bias for linear kernel
    :param T: upper triangular matrix defined for the MAP
    :return: the mean average precision.
    """
    nr_test_queries = test_r.shape[0]
    shape_1 = train.shape

    # The flattened training matrix is loop-invariant, so build it once.
    train_reshape = np.reshape(train,
                               (shape_1[0], shape_1[1] * shape_1[2]))

    AP = 0

    j = 0
    for i in range(nr_test_queries):
        j += 1
        x = np.expand_dims(test[i, :, :], axis=0)
        shape_x = x.shape
        x = np.reshape(x, (shape_x[0], shape_x[1] * shape_x[2]))
        Kx = kernel.rbf_kernel(train_reshape, x, c)

        prediction = g_MAP(train, Kx, K_inv, gamma_train)

        _, optimal_perm = train_max(prediction, T, perm_matrices)
        r = gamma_MAP(test_r[i, :], k=10)
        r[r > 1] = 1  # clip labels above 1; the original `==` was a no-op comparison
        AP = AP * (j - 1) / j + objective_fun(r, T, optimal_perm) / j

    return AP
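
The update AP = AP * (j - 1) / j + ap_j / j is just a running mean of the per-query average precisions, as a quick check shows (the ap_values below are hypothetical):

import numpy as np

ap_values = [0.4, 0.7, 0.55]  # hypothetical per-query average precisions

AP, j = 0.0, 0
for ap in ap_values:
    j += 1
    AP = AP * (j - 1) / j + ap / j  # same recurrence as in test_proc

assert np.isclose(AP, np.mean(ap_values))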
Example #8
train_r = np.concatenate((train_r, val_r), axis=0)

shape_train = train.shape  # FOR TRAINING
train = np.reshape(train, (shape_train[0] * shape_train[1], shape_train[2]))
scaler = preprocessing.StandardScaler().fit(train)
train = scaler.transform(train)
train = np.reshape(train, (shape_train[0], shape_train[1], shape_train[2]))
train_reshape = np.reshape(train,
                           (shape_train[0], shape_train[1] * shape_train[2]))

shape_test = test.shape  # FOR TESTING
test = np.reshape(test, (shape_test[0] * shape_test[1], shape_test[2]))
test = scaler.transform(test)
test = np.reshape(test, (shape_test[0], shape_test[1], shape_test[2]))

K = kernel.rbf_kernel(train_reshape, gamma=c)
K_inv, gamma_train = training_MAP(train, train_r, K, l)
test_proc(train, test, test_r, K_inv, gamma_train, perm_matrices, c, T)


Example #9

temp_parameter = 1

theta = run_kernel_softmax("lineal_pca18", linear_kernel, train_pca, train_y,
                           temp_parameter)
test_error = compute_kernel_test_error(test_pca, test_y, linear_kernel, theta,
                                       train_pca, temp_parameter)
print('\nsoftmax_kernel lineal_pca18 \t\ttest_error:')
print('(t = {})  \t\t\t\t{:.3}'.format(temp_parameter, test_error))

c, p = 0.5, 2
kernel = lambda X, Y: polynomial_kernel(X, Y, c, p)
theta = run_kernel_softmax("polinomial_{}_{}_pca18".format(c, p), kernel,
                           train_pca, train_y, temp_parameter)
test_error = compute_kernel_test_error(test_pca, test_y, kernel, theta,
                                       train_pca, temp_parameter)
print('\nsoftmax_kernel polinomial_pca18 \ttest_error:')
print('(t = {}, c = {}, p = {})  \t\t{:.3}'.format(temp_parameter, c, p,
                                                   test_error))

gamma = 1
kernel = lambda X, Y: rbf_kernel(X, Y, gamma)
theta = run_kernel_softmax("rbf_{}_pca18".format(gamma), kernel, train_pca,
                           train_y, temp_parameter)
test_error = compute_kernel_test_error(test_pca, test_y, kernel, theta,
                                       train_pca, temp_parameter)
print('\nsoftmax_kernel rbf_pca18 \t\ttest_error:')
print('(t = {}, gamma = {})  \t\t\t{:.3}'.format(temp_parameter, gamma,
                                                 test_error))
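
For reference, the kernels plugged in above usually take these forms; a sketch assuming the conventional definitions (the project's own linear_kernel and polynomial_kernel may differ, for example in where the bias c enters):

import numpy as np

def linear_kernel(X, Y):
    """K[i, j] = <X[i], Y[j]> -- assumed definition."""
    return X @ Y.T

def polynomial_kernel(X, Y, c, p):
    """K[i, j] = (<X[i], Y[j]> + c) ** p -- assumed definition."""
    return (X @ Y.T + c) ** p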