def nabla_RBF(X, parameters):
    """Gradient of the RBF kernel matrix with respect to the bandwidth sigma.

    NOTE(review): a second ``nabla_RBF`` (the input-gradient version) is
    defined later in this file and shadows this one at import time --
    confirm which definition callers actually need, or rename one.

    Parameters
    ----------
    parameters : sequence whose first entry is the bandwidth sigma.

    Returns
    -------
    grad_matrix : d(theta)/d(sigma), elementwise D * theta / sigma**3.
    theta : the RBF kernel matrix exp(-D / (2 sigma**2)),
        where D = norm_matrix(X, X).
    """
    sigma = parameters[0]
    pair_norms = norm_matrix(X, X)
    theta = np.exp(-pair_norms / (2 * sigma**2))
    # d/d sigma of exp(-D / (2 sigma^2)) = D * exp(...) / sigma^3
    grad_matrix = pair_norms * theta / sigma**3
    return grad_matrix, theta
def kernel_gaussian_linear(matrix_1, matrix_2, parameters):
    """Weighted sum-of-Gaussians kernel.

    ``parameters`` is a (2, m) array: row 0 holds the bandwidths sigma_i,
    row 1 the weights beta_i (convention shared with
    ``nabla_linear_gaussian``). Returns

        sum_i beta_i**2 * exp(-D / (2 * sigma_i**2))

    where D = norm_matrix(matrix_1, matrix_2).
    """
    dist = norm_matrix(matrix_1, matrix_2)
    return sum(
        parameters[1, j]**2 * np.exp(-dist / (2 * parameters[0, j]**2))
        for j in range(parameters.shape[1])
    )
def nabla_RBF(X, parameters):
    """Derivative of the RBF kernel matrix with respect to the inputs X.

    NOTE(review): this redefines a ``nabla_RBF`` that also appears earlier
    in this file (the sigma-gradient version); this later definition wins
    at import time -- confirm the earlier one is dead code or rename one.

    Fix: the Python loop that copied the kernel matrix into every slice of
    a (n, d, n) stack is replaced by broadcasting -- identical values,
    without materializing d copies one at a time.

    Parameters
    ----------
    X : array of shape (n, d).
    parameters : sequence whose first entry is the bandwidth sigma.

    Returns
    -------
    derivative_matrix : (n, d, n) array, -(pairwise diff) * K / sigma**2.
    batch_matrix : (n, n) kernel matrix exp(-D / (2 sigma**2)).
    """
    matrix_norm = norm_matrix(X, X)
    sigma = parameters[0]
    batch_matrix = np.exp(-matrix_norm / (2 * sigma**2))
    # Pairwise differences x_i - x_j; assumed shape (n, d, n) to align with
    # the broadcast below -- TODO confirm against pairwise_diff.
    matrix_diff = pairwise_diff(X)
    # Broadcast the (n, n) kernel over the feature axis instead of filling
    # K_matrix[:, i, :] in a loop; the product is elementwise-identical.
    derivative_matrix = -matrix_diff * batch_matrix[:, None, :] / sigma**2
    return derivative_matrix, batch_matrix
def nabla_rational_quadratic(X, parameters):
    """Gradient of the rational-quadratic kernel w.r.t. its parameters.

    The kernel is theta = (beta**2 + D)**(-(alpha + epsilon)) with
    D = norm_matrix(X, X), matching ``kernel_rational_quadratic``.

    Fix: epsilon was 0.00001 here but 0.0001 in
    ``kernel_rational_quadratic``; a parameter gradient must use the same
    regularizer as the kernel it differentiates, so both now use 0.0001.

    Returns
    -------
    grad_matrix : (2, n, n) array;
        grad_matrix[0] = d theta / d alpha, grad_matrix[1] = d theta / d beta.
    theta : (n, n) kernel matrix.
    """
    alpha = parameters[0]
    beta = parameters[1]
    matrix_norm = norm_matrix(X, X)
    epsilon = 0.0001  # keep in sync with kernel_rational_quadratic
    base = beta**2 + matrix_norm
    theta = base**(-(alpha + epsilon))
    grad_matrix = np.zeros((2, X.shape[0], X.shape[0]))
    # d/d alpha of base**(-(alpha+eps)) = -log(base) * theta
    grad_matrix[0] = -np.multiply(theta, np.log(base))
    # d/d beta via the chain rule through beta**2
    grad_matrix[1] = -2 * (alpha + epsilon) * beta * base**(-(alpha + epsilon) - 1)
    return grad_matrix, theta
def nabla_rational_quad(X, parameters):
    """Derivative of a rational-quadratic-type kernel matrix w.r.t. X.

    The kernel form here is (beta + D)**(-alpha) with D = norm_matrix(X, X)
    -- note: no beta**2 and no epsilon, unlike ``kernel_rational_quadratic``.

    Returns
    -------
    derivative_matrix : (n, d, n) array of input gradients,
        -2 * alpha * (pairwise diff) * (beta + D)**(-alpha - 1).
    batch_matrix : (n, n) kernel matrix (beta + D)**(-alpha).
    """
    alpha = parameters[0]
    beta = parameters[1]
    base = beta + norm_matrix(X, X)
    batch_matrix = base**(-alpha)
    inner = base**(-alpha - 1)
    # Broadcast the (n, n) inner derivative over the feature axis rather
    # than filling a stacked (n, d, n) array in a loop -- same values.
    diffs = pairwise_diff(X)
    derivative_matrix = -2 * alpha * diffs * inner[:, None, :]
    return derivative_matrix, batch_matrix
def nabla_linear_gaussian(X, parameters):
    """Parameter gradients of the sum-of-Gaussians kernel.

    ``parameters`` is a (2, m) array (row 0: sigmas, row 1: betas),
    the same convention as ``kernel_gaussian_linear``.

    Returns
    -------
    grad_matrix : (2, m, n, n) array;
        grad_matrix[0, i] = d theta / d sigma_i,
        grad_matrix[1, i] = d theta / d beta_i.
    theta : (n, n) kernel matrix, sum_i beta_i**2 * exp(-D / (2 sigma_i**2)).
    """
    dists = norm_matrix(X, X)
    n_terms = parameters.shape[1]

    theta = 0
    for j in range(n_terms):
        sigma_j = parameters[0, j]
        beta_j = parameters[1, j]
        theta = theta + beta_j**2 * np.exp(-dists / (2 * sigma_j**2))

    grad_matrix = np.zeros((2, n_terms, X.shape[0], X.shape[0]))
    for j in range(n_terms):
        sigma_j = parameters[0, j]
        beta_j = parameters[1, j]
        gauss = np.exp(-dists / (2 * sigma_j**2))
        # d/d sigma: beta^2 * D * exp(-D/(2 sigma^2)) / sigma^3
        grad_matrix[0, j] = beta_j**2 * dists * gauss / sigma_j**3
        # d/d beta: 2 * beta * exp(-D/(2 sigma^2))
        grad_matrix[1, j] = 2 * beta_j * gauss
    return grad_matrix, theta
def kernel_cauchy(matrix_1, matrix_2, parameters):
    """Cauchy kernel: 1 / (1 + D / sigma**2).

    D = norm_matrix(matrix_1, matrix_2); sigma is parameters[0].
    """
    sigma = parameters[0]
    denom = 1 + norm_matrix(matrix_1, matrix_2) / sigma**2
    return 1 / denom
def kernel_inverse_multiquad(matrix_1, matrix_2, parameters):
    """Inverse multiquadric kernel: (beta**2 + gamma * D)**(-1/2).

    D = norm_matrix(matrix_1, matrix_2); beta is parameters[0],
    gamma is parameters[1].
    """
    beta = parameters[0]
    gamma = parameters[1]
    shifted = beta**2 + gamma * norm_matrix(matrix_1, matrix_2)
    return shifted**(-1 / 2)
def kernel_inverse_power_alpha(matrix_1, matrix_2, parameters):
    """Inverse-power kernel: (1 + D)**(-(alpha + epsilon)) with beta fixed.

    Single-parameter variant of ``kernel_rational_quadratic`` (beta is
    hard-wired to 1.0); epsilon keeps the exponent strictly negative even
    when alpha == 0.
    """
    alpha = parameters[0]
    epsilon = 0.0001
    beta = 1.0
    base = beta**2 + norm_matrix(matrix_1, matrix_2)
    return base**(-(alpha + epsilon))
def kernel_rational_quadratic(matrix_1, matrix_2, parameters):
    """Rational-quadratic kernel: (beta**2 + D)**(-(alpha + epsilon)).

    D = norm_matrix(matrix_1, matrix_2); alpha is parameters[0], beta is
    parameters[1]. epsilon keeps the exponent strictly negative when
    alpha == 0.
    """
    alpha = parameters[0]
    beta = parameters[1]
    epsilon = 0.0001
    base = beta**2 + norm_matrix(matrix_1, matrix_2)
    return base**(-(alpha + epsilon))
def kernel_laplacian(matrix_1, matrix_2, parameters):
    """Exponential kernel: exp(-gamma * D), D = norm_matrix(matrix_1, matrix_2).

    NOTE(review): if ``norm_matrix`` returns *squared* distances (as the
    2*sigma**2 convention of the Gaussian kernels in this file suggests),
    this is actually Gaussian-shaped rather than a true Laplacian --
    confirm norm_matrix's output.
    """
    gamma = parameters[0]
    dist = norm_matrix(matrix_1, matrix_2)
    return np.exp(-gamma * dist)
def kernel_RBF(matrix_1, matrix_2, parameters):
    """Gaussian RBF kernel: exp(-D / (2 * sigma**2)).

    D = norm_matrix(matrix_1, matrix_2); sigma is parameters[0].

    Fix: the exponent previously lacked the factor 2 in the denominator
    (exp(-D / sigma**2)), disagreeing with every other Gaussian in this
    file -- ``kernel_gaussian_linear``, ``nabla_linear_gaussian``, and
    both ``nabla_RBF`` definitions all use 2 * sigma**2. In particular
    the sigma-gradient computed by ``nabla_RBF`` (D * theta / sigma**3)
    is only the derivative of the 2*sigma**2 form, so the kernel is
    brought in line with its gradient.
    """
    sigma = parameters[0]
    matrix = norm_matrix(matrix_1, matrix_2)
    return np.exp(-matrix / (2 * sigma**2))