def cross_cov_grad_data(self, inputs_1, inputs_2):
    # NOTE: This is the gradient wrt the inputs of inputs_2
    # The gradient wrt the inputs of inputs_1 is -1 times this
    r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))
    # dK/d(r^2) for K = exp(-0.5 * r^2)
    grad_K_r2 = -0.5 * np.exp(-0.5 * r2)
    # grad_dist2 gives d(r^2)/d(inputs_1); negate it for d(r^2)/d(inputs_2)
    grad_r2_x1 = kernel_utils.grad_dist2(self.ls.value, inputs_1, inputs_2)
    grad_r2_x2 = -grad_r2_x1
    return grad_K_r2[:, :, None] * grad_r2_x2
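# A minimal standalone sanity check for the squared-exponential gradient
# above (a sketch, not part of the module). _dist2 and _grad_dist2 are
# hypothetical stand-ins assumed to mirror kernel_utils.dist2 and
# kernel_utils.grad_dist2: the length-scale-weighted squared distance and
# its gradient wrt the first argument; the real module may differ.
import numpy as np

def _dist2(ls, x1, x2):
    # r2[i, j] = sum_d ((x1[i, d] - x2[j, d]) / ls[d])**2
    diff = (x1[:, None, :] - x2[None, :, :]) / ls
    return np.sum(diff ** 2, axis=2)

def _grad_dist2(ls, x1, x2):
    # d(r2[i, j]) / d(x1[i, d]), shape (N, M, D)
    return 2.0 * (x1[:, None, :] - x2[None, :, :]) / ls ** 2

def _check_se_grad(ls, x1, x2, eps=1e-6):
    # Analytic gradient wrt x2, mirroring cross_cov_grad_data above
    r2 = _dist2(ls, x1, x2)
    analytic = (-0.5 * np.exp(-0.5 * r2))[:, :, None] * -_grad_dist2(ls, x1, x2)
    # Central finite differences, one input dimension at a time
    for d in range(x2.shape[1]):
        x2p, x2m = x2.copy(), x2.copy()
        x2p[:, d] += eps
        x2m[:, d] -= eps
        numeric = (np.exp(-0.5 * _dist2(ls, x1, x2p)) -
                   np.exp(-0.5 * _dist2(ls, x1, x2m))) / (2.0 * eps)
        assert np.allclose(analytic[:, :, d], numeric, atol=1e-5)

# e.g. _check_se_grad(np.array([0.5, 2.0]),
#                     np.random.randn(4, 2), np.random.randn(3, 2))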
def cross_cov_grad_data(self, inputs_1, inputs_2):
    # NOTE: This is the gradient wrt the inputs of inputs_2
    # The gradient wrt the inputs of inputs_1 is -1 times this
    # kernel_utils.grad_dist2 returns d(r^2)/d(inputs_1), and
    # dK/d(r^2) = -(5/6)*(1 + SQRT_5*r)*exp(-SQRT_5*r); the two minus
    # signs cancel, so grad_r2 below carries the positive factor and the
    # product is already the gradient wrt inputs_2.
    r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))
    r = np.sqrt(r2)
    grad_r2 = (5.0 / 6.0) * np.exp(-SQRT_5 * r) * (1 + SQRT_5 * r)
    return grad_r2[:, :, np.newaxis] * kernel_utils.grad_dist2(
        self.ls.value, inputs_1, inputs_2)
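# Derivation of the (5/6)*(1 + SQRT_5*r) factor above, for reference:
# with K(r) = (1 + sqrt(5)*r + (5/3)*r^2) * exp(-sqrt(5)*r),
#
#   dK/dr     = -(5/3) * r * (1 + sqrt(5)*r) * exp(-sqrt(5)*r)
#   dK/d(r^2) = dK/dr / (2*r)
#             = -(5/6) * (1 + sqrt(5)*r) * exp(-sqrt(5)*r)
#
# so grad_r2 is -dK/d(r^2), and d(r^2)/d(inputs_2) = -d(r^2)/d(inputs_1)
# supplies the matching sign flip.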
def cross_cov(self, inputs_1, inputs_2):
    r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))
    cov = np.exp(-0.5 * r2)
    return cov
def cross_cov(self, inputs_1, inputs_2):
    r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))
    r = np.sqrt(r2)
    cov = (1.0 + SQRT_5 * r + (5.0 / 3.0) * r2) * np.exp(-SQRT_5 * r)
    return cov
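# Quick standalone evaluation of the Matern-5/2 form above (a sketch:
# self.ls.value becomes an explicit length-scale array, and _dist2 is
# the hypothetical stand-in defined earlier).
def _demo_matern52():
    sqrt5 = np.sqrt(5.0)  # same constant as the module-level SQRT_5
    ls = np.array([0.5, 2.0])
    x1 = np.array([[0.0, 0.0], [1.0, 1.0]])
    x2 = np.array([[0.5, -0.5]])
    r2 = _dist2(ls, x1, x2)
    r = np.sqrt(r2)
    return (1.0 + sqrt5 * r + (5.0 / 3.0) * r2) * np.exp(-sqrt5 * r)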