from typing import List

import numpy as np
import tensorflow as tf

# Project-local helpers (aux_dist / dist distance utilities, global_param, di data input)
# are assumed to be imported from the surrounding package.


def get_tf_tensor(self, hyper_parameter: List[tf.Tensor], x_vector: np.ndarray,
                  x_vector_: np.ndarray) -> tf.Tensor:
    assert x_vector is not None and x_vector_ is not None, \
        "Input vectors x and x_ uninitialized: " + str(self)
    assert len(hyper_parameter) == self.get_number_of_hyper_parameter(), \
        "Invalid hyper_param size: " + str(self)
    with tf.name_scope("Matern_5_2_Kernel"):
        # Matern 5/2 kernel: k(d) = (1 + sqrt(5) d / l + 5 d^2 / (3 l^2)) * exp(-sqrt(5) d / l)
        # with hyper_parameter[0] = length scale l (and hyper_parameter[1] = scale if enabled).
        dist: tf.Tensor = aux_dist.manhattan_distance(x_vector, x_vector_)
        l = tf.abs(hyper_parameter[0])
        frac: tf.Tensor = tf.divide(
            tf.sqrt(tf.cast(5, dtype=global_param.p_dtype)) * dist, l)
        third_summand: tf.Tensor = tf.divide(
            tf.cast(5, dtype=global_param.p_dtype) * tf.square(dist),
            tf.cast(3, dtype=global_param.p_dtype) * tf.square(l))
        result: tf.Tensor = (tf.cast(1, dtype=global_param.p_dtype) + frac
                             + third_summand) * tf.exp(-frac)
        if global_param.p_scaled_base_kernel:
            # Optional scaling of the base kernel by a signal-variance hyperparameter.
            result = tf.multiply(hyper_parameter[1], result)
        self.last_hyper_parameter = hyper_parameter
        return result
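# Sketch only: a plain-NumPy reference of the same Matern 5/2 closed form, useful for
# sanity-checking the tensor above on small inputs. `matern_5_2_reference` is a
# hypothetical helper, not part of this module; it covers only the unscaled base kernel.
def matern_5_2_reference(d: np.ndarray, l: float) -> np.ndarray:
    # k(d) = (1 + sqrt(5) d / |l| + 5 d^2 / (3 l^2)) * exp(-sqrt(5) d / |l|)
    frac = np.sqrt(5.0) * np.abs(d) / abs(l)
    return (1.0 + frac + 5.0 * d ** 2 / (3.0 * l ** 2)) * np.exp(-frac)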
def get_l_derivative_matrix(hyper_parameter: List[tf.Tensor], x_vector: np.ndarray,
                            x_vector_: np.ndarray) -> tf.Tensor:
    # Derivative of the squared exponential kernel with respect to the length scale l:
    # d/dl exp(-d^2 / (2 l^2)) = (d^2 / l^3) * exp(-d^2 / (2 l^2))
    diff: tf.Tensor = aux_dist.manhattan_distance(x_vector, x_vector_)
    squared_distance: tf.Tensor = tf.square(diff, name="squared_distance")
    length_scale = hyper_parameter[0]
    l_squared: tf.Tensor = tf.square(length_scale, name="l_squared")
    cov_mat: tf.Tensor = tf.math.exp(
        -0.5 * tf.math.divide(squared_distance, l_squared),
        name="SquaredExponentialKernel")
    return tf.multiply(
        tf.divide(squared_distance, tf.multiply(length_scale, l_squared)),  # d^2 / l^3
        cov_mat)
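# Sketch only: a finite-difference check of the length-scale derivative above. Both
# helpers are hypothetical and for illustration; they assume a matrix of pairwise
# distances d and the unscaled squared exponential kernel.
def se_kernel_reference(d: np.ndarray, l: float) -> np.ndarray:
    return np.exp(-0.5 * d ** 2 / l ** 2)

def se_l_derivative_fd(d: np.ndarray, l: float, eps: float = 1e-6) -> np.ndarray:
    # Central difference of dk/dl; analytically this equals (d^2 / l^3) * k.
    return (se_kernel_reference(d, l + eps) - se_kernel_reference(d, l - eps)) / (2.0 * eps)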
def get_weight_matrix(data_input: di.DataInput) -> tf.Tensor:
    # Interpolation weights between each training point and its two nearest inducing
    # points, weighted by relative distance; see the sketch after this function.
    distances = dist.euclidian_distance(data_input.data_x_train,
                                        data_input.inducting_x_train)
    # Distance to the nearest inducing point per training point.
    reduce_min_1 = tf.reshape(tf.reduce_min(distances, axis=1), [-1, 1])
    min_1_condition = distances == reduce_min_1
    # Mask out the nearest inducing point to find the second-nearest one.
    mask_1_distances = tf.reduce_max(distances) * tf.cast(min_1_condition, dtype=tf.float64)
    distances_masked = distances + mask_1_distances
    reduce_min_2 = tf.reshape(tf.reduce_min(distances_masked, axis=1), [-1, 1])
    min_2_condition = distances_masked == reduce_min_2
    # Weight of the nearest inducing point; the second-nearest one gets the complement.
    weight_i = 1 - reduce_min_1 / (reduce_min_1 + reduce_min_2)
    w_matrix = tf.zeros_like(distances) + weight_i * tf.cast(min_1_condition, dtype=tf.float64) + \
        (1 - weight_i) * tf.cast(min_2_condition, dtype=tf.float64)
    return w_matrix
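# Sketch only: the per-row weighting rule above in plain Python, for one training point
# with distances d1 <= d2 to its nearest and second-nearest inducing points.
# `two_point_weights` is a hypothetical helper used only for illustration.
def two_point_weights(d1: float, d2: float):
    w1 = 1.0 - d1 / (d1 + d2)   # weight on the nearest inducing point
    return w1, 1.0 - w1         # e.g. d1=0.1, d2=0.3 gives (0.75, 0.25); weights sum to 1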
def get_derivative_matrices(self, hyper_parameter: List[tf.Tensor], x_vector: np.ndarray,
                            x_vector_: np.ndarray) -> List[tf.Tensor]:
    # Periodic kernel k = exp(-2 sin^2(pi d / p) / l^2) with l = hyper_parameter[0] and
    # p = hyper_parameter[1]; returns the derivative matrices with respect to l and p.
    pi = tf.cast(np.pi, global_param.p_dtype)
    dist = aux_dist.manhattan_distance(x_vector, x_vector_)
    u = tf.math.multiply(pi, tf.math.divide(dist, hyper_parameter[1]))
    sine = tf.square(tf.sin(u))
    l_squared = tf.math.square(hyper_parameter[0])
    exponent = tf.divide(
        tf.multiply(tf.cast(-2, dtype=global_param.p_dtype), sine), l_squared)
    cov_mat = tf.math.exp(exponent, name="PeriodicKernel")
    self.last_hyper_parameter = hyper_parameter
    self.latest_cov_mat = cov_mat
    return [
        self.get_l_derivative_matrix(hyper_parameter, cov_mat, exponent),
        self.get_p_derivative_matrix(hyper_parameter, cov_mat, pi, dist, u, l_squared)
    ]
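# Sketch only: the closed forms the two helpers above are expected to produce, written
# in plain NumPy. `periodic_derivatives_reference` is a hypothetical helper and assumes
# k = exp(-2 sin^2(u) / l^2) with u = pi * d / p, l = hyper_parameter[0], p = hyper_parameter[1].
def periodic_derivatives_reference(d: np.ndarray, l: float, p: float):
    u = np.pi * d / p
    k = np.exp(-2.0 * np.sin(u) ** 2 / l ** 2)
    dk_dl = k * 4.0 * np.sin(u) ** 2 / l ** 3                          # derivative w.r.t. the length scale
    dk_dp = k * 2.0 * np.pi * d * np.sin(2.0 * u) / (l ** 2 * p ** 2)  # derivative w.r.t. the period
    return dk_dl, dk_dp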
def get_tf_tensor(self, hyper_parameter: List[tf.Tensor], x_vector: np.ndarray,
                  x_vector_: np.ndarray) -> tf.Tensor:
    assert x_vector is not None and x_vector_ is not None, \
        "Input vectors x and x_ uninitialized: " + str(self)
    assert len(hyper_parameter) == self.get_number_of_hyper_parameter(), \
        "Invalid hyper_param size: " + str(self)
    with tf.name_scope("SquaredExponentialKernel"):
        # Squared exponential kernel: k(d) = exp(-d^2 / (2 l^2))
        # with hyper_parameter[0] = length scale l (and hyper_parameter[1] = scale if enabled).
        dist: tf.Tensor = aux_dist.euclidian_distance(x_vector, x_vector_)
        squared_distance: tf.Tensor = tf.square(dist, name="squared_distance")
        result: tf.Tensor = tf.math.exp(
            -0.5 * tf.math.divide(squared_distance,
                                  tf.square(hyper_parameter[0], name="l_squared")),
            name="SquaredExponentialKernel")
        if global_param.p_scaled_base_kernel:
            # Optional scaling of the base kernel by a signal-variance hyperparameter.
            result = tf.multiply(hyper_parameter[1], result)
        self.last_hyper_parameter = hyper_parameter
        return result
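# Sketch only: how such a kernel tensor is typically built for a toy 1-D training set.
# `kernel` stands for an instance of the surrounding class; the length-scale value is
# illustrative, and with global_param.p_scaled_base_kernel enabled a second (scale)
# hyperparameter would be required.
# x = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
# k_xx = kernel.get_tf_tensor([tf.constant(0.5, dtype=global_param.p_dtype)], x, x)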
def get_tf_tensor(self, hyper_parameter: List[tf.Tensor], x_vector: np.ndarray,
                  x_vector_: np.ndarray) -> tf.Tensor:
    assert x_vector is not None and x_vector_ is not None, \
        "Input vectors x and x_ uninitialized: " + str(self)
    assert len(hyper_parameter) == self.get_number_of_hyper_parameter(), \
        "Invalid hyper_param size: " + str(self)
    with tf.name_scope("PeriodicKernel"):
        # Periodic kernel: k(d) = exp(-2 sin^2(pi d / p) / l^2) with
        # hyper_parameter[0] = length scale l, hyper_parameter[1] = period p
        # (and hyper_parameter[2] = scale if enabled).
        dist: tf.Tensor = aux_dist.manhattan_distance(x_vector, x_vector_)
        sine_input: tf.Tensor = tf.math.multiply(
            tf.cast(np.pi, global_param.p_dtype),
            tf.math.divide(dist, hyper_parameter[1]))
        sine: tf.Tensor = tf.square(tf.math.sin(sine_input))
        result: tf.Tensor = tf.math.exp(-2 * sine / tf.math.square(hyper_parameter[0]),
                                        name="PeriodicKernel")
        if global_param.p_scaled_base_kernel:
            # Optional scaling of the base kernel by a signal-variance hyperparameter.
            result = tf.multiply(hyper_parameter[2], result)
        self.last_hyper_parameter = hyper_parameter
        return result
def get_c_derivative_matrix(hyper_parameter: List[tf.Tensor], x_vector: np.ndarray,
                            x_vector_: np.ndarray) -> tf.Tensor:
    diff = aux_dist.manhattan_distance(x_vector, x_vector_)
    return tf.add(
        diff, tf.multiply(tf.cast(2, global_param.p_dtype), hyper_parameter[0]))