def rbf(_x, _centers, _c):
    # Output is sqrt(sum((x - center)^2) + c), transposed.
    _temp = _x - _centers
    _temp = np.power(_temp, 2)
    _temp = np.sum(_temp, axis=2) + _c
    _temp = np.sqrt(_temp)
    return np.transpose(_temp)
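# A minimal usage sketch for rbf, assuming _x broadcasts against _centers to
# shape (n_points, n_centers, dim); the shapes and values below are
# illustrative assumptions, not taken from the original code.
import numpy as np

x = np.random.randn(4, 1, 3)        # 4 points, broadcast over centers, dim 3
centers = np.random.randn(1, 5, 3)  # 5 centers
out = rbf(x, centers, 1.0)
print(out.shape)                    # (5, 4): one row per center after the transpose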
def positional_encoding(self, embed, mask_mat):
    ''' Return a positional encoding matrix of shape (n_batch, N_CTX, embedding_dim). '''
    n_batch = embed.shape[0]
    pos_mat = np.zeros((n_batch, N_CTX, embedding_dim), dtype=np.float32)
    for i in range(n_batch):
        # Number of valid (unmasked) positions in sequence i.
        l1 = int(np.sum(mask_mat[i, :]))
        pos_mat[i, :l1, :] = self.pe[:l1, :]
    return pos_mat
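# A minimal sketch of exercising this method; the PositionalEncoder class,
# the constants N_CTX and embedding_dim, the random pe table, and the inputs
# below are all illustrative assumptions, not part of the original code.
import numpy as np

N_CTX, embedding_dim = 8, 4

class PositionalEncoder:
    def __init__(self):
        # Any (N_CTX, embedding_dim) table works for the sketch; real code
        # would typically use sinusoidal or learned encodings.
        self.pe = np.random.randn(N_CTX, embedding_dim).astype(np.float32)

PositionalEncoder.positional_encoding = positional_encoding  # attach the method above

embed = np.zeros((2, N_CTX, embedding_dim), dtype=np.float32)
mask_mat = np.array([[1, 1, 1, 0, 0, 0, 0, 0],
                     [1, 1, 1, 1, 1, 0, 0, 0]], dtype=np.float32)
print(PositionalEncoder().positional_encoding(embed, mask_mat).shape)  # (2, 8, 4)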
def loss(self, score, targets):
    """
    :param score: (batch_size, 3)
    :param targets: (batch_size, 3)
    :return: scalar sum of the element-wise differences between score and targets
    """
    loss = score - targets
    loss = np.sum(loss)
    return loss
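# A quick check of what this loss computes; since the method expects a class
# instance, the same computation is shown directly on made-up (batch_size, 3)
# arrays, which are illustrative assumptions only.
import numpy as np

score = np.array([[0.2, 0.5, 0.3],
                  [0.1, 0.8, 0.1]])
targets = np.array([[0.0, 1.0, 0.0],
                    [0.0, 1.0, 0.0]])
print(np.sum(score - targets))  # same computation as the method body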
def entropy(self, dist_info):
    """
    Compute the entropy of the distribution

    Args:
        dist_info (dict): dict of distribution parameters as numpy arrays

    Returns:
        (numpy array): entropy
    """
    probs = dist_info['probs']
    entropy = np.sum(-probs * np.log(probs), -1)
    return entropy
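# A minimal check of the entropy computation, assuming dist_info holds a
# batch of categorical distributions under the key 'probs'; the values below
# are illustrative only.
import numpy as np

probs = np.array([[0.5, 0.5],
                  [0.9, 0.1]])
print(np.sum(-probs * np.log(probs), -1))  # [0.693..., 0.325...]: ln 2 for the uniform row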
def matHadamard(T, v):
    # Scale each row of the (m, n) matrix T by the corresponding entry of v,
    # i.e. compute diag(v) @ T via an outer product and a masked sum.
    m, n = T.shape
    T = np.tensordot(v.reshape(m), T, 0)  # outer product, shape (m, m, n)
    U_Tensor = np.concatenate(
        [np.identity(m).reshape(m, m, 1) for i in range(n)], 2)
    T = np.multiply(T, U_Tensor)          # keep only the diagonal slices
    T = np.sum(T, 0)
    return T
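# A quick illustrative check of matHadamard; with these made-up inputs the
# result should match the simpler broadcast expression v[:, None] * T.
import numpy as np

T = np.arange(6, dtype=float).reshape(2, 3)
v = np.array([2.0, 3.0])
print(matHadamard(T, v))  # [[0., 2., 4.], [9., 12., 15.]]
print(v[:, None] * T)     # same result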
def concat_along_time_axis(cls, trajectories):
    """
    Concatenate a list of trajectory objects along the time axis.
    Useful for assembling an entire trajectory from multiple sub-trajectories.
    """
    # Check that all sub-trajectories have a batch size of 1 and the same dt.
    assert [x.n for x in trajectories] == [1] * len(trajectories)
    assert [x.dt for x in trajectories] == [trajectories[0].dt] * len(trajectories)

    n = trajectories[0].n
    dt = trajectories[0].dt
    k = sum([x.k for x in trajectories])
    position_nk2 = np.concatenate([x.position_nk2() for x in trajectories], axis=1)
    speed_nk1 = np.concatenate([x.speed_nk1() for x in trajectories], axis=1)
    acceleration_nk1 = np.concatenate(
        [x.acceleration_nk1() for x in trajectories], axis=1)
    heading_nk1 = np.concatenate([x.heading_nk1() for x in trajectories], axis=1)
    angular_speed_nk1 = np.concatenate(
        [x.angular_speed_nk1() for x in trajectories], axis=1)
    angular_acceleration_nk1 = np.concatenate(
        [x.angular_acceleration_nk1() for x in trajectories], axis=1)
    valid_horizons_n1 = np.sum(
        [x.valid_horizons_n1 for x in trajectories], axis=0)
    return cls(dt=dt, n=n, k=k,
               position_nk2=position_nk2,
               speed_nk1=speed_nk1,
               acceleration_nk1=acceleration_nk1,
               heading_nk1=heading_nk1,
               angular_speed_nk1=angular_speed_nk1,
               angular_acceleration_nk1=angular_acceleration_nk1,
               valid_horizons_n1=valid_horizons_n1,
               direct_init=True)
def call(self, inputs):
    one_hot = tf.one_hot(inputs, self.n_tokens)
    # Sum the one-hot vectors over the time axis to get per-token counts,
    # then drop index 0 (the padding token).
    return tf.reduce_sum(one_hot, axis=1)[:, 1:]
import numpy as np
import tensorflow as tf

max_vocabulary_size = 1000
n_oov_buckets = 100

sample_review_batches = train_set.map(lambda review, label: review)
sample_reviews = np.concatenate(list(sample_review_batches.as_numpy_iterator()),
                                axis=0)

text_vectorization = TextVectorization(max_vocabulary_size, n_oov_buckets,
                                       input_shape=[])
text_vectorization.adapt(sample_reviews)
text_vectorization(X_example)

simple_example = tf.constant([[1, 3, 1, 0, 0],
                              [2, 2, 0, 0, 0]])
print(tf.reduce_sum(tf.one_hot(simple_example, 4), axis=1))

class BagOfWords(tf.keras.layers.Layer):
    def __init__(self, n_tokens, dtype=tf.int32, **kwargs):
        super().__init__(dtype=dtype, **kwargs)
        self.n_tokens = n_tokens

    def call(self, inputs):
        one_hot = tf.one_hot(inputs, self.n_tokens)
        return tf.reduce_sum(one_hot, axis=1)[:, 1:]

bag_of_words = BagOfWords(n_tokens=4)
print(bag_of_words(simple_example))
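# A hedged sketch of how the adapted text_vectorization and BagOfWords layers
# could be chained into a classifier; the extra +1 for the padding id, the
# layer sizes, and the optimizer are illustrative assumptions, not taken from
# the snippet above.
n_tokens = max_vocabulary_size + n_oov_buckets + 1  # assume one extra id for <pad>
model = tf.keras.Sequential([
    text_vectorization,            # text -> padded integer token ids
    BagOfWords(n_tokens=n_tokens), # token ids -> per-token counts
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
model.compile(loss="binary_crossentropy", optimizer="nadam", metrics=["accuracy"])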
import matplotlib.pyplot as plt
import numpy as np
import TensorPY as tp

red_points = np.random.randn(5, 2) - 2 * np.ones((5, 2))
blue_points = np.random.randn(5, 2) + 2 * np.ones((5, 2))
plt.scatter(red_points[:, 0], red_points[:, 1], color='red')
plt.scatter(blue_points[:, 0], blue_points[:, 1], color='blue')
plt.show()

tp.Graph().as_default()

X = tp.Placeholder()
W = tp.Variable([[1, -1], [1, -1]])
b = tp.Variable([0, 0])
# print(tp.add(tp.matmul(X, W), b))
# p = tp.sigmoid(tp.add(tp.matmul(X, W), b))
p = tp.softmax(tp.add(tp.matmul(X, W), b))

session = tp.Session()
output_p = session.run(p, {X: np.concatenate((blue_points, red_points))})
print(output_p)

# Cross-entropy loss; c is assumed to be a placeholder holding the one-hot class labels.
c = tp.Placeholder()
J = tp.negative(tp.reduce_sum(tp.reduce_sum(tp.multiply(c, tp.log(p)), axis=1)))