def cov_function(X, X2):
    # Cross-covariance between components i and j (i, j captured from the
    # enclosing scope); combines the two components' spectral parameters.
    sv = self.variance[:, i] + self.variance[:, j]
    cross_delay = tf.reshape(
        self.delay[:, i] - self.delay[:, j], [self.input_dim, 1, 1])
    cross_phase = self.phase[i] - self.phase[j]
    cross_var = (2 * self.variance[:, i] * self.variance[:, j]) / sv
    cross_mean = tf.reshape(
        (self.variance[:, i] * self.mean[:, j]
         + self.variance[:, j] * self.mean[:, i]) / sv,
        [self.input_dim, 1, 1])
    cross_magnitude = self.constant[i] * self.constant[j] \
        * tf.exp(-0.25 * rsum(
            tf.square(self.mean[:, i] - self.mean[:, j]) / sv))
    alpha = np.power(2 * np.pi, self.input_dim / 2) \
        * tf.sqrt(rprod(cross_var)) * cross_magnitude
    return alpha \
        * tf.exp(-0.5 * self.sqdist(X + self.delay[:, i],
                                    X2 + self.delay[:, j], cross_var)) \
        * tf.cos(rsum(cross_mean * (self.dist(X, X2) + cross_delay), axis=0)
                 + cross_phase)
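# For reference, a hedged reading of the closed form the cross term above
# appears to implement (assuming `sqdist(a, b, v)` is the v-weighted squared
# distance sum_d v_d * (a_d - b_d)^2 and `dist(a, b)` the per-dimension
# difference), with tau = x - x', n = input_dim, w_i = constant[i],
# mu_i = mean[:, i], theta_i = delay[:, i], phi_i = phase[i] and
# Sigma_i = diag(variance[:, i]):
#
#   Sigma_ij = 2 * Sigma_i * Sigma_j / (Sigma_i + Sigma_j)
#   mu_ij    = (Sigma_i * mu_j + Sigma_j * mu_i) / (Sigma_i + Sigma_j)
#   alpha_ij = w_i * w_j * (2*pi)^(n/2) * |Sigma_ij|^(1/2)
#              * exp(-1/4 * (mu_i - mu_j)^T (Sigma_i + Sigma_j)^(-1) (mu_i - mu_j))
#
#   k_ij(x, x') = alpha_ij
#                 * exp(-1/2 * (tau + theta_i - theta_j)^T Sigma_ij (tau + theta_i - theta_j))
#                 * cos((tau + theta_i - theta_j)^T mu_ij + phi_i - phi_j)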
def cov_function(X, X2):
    # Covariance of component i with itself (the i == j case, where the
    # delay and phase differences cancel).
    mean = tf.expand_dims(
        tf.slice(self.mean, [0, i], [self.input_dim, 1]), axis=2)
    temp = np.power(2 * np.pi, self.input_dim / 2) \
        * tf.sqrt(rprod(self.variance[:, i])) \
        * tf.square(self.constant[i])
    return temp * tf.exp(-0.5 * self.sqdist(X, X2, self.variance[:, i])) \
        * tf.cos(rsum(mean * self.dist(X, X2), 0))
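# In the same notation as above, this is the i == j special case:
#
#   k_ii(x, x') = w_i^2 * (2*pi)^(n/2) * |Sigma_i|^(1/2)
#                 * exp(-1/2 * tau^T Sigma_i tau) * cos(mu_i^T tau)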
def readweight(fseq_ls, bseq_ls, content_r_ls, pi_ls):
    """Return the list of read weightings `wr`.

    Parameters
    ----------
    fseq_ls : list of forward weightings, each `[batch_size, mem_size]`.
    bseq_ls : list of backward weightings, each `[batch_size, mem_size]`.
    content_r_ls : list of content-based read weightings, each
        `[batch_size, mem_size]`.
    pi_ls : list of read modes, each `[batch_size, 3]`.

    Returns
    -------
    wr_ls : list of read weightings, each `[batch_size, mem_size]`.
    """
    wr_ls = list()
    for fseq, bseq, cont_r, pi in zip(fseq_ls, bseq_ls, content_r_ls, pi_ls):
        # Each read weighting is a mixture, weighted by the read mode `pi`,
        # of the backward, content-based, and forward weightings.
        v = expand_dims(pi, axis=1) * stack([bseq, cont_r, fseq], axis=2)
        wr_ls.append(rsum(v, axis=2))
    return wr_ls
def tpmemrecall(tpmem_tm1, p_tm1, ww, wr_tm1_ls):
    """Temporal memory recall.

    Parameters
    ----------
    tpmem_tm1 : temporal memory, `[batch_size, mem_size, mem_size]`.
    p_tm1 : `[batch_size, mem_size]`.
    ww : write weighting, `[batch_size, mem_size]`.
    wr_tm1_ls : list of read weightings, each `[batch_size, mem_size]`.

    Returns
    -------
    tpmem : updated temporal memory, `[batch_size, mem_size, mem_size]`.
    p : updated weighting, `[batch_size, mem_size]`.
    fseq : list of forward weightings, each `[batch_size, mem_size]`.
    bseq : list of backward weightings, each `[batch_size, mem_size]`.
    """
    # Decay `p` by the total amount written and add the new write weighting.
    # The sum runs over the memory locations of each batch element.
    p = (1 - rsum(ww, axis=1, keep_dims=True)) * p_tm1 + ww
    # Decay links touching the written locations, then add the outer product
    # of the previous weighting `p_tm1` with the write weighting.
    tpmem = (
        (1 - expand_dims(ww, axis=2) - expand_dims(ww, axis=1)) * tpmem_tm1
        + expand_dims(p_tm1, axis=2) @ expand_dims(ww, axis=1))
    tp_tpmem = transpose(tpmem, perm=[0, 2, 1])
    # Forward and backward weightings follow the temporal links from the
    # previous read weightings.
    fseq = [squeeze(expand_dims(x, axis=1) @ tpmem) for x in wr_tm1_ls]
    bseq = [squeeze(expand_dims(x, axis=1) @ tp_tpmem) for x in wr_tm1_ls]
    return tpmem, p, fseq, bseq
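# In equation form, per batch element, with w^w the write weighting, L the
# temporal memory, and w^r a previous read weighting (row vectors):
#
#   p_t       = (1 - sum_i w^w_t[i]) * p_{t-1} + w^w_t
#   L_t[i, j] = (1 - w^w_t[i] - w^w_t[j]) * L_{t-1}[i, j] + p_{t-1}[i] * w^w_t[j]
#   fseq      = w^r_{t-1} L_t
#   bseq      = w^r_{t-1} L_t^T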
def softmax(x, axis):
    return exp(x) / rsum(exp(x), axis=axis, keep_dims=True)
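# The softmax above exponentiates `x` directly and can overflow for large
# inputs. A numerically safer sketch (commented out; it assumes a reduce-max
# alias `rmax` in the same style as `rsum`, which is not defined here):
#
#   def softmax_stable(x, axis):
#       x = x - rmax(x, axis=axis, keep_dims=True)
#       return exp(x) / rsum(exp(x), axis=axis, keep_dims=True)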