def LSTM_cell(self, h, c, x, name):
    # Standard LSTM update: input (i), output (o) and forget (f) gates,
    # a tanh candidate state, then the gated cell and hidden states.
    with tf.variable_scope(name):
        i, o, f = _gates([h, x], 'gates', 3)
        new_h = tf.nn.tanh(_linearX([h, x], 'new_h', h.shape[-1]))
        new_c = f * c + i * new_h
        new_h = tf.nn.tanh(new_c) * o
        return new_h, new_c
def DMN(self, prev, curr):
    # Memory-network cell: read from the value memory M with the per-skill
    # read weight w, then write the current interaction back via a GRU update.
    M = prev[1]
    q = tf.mod(curr, self.dataset.num_skills)
    k = tf.gather(self.ks, [q])
    w = tf.gather(self.wrs, [q])
    r = tf.matmul(w, M)
    h_out = tf.nn.relu(_linearX([r, k], 'h_out', self.d))
    v = tf.gather(self.vs, [curr])
    new_h = self.GRU_cell(M, v * tf.reshape(w, [-1, 1]), 'new_h')
    return h_out, new_h
def DKVMN(self, prev, curr):
    # DKVMN cell: read from the value memory M with the per-skill read
    # weight w, then update M with erase (e) and add (a) vectors.
    M = prev[1]
    q = tf.mod(curr, self.dataset.num_skills)
    k = tf.gather(self.ks, [q])
    # Tensors have no .reshape() method; use tf.reshape instead.
    w = tf.reshape(tf.gather(self.wrs, [q]), [-1, 1])
    r = tf.matmul(w, M, transpose_a=True)
    h_out = tf.nn.relu(_linearX([r, k], 'h_out', self.d))
    v = tf.gather(self.vs, [curr])
    e = tf.nn.sigmoid(_linear(v, 'e', self.d_v))
    a = tf.nn.tanh(_linear(v, 'a', self.d_v))
    new_h = M * (1 - e) + a
    return h_out, new_h
def ADMN(self, prev, curr):
    # ADMN cell: when the skill id changes (delta), fold the previous hidden
    # state into the value memory and re-read it, then advance the hidden
    # state with a GRU over the current interaction embedding.
    M, h, pre_q = prev[1:]
    q = tf.mod(curr, self.dataset.num_skills) + 1
    k = tf.gather(self.ks, [q])
    delta = tf.tile(tf.reshape(tf.not_equal(q, pre_q), [1, 1]), [1, self.d])
    # Only write to memory if the skill changed and a previous skill exists.
    d0 = tf.logical_and(
        delta, tf.greater(pre_q, tf.zeros([self.N, self.d], dtype=tf.int32)))
    w = tf.reshape(tf.gather(self.wrs, [q]), [-1, 1])
    new_Mv = tf.where(d0, self.GRU_cell(M, w * h, 'M1'), M)
    h = tf.where(delta, tf.matmul(w, new_Mv, transpose_a=True), h)
    h_out = tf.nn.relu(_linearX([h, k], 'h_out', self.d))
    v = _embed(curr, 'v', self.dataset.num_skills * 2, [1, self.d_v])
    new_h = self.GRU_cell(h, v, 'new_h')
    return h_out, new_Mv, new_h, q
def GRU_cell(self, h, x, name):
    # Standard GRU update with reset (r) and update (z) gates.
    with tf.variable_scope(name):
        r, z = _gates([h, x], 'gates', 2)
        new_h = tf.nn.tanh(_linearX([r * h, x], 'new_h', h.shape[-1]))
        return (1 - z) * h + z * new_h