def step(self, x, states):
    # states carries the previous output plus the dropout masks for the
    # recurrent (B_U) and input (B_W) connections.
    prev_output = states[0]
    B_U = states[1]
    B_W = states[2]

    h = ternarize_dot(x * B_W, self.W) + self.b
    output = self.activation(h + ternarize_dot(prev_output * B_U, self.U))
    return output, [output]
def step(self, inputs, states):
    # Apply the input dropout mask (states[1]) before the ternarized dot product.
    if 0 < self.dropout < 1:
        h = ternarize_dot(inputs * states[1], self.kernel)
    else:
        h = ternarize_dot(inputs, self.kernel)
    if self.bias is not None:
        h = K.bias_add(h, self.bias)

    prev_output = states[0]
    # Apply the recurrent dropout mask (states[2]) to the previous output.
    if 0 < self.recurrent_dropout < 1:
        prev_output *= states[2]
    output = h + ternarize_dot(prev_output, self.recurrent_kernel)
    if self.activation is not None:
        output = self.activation(output)

    # Properly set learning phase on output tensor.
    if 0 < self.dropout + self.recurrent_dropout:
        output._uses_learning_phase = True
    return output, [output]
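Both step functions rely on ternarize_dot, which is not defined in this excerpt. A minimal sketch of what it might look like, assuming deterministic ternarization of the weights to {-H, 0, +H} with a straight-through estimator so the full-precision weights still receive gradients; the helper names _round_through and ternarize, and the threshold behavior, are illustrative assumptions rather than the layer's actual implementation.

from keras import backend as K

def _round_through(x):
    # Round in the forward pass; identity gradient in the backward pass
    # (straight-through estimator), assumed here.
    rounded = K.round(x)
    return x + K.stop_gradient(rounded - x)

def ternarize(W, H=1.0):
    # Clip weights to [-H, H], then snap them to {-H, 0, +H}.
    W = K.clip(W, -H, H)
    return H * _round_through(W / H)

def ternarize_dot(x, W):
    # Dot product of the inputs with the ternarized weight matrix.
    return K.dot(x, ternarize(W))

In this sketch the forward pass sees only ternary weights, while gradients flow to the underlying real-valued kernel, which is what allows the quantized recurrent layer to be trained with standard backpropagation.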