def call(self, inputs, mask=None):
    """Apply the parametric ReLU activation to `inputs`.

    Positive values pass through unchanged; negative values are scaled
    by the learned `self.alpha` parameter.
    """
    positive_part = K.relu(inputs)
    if K.backend() == 'theano':
        # Theano requires the alpha parameter's broadcast pattern to be
        # made explicit before elementwise multiplication.
        broadcast_alpha = K.pattern_broadcast(self.alpha, self.param_broadcast)
        # (inputs - |inputs|) / 2 isolates the negative part of the input.
        negative_part = broadcast_alpha * (inputs - K.abs(inputs)) * 0.5
    else:
        negative_part = -self.alpha * K.relu(-inputs)
    return positive_part + negative_part
def call(self, inputs, mask=None):
    """Compute the PReLU output: max(0, x) + alpha * min(0, x)."""
    pos = K.relu(inputs)
    if K.backend() != 'theano':
        neg = -self.alpha * K.relu(-inputs)
    else:
        # Theano path: broadcast alpha explicitly, then extract the
        # negative half of the input as (x - |x|) * 0.5.
        alpha = K.pattern_broadcast(self.alpha, self.param_broadcast)
        neg = alpha * (inputs - K.abs(inputs)) * 0.5
    return pos + neg
def _attention_mlp(self, memory):
    """Pass `memory` through the stack of attention MLP layers.

    A ReLU nonlinearity is applied between layers, but not after the
    final layer (its output is returned linearly).
    """
    for layer_idx in range(self.attention_mlp_layers):
        memory = self._linear(
            memory,
            self.mlp_kernels[layer_idx],
            self.mlp_biases[layer_idx],
        )
        # Skip the activation on the last layer.
        if layer_idx < self.attention_mlp_layers - 1:
            memory = K.relu(memory)
    return memory
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit.

    Thin wrapper around the backend `K.relu`; `alpha` sets the slope
    for negative inputs and `max_value` caps the output if given.
    """
    result = K.relu(x, alpha=alpha, max_value=max_value)
    return result
def call(self, inputs):
    """Apply a leaky ReLU with negative slope `self.alpha`."""
    activated = K.relu(inputs, alpha=self.alpha)
    return activated
def relu6(x):
    """ReLU capped at 6, i.e. min(max(x, 0), 6)."""
    capped = K.relu(x, max_value=6)
    return capped
def relu6(x):
    """Rectified linear unit clipped to a maximum activation of 6."""
    return K.relu(x, max_value=6)
def relu(x, alpha=0., max_value=None):
    """Delegate to the backend ReLU.

    `alpha` is the slope applied to negative inputs; `max_value`, when
    not None, is an upper saturation bound on the output.
    """
    return K.relu(x, alpha=alpha, max_value=max_value)
def call(self, inputs):
    """Forward pass: backend ReLU with leak factor `self.alpha`."""
    return K.relu(inputs, alpha=self.alpha)