def call(self, inputs, mask=None):
    # Positive part of a PReLU-style activation.
    pos = K.relu(inputs)
    if K.backend() == 'theano':
        # Theano needs alpha's broadcast pattern declared explicitly;
        # (x - |x|) / 2 is the negative part of x.
        neg = (K.pattern_broadcast(self.alpha, self.param_broadcast) *
               (inputs - K.abs(inputs)) * 0.5)
    else:
        # Other backends broadcast alpha automatically.
        neg = -self.alpha * K.relu(-inputs)
    return pos + neg
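This call is the forward pass of a PReLU-style advanced activation: K.relu(inputs) keeps the positive part, and a learned alpha scales the negative part (the Theano branch rewrites -alpha * relu(-x) as alpha * (x - |x|) / 2 so alpha's broadcast pattern can be stated). It assumes the layer's build() has already created self.alpha and self.param_broadcast. Below is a minimal sketch of such a build(), loosely modelled on Keras's built-in PReLU; the class name and initializer choice are illustrative, not taken from the snippet above.

from keras import backend as K
from keras.layers import Layer

class SimplePReLU(Layer):
    def build(self, input_shape):
        # One learnable negative slope per feature (every axis after batch).
        param_shape = list(input_shape[1:])
        # Theano wants alpha's broadcast pattern spelled out; nothing is
        # shared across axes in this minimal version.
        self.param_broadcast = [False] * len(param_shape)
        self.alpha = self.add_weight(shape=param_shape,
                                     name='alpha',
                                     initializer='zeros')
        super(SimplePReLU, self).build(input_shape)

    # call(self, inputs, mask=None) would be exactly the method shown above.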
Example #3
def _attention_mlp(self, memory):
    # Pass the memory through a small MLP: one affine map per layer,
    # with ReLU between layers and no activation on the last one.
    for i in range(self.attention_mlp_layers):
        memory = self._linear(memory, self.mlp_kernels[i],
                              self.mlp_biases[i])
        if i < self.attention_mlp_layers - 1:
            memory = K.relu(memory)
    return memory
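The helper self._linear is not part of the snippet; the usual reading, and the assumption made here, is a plain affine projection of its input by the given kernel and bias:

from keras import backend as K

def _linear(self, x, kernel, bias):
    # Assumed helper: x @ kernel + bias, matching how self.mlp_kernels[i]
    # and self.mlp_biases[i] are consumed in _attention_mlp above.
    return K.bias_add(K.dot(x, kernel), bias)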
Example #4
def relu(x, alpha=0., max_value=None):
    # Thin wrapper exposing the backend ReLU as a Keras activation function.
    return K.relu(x, alpha=alpha, max_value=max_value)

def call(self, inputs):
    # LeakyReLU-style forward pass; self.alpha is the negative-side slope.
    return K.relu(inputs, alpha=self.alpha)
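Both snippets lean on the fact that K.relu covers the plain, leaky, and clipped variants through its alpha and max_value arguments. A quick check of the numerics (the input values are only illustrative):

import numpy as np
from keras import backend as K

x = K.constant(np.array([-2.0, -0.5, 0.0, 3.0, 8.0]))

print(K.eval(K.relu(x)))                 # plain ReLU:   0, 0, 0, 3, 8
print(K.eval(K.relu(x, alpha=0.1)))      # leaky ReLU:  -0.2, -0.05, 0, 3, 8
print(K.eval(K.relu(x, max_value=6.0)))  # clipped:      0, 0, 0, 3, 6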
Example #6
def relu6(x):
    # ReLU capped at 6, the activation used by the MobileNet family.
    return K.relu(x, max_value=6)
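Because relu6 is an ordinary Python function rather than a built-in activation string, a model saved with it has to be given the function again at load time. One common pattern (the file name below is just a placeholder):

from keras import backend as K
from keras.models import load_model

def relu6(x):
    return K.relu(x, max_value=6)

# The key must match the activation name recorded in the saved model config.
model = load_model('mobilenet.h5', custom_objects={'relu6': relu6})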