Example #1
 def _neurons_forward_with_memory(self,
                                  x,
                                  s,
                                  name,
                                  f,
                                  fc_mem,
                                  output_dim=None,
                                  use_bias=True,
                                  truncate=False,
                                  w_reg=None):
     # If the memory is fully connected, concatenate the external input
     # with the previous state and reuse the plain forward pass
     if fc_mem:
         return self._neurons_forward(tf.concat([x, s], axis=1),
                                      name,
                                      f,
                                      output_dim,
                                      use_bias,
                                      truncate,
                                      w_reg=w_reg)
     # Otherwise, pass the state separately and let linker.neurons handle
     # the non-fully-connected memory
     dim = self._state_size if output_dim is None else output_dim
     return linker.neurons(dim,
                           x,
                           activation=f,
                           memory=s,
                           fc_memory=fc_mem,
                           use_bias=use_bias,
                           scope=name,
                           truncate=truncate,
                           weight_regularizer=w_reg)
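
When fc_mem is true, the helper merges the external input and the recurrent state along the feature axis so a single fully connected layer sees both at once. Below is a minimal standalone sketch of that concatenation; the tensor shapes are assumed for illustration and the snippet runs under eager execution, outside the library:

import tensorflow as tf

x = tf.zeros([32, 10])    # batch of external inputs, 10 features (assumed)
s = tf.zeros([32, 64])    # previous state, 64 units (assumed)
merged = tf.concat([x, s], axis=1)
print(merged.shape)       # (32, 74): one dense layer now covers input and state
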
Example #2
 def _neurons_forward(
     self, x, name, f, output_dim=None, use_bias=True, truncate=False,
     w_reg=None):
   assert name is not None and callable(f)
   # Fall back to the cell's state size when no output dimension is given
   dim = self._state_size if output_dim is None else output_dim
   return linker.neurons(
     dim, x, activation=f, use_bias=use_bias, truncate=truncate, scope=name,
     weight_regularizer=w_reg)
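
Example #2 only validates its arguments and resolves the output width before delegating to linker.neurons. A small standalone sketch of those two steps, with assumed stand-in values in place of the cell's attributes:

import tensorflow as tf

state_size = 128                 # stand-in for self._state_size (assumed)
name, f, output_dim = 'state_update', tf.tanh, None
assert name is not None and callable(f)
dim = state_size if output_dim is None else output_dim
print(dim)                       # 128: falls back to the state size
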
Example #3
 def neurons(self,
             x,
             s=None,
             num=None,
             fc_memory=True,
             is_gate=False,
             activation=None,
             scope=None,
             truncate=False,
             num_or_size_splits=None,
             weight_initializer=None,
             use_bias=None,
             bias_initializer=None,
             weight_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             **kwargs):
     # Derive the total neuron count from num_or_size_splits when num is
     # not given explicitly
     if num is None:
         if isinstance(num_or_size_splits, int):
             num = num_or_size_splits * self._state_size
         elif isinstance(num_or_size_splits, (list, tuple)):
             num = sum(num_or_size_splits)
         else:
             num = self._state_size
     # Gate activations default to sigmoid
     if activation is None and is_gate:
         activation = tf.sigmoid
     if weight_initializer is None:
         weight_initializer = getattr(self, '_weight_initializer', None)
     if use_bias is None:
         use_bias = getattr(self, '_use_bias', None)
     if bias_initializer is None:
         bias_initializer = getattr(self, '_bias_initializer', None)
     if weight_regularizer is None:
         weight_regularizer = getattr(self, '_weight_regularizer', None)
     if bias_regularizer is None:
         bias_regularizer = getattr(self, '_bias_regularizer', None)
     if activity_regularizer is None:
         activity_regularizer = getattr(self, '_activity_regularizer', None)
     _kwargs = getattr(self, '_kwargs', {})
     return linker.neurons(num=num,
                           external_input=x,
                           activation=activation,
                           memory=s,
                           fc_memory=fc_memory,
                           scope=scope,
                           use_bias=use_bias,
                           truncate=truncate,
                           num_or_size_splits=num_or_size_splits,
                           weight_initializer=weight_initializer,
                           bias_initializer=bias_initializer,
                           weight_regularizer=weight_regularizer,
                           bias_regularizer=bias_regularizer,
                           activity_regularizer=activity_regularizer,
                           **_kwargs,
                           **kwargs)
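
The num resolution at the top of Example #3 can be checked in isolation. The helper below is hypothetical (it is not part of the library) and simply mirrors that branch logic:

def resolve_num(state_size, num=None, num_or_size_splits=None):
    # An int asks for that many equally sized blocks, a list/tuple gives the
    # block sizes directly, and otherwise the state size is used as-is.
    if num is not None:
        return num
    if isinstance(num_or_size_splits, int):
        return num_or_size_splits * state_size
    if isinstance(num_or_size_splits, (list, tuple)):
        return sum(num_or_size_splits)
    return state_size

assert resolve_num(64, num_or_size_splits=4) == 256    # e.g. an LSTM-style cell with 4 blocks
assert resolve_num(64, num_or_size_splits=[64, 32]) == 96
assert resolve_num(64) == 64
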
Example #4
 def neurons(self,
             x,
             num=None,
             is_gate=False,
             activation=None,
             scope=None,
             truncate=False,
             num_or_size_splits=None,
             weight_initializer=None,
             use_bias=None,
             bias_initializer=None,
             weight_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             prune_frac=0,
             **kwargs):
     # Derive the total neuron count; unlike Example #3, this variant falls
     # back to the layer's output dimension, which must already be known
     if num is None:
         if isinstance(num_or_size_splits, int):
             assert self._output_dim is not None
             num = num_or_size_splits * self._output_dim
         elif isinstance(num_or_size_splits, (list, tuple)):
             num = sum(num_or_size_splits)
         else:
             assert self._output_dim is not None
             num = self._output_dim
     if activation is None and is_gate:
         activation = tf.sigmoid
     if weight_initializer is None:
         weight_initializer = getattr(self, '_weight_initializer', None)
     if use_bias is None:
         use_bias = getattr(self, '_use_bias', None)
     if bias_initializer is None:
         bias_initializer = getattr(self, '_bias_initializer', None)
     if weight_regularizer is None:
         weight_regularizer = getattr(self, '_weight_regularizer', None)
     if bias_regularizer is None:
         bias_regularizer = getattr(self, '_bias_regularizer', None)
     if activity_regularizer is None:
         activity_regularizer = getattr(self, '_activity_regularizer', None)
     return linker.neurons(num=num,
                           external_input=x,
                           activation=activation,
                           scope=scope,
                           use_bias=use_bias,
                           truncate=truncate,
                           num_or_size_splits=num_or_size_splits,
                           weight_initializer=weight_initializer,
                           bias_initializer=bias_initializer,
                           weight_regularizer=weight_regularizer,
                           bias_regularizer=bias_regularizer,
                           activity_regularizer=activity_regularizer,
                           prune_frac=prune_frac,
                           **kwargs)
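
When is_gate is set and no activation is supplied, Examples #3 and #4 both fall back to a sigmoid so the gate output stays in (0, 1). A minimal eager-mode sketch of that default, with illustrative input values:

import tensorflow as tf

is_gate, activation = True, None
if activation is None and is_gate:
    activation = tf.sigmoid            # default gate nonlinearity
gate = activation(tf.constant([-2.0, 0.0, 2.0]))
print(gate.numpy())                    # approximately [0.12 0.5 0.88]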