Example #1
  def __init__(
      self,
      state_size,
      activation='tanh',
      use_bias=True,
      weight_initializer='xavier_uniform',
      bias_initializer='zeros',
      input_gate=True,
      output_gate=True,
      forget_gate=True,
      with_peepholes=False,
      **kwargs):
    """
    :param state_size: state size, a positive int
    :param activation: activation, a string or callable
    :param use_bias: whether to use bias
    :param weight_initializer: weight initializer identifier
    :param bias_initializer: bias initializer identifier
    :param input_gate: whether to use an input gate
    :param output_gate: whether to use an output gate
    :param forget_gate: whether to use a forget gate
    :param with_peepholes: whether to use peephole connections
    """
    # Call parent's constructor
    RNet.__init__(self, BasicLSTMCell.net_name)

    # Attributes
    self._state_size = state_size
    self._activation = activations.get(activation, **kwargs)
    self._use_bias = checker.check_type(use_bias, bool)
    self._weight_initializer = initializers.get(weight_initializer)
    self._bias_initializer = initializers.get(bias_initializer)

    self._input_gate = checker.check_type(input_gate, bool)
    self._output_gate = checker.check_type(output_gate, bool)
    self._forget_gate = checker.check_type(forget_gate, bool)
    self._with_peepholes = checker.check_type(with_peepholes, bool)

    self._output_scale = state_size
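
A minimal instantiation sketch for context. Only the constructor signature
above comes from the source; the import path below is an assumption and may
differ in the actual tframe package.

  # Hypothetical import path -- adjust to wherever BasicLSTMCell lives.
  from tframe.nets.rnn_cells.lstms import BasicLSTMCell

  # A vanilla LSTM cell: all three gates enabled, no peepholes.
  cell = BasicLSTMCell(
      state_size=128,
      activation='tanh',
      input_gate=True,
      forget_gate=True,
      output_gate=True,
      with_peepholes=False)
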
Example #2
    def __init__(self,
                 output_dim,
                 memory_units=None,
                 mem_config=None,
                 use_mem_wisely=False,
                 weight_regularizer=None,
                 **kwargs):
        # Call parent's constructor
        RNet.__init__(self, self.net_name)

        # Attributes
        self.output_dim = output_dim
        self.memory_units = (self.MemoryUnit.parse_units(mem_config)
                             if memory_units is None else memory_units)
        self.memory_units = [mu for mu in self.memory_units if mu.size > 0]
        checker.check_type(self.memory_units, Ham.MemoryUnit)

        self._state_size = sum([mu.size for mu in self.memory_units])
        self._activation = activations.get('tanh', **kwargs)

        self._use_mem_wisely = use_mem_wisely
        self._truncate = kwargs.get('truncate', False)
        self._weight_regularizer = regularizers.get(weight_regularizer,
                                                    **kwargs)
        # self._use_global_reg = kwargs.get('global_reg', False)
        self._kwargs = kwargs
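
Both construction paths above (an explicit memory_units list, or a mem_config
parsed by MemoryUnit.parse_units) end with zero-size units being filtered out.
A standalone sketch of that pattern, using a hypothetical stand-in for
Ham.MemoryUnit and an assumed dash-separated config format:

    # Hypothetical stand-in class; the real Ham.MemoryUnit and its
    # config format may differ.
    class MemoryUnit:
        def __init__(self, size):
            self.size = size

        @staticmethod
        def parse_units(config):
            # Assumed format: dash-separated sizes, e.g. '3-0-5'
            return [MemoryUnit(int(s)) for s in config.split('-')]

    units = MemoryUnit.parse_units('3-0-5')
    units = [mu for mu in units if mu.size > 0]  # drop zero-size units
    assert [mu.size for mu in units] == [3, 5]
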
Example #3
  def __init__(
      self,
      state_size,
      activation='tanh',
      use_bias=True,
      weight_initializer='xavier_normal',
      bias_initializer='zeros',
      **kwargs):
    """
    :param state_size: state size, a positive int
    :param activation: activation, a string or callable
    :param use_bias: whether to use bias
    :param weight_initializer: weight initializer identifier
    :param bias_initializer: bias initializer identifier
    """
    # Call parent's constructor
    RNet.__init__(self, self.net_name)

    # Attributes
    self._state_size = state_size
    self._activation = activations.get(activation, **kwargs)
    self._use_bias = checker.check_type(use_bias, bool)
    self._weight_initializer = initializers.get(weight_initializer)
    self._bias_initializer = initializers.get(bias_initializer)
    self._output_scale = state_size
Example #4
  def __init__(
      self,
      activation='tanh',
      weight_initializer='xavier_normal',
      use_bias=True,
      bias_initializer='zeros',
      layer_normalization=False,
      dropout_rate=0.0,
      zoneout_rate=0.0,
      **kwargs):

    # Call parent's constructor
    RNet.__init__(self, self.net_name)
    RNeuroBase.__init__(
      self,
      activation=activation,
      weight_initializer=weight_initializer,
      use_bias=use_bias,
      bias_initializer=bias_initializer,
      layer_normalization=layer_normalization,
      zoneout_rate=zoneout_rate,
      dropout_rate=dropout_rate,
      **kwargs)

    self._output_scale_ = None
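
This constructor initializes both base classes explicitly by name rather than
through super(), so each base receives exactly the keywords it expects. A
minimal, self-contained sketch of that double-init pattern (the class names
below are illustrative stand-ins, not the tframe originals):

  class NetBase:
    def __init__(self, name):
      self.name = name

  class NeuroBase:
    def __init__(self, activation='tanh', dropout_rate=0.0, **kwargs):
      self.activation = activation
      self.dropout_rate = dropout_rate

  class Cell(NetBase, NeuroBase):
    def __init__(self, **kwargs):
      NetBase.__init__(self, 'cell')      # explicit call, not super()
      NeuroBase.__init__(self, **kwargs)

  cell = Cell(activation='relu', dropout_rate=0.1)
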
Example #5
  def __init__(self, mark=None):
    Model.__init__(self, mark)
    RNet.__init__(self, 'RecurrentNet')
    self.superior = self
    self._default_net = self
    # Attributes
    self._state = NestedTensorSlot(self, 'State')
    # mascot will be initiated as a placeholder with no shape specified
    # .. and will be put into initializer argument of tf.scan
    self._mascot = None
Example #6
    def __init__(self, state_size, mem_fc=True, **kwargs):
        # Call parent's constructor
        RNet.__init__(self, self.net_name)

        # Attributes
        self._state_size = state_size
        self._activation = activations.get('tanh', **kwargs)
        # self._use_bias = True
        self._weight_initializer = initializers.get('xavier_normal')
        self._bias_initializer = initializers.get('zeros')
        self._output_scale = state_size
        self._fully_connect_memories = mem_fc
Example #7
  def __init__(self, mark=None):
    Model.__init__(self, mark)
    RNet.__init__(self, 'RecurrentNet')
    self.superior = self
    self._default_net = self
    # Attributes
    self._state_slot = NestedTensorSlot(self, 'State')
    # mascot will be initiated as a placeholder with no shape specified
    # .. and will be put into initializer argument of tf.scan
    self._mascot = None
    self._while_loop_free_output = None

    # TODO: BETA
    self.last_scan_output = None
    self.grad_delta_slot = NestedTensorSlot(self, 'GradDelta')
    self._grad_buffer_slot = NestedTensorSlot(self, 'GradBuffer')
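
The 'mascot' comments in Examples #5 and #7 refer to the initializer argument
of tf.scan, which carries the initial recurrent state through the unrolled
loop. A minimal illustration with a trivial cell (TF1-style graph code; this
is not tframe's actual implementation):

  import tensorflow as tf

  elems = tf.constant([[1.], [2.], [3.]])  # 3 time steps, 1 feature
  init_state = tf.zeros([1])               # plays the 'mascot' role

  def step(prev_state, x):
    return prev_state + x                  # a trivial recurrent 'cell'

  # tf.scan threads init_state through step over the time axis
  states = tf.scan(step, elems, initializer=init_state)
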
Example #8
  def __init__(
      self,
      state_size,
      activation='tanh',
      weight_initializer='xavier_normal',
      input_gate=True,
      forget_gate=True,
      output_gate=True,
      use_g_bias=True,
      g_bias_initializer='zeros',
      use_i_bias=True,
      i_bias_initializer='zeros',
      use_f_bias=True,
      f_bias_initializer='zeros',
      use_o_bias=True,
      o_bias_initializer='zeros',
      output_as_mem=True,
      fully_connect_memory=True,
      activate_memory=True,
      truncate_grad=False,
      **kwargs):
    # Call parent's constructor
    RNet.__init__(self, self.net_name)

    # Attributes
    self._state_size = state_size
    self._input_gate = checker.check_type(input_gate, bool)
    self._forget_gate = checker.check_type(forget_gate, bool)
    self._output_gate = checker.check_type(output_gate, bool)
    self._activation = activations.get(activation, **kwargs)
    self._weight_initializer = initializers.get(weight_initializer)
    self._use_g_bias = checker.check_type(use_g_bias, bool)
    self._g_bias_initializer = initializers.get(g_bias_initializer)
    self._use_i_bias = checker.check_type(use_i_bias, bool)
    self._i_bias_initializer = initializers.get(i_bias_initializer)
    self._use_f_bias = checker.check_type(use_f_bias, bool)
    self._f_bias_initializer = initializers.get(f_bias_initializer)
    self._use_o_bias = checker.check_type(use_o_bias, bool)
    self._o_bias_initializer = initializers.get(o_bias_initializer)
    self._activate_mem = checker.check_type(activate_memory, bool)
    self._truncate_grad = checker.check_type(truncate_grad, bool)
    self._fc_memory = checker.check_type(fully_connect_memory, bool)
    self._output_as_mem = checker.check_type(output_as_mem, bool)
    self._kwargs = kwargs
Example #9
File: lstms.py  Project: winkywow/tframe
  def __init__(
      self,
      state_size,
      cell_activation='sigmoid',                # g
      cell_activation_range=(-2, 2),
      memory_activation='sigmoid',              # h
      memory_activation_range=(-1, 1),
      weight_initializer='random_uniform',
      weight_initial_range=(-0.1, 0.1),
      use_cell_bias=False,
      cell_bias_initializer='random_uniform',
      cell_bias_init_range=(-0.1, 0.1),
      use_in_bias=True,
      in_bias_initializer='zeros',
      use_out_bias=True,
      out_bias_initializer='zeros',
      truncate=True,
      forward_gate=True,
      **kwargs):

    # Call parent's constructor
    RNet.__init__(self, OriginalLSTMCell.net_name)

    # Set state size
    self._state_size = state_size

    # Set activation
    # .. In LSTM98, cell activation is referred to as 'g',
    # .. while memory activation is 'h' and gate activation is 'f'
    self._cell_activation = activations.get(
      cell_activation, range=cell_activation_range)
    self._memory_activation = activations.get(
      memory_activation, range=memory_activation_range)
    self._gate_activation = activations.get('sigmoid')

    # Set weight and bias configs
    self._weight_initializer = initializers.get(
      weight_initializer, range=weight_initial_range)
    self._use_cell_bias = use_cell_bias
    self._cell_bias_initializer = initializers.get(
      cell_bias_initializer, range=cell_bias_init_range)
    self._use_in_bias = use_in_bias
    self._in_bias_initializer = initializers.get(in_bias_initializer)
    self._use_out_bias = use_out_bias
    self._out_bias_initializer = initializers.get(out_bias_initializer)

    if kwargs.get('rule97', False):
      self._cell_bias_initializer = self._weight_initializer
      self._in_bias_initializer = self._weight_initializer

    # Additional options
    self._truncate = truncate
    self._forward_gate = forward_gate

    # ...
    self._num_splits = 3
    self._output_scale = state_size
    self._h_size = (state_size * self._num_splits if self._forward_gate else
                    state_size)

    # TODO: BETA
    self.compute_gradients = self.truncated_rtrl
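
For reference, LSTM98 (Hochreiter & Schmidhuber, 1997) squashes the cell input
g to (-2, 2) and the memory readout h to (-1, 1) using scaled sigmoids, which
is presumably what the range arguments above configure. A sketch of such a
ranged sigmoid, assuming activations.get rescales a standard sigmoid (the real
helper may work differently):

  import numpy as np

  def ranged_sigmoid(x, lo=-2.0, hi=2.0):
    s = 1.0 / (1.0 + np.exp(-x))  # standard sigmoid in (0, 1)
    return lo + (hi - lo) * s     # rescaled to (lo, hi)

  # g-style activation from LSTM98: 4 * sigmoid(x) - 2
  print(ranged_sigmoid(np.array([-10., 0., 10.])))  # ~ [-2.  0.  2.]
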