Example #1
0
    def __init__(self,
                 num_units,
                 activation=None,
                 reuse=None,
                 kernel_initializer=None,
                 bias_initializer=None,
                 name=None,
                 dtype=None,
                 **kwargs):
        """Initialize the GRU cell.

        Args:
          num_units: int, the number of units in the cell.
          activation: activation function for the candidate state; defaults
            to `tanh` when not given.
          reuse: whether to reuse variables in an existing scope.
          kernel_initializer: initializer for the weight matrices.
            (Fixed: was misspelled `kernel_initialier`, inconsistent with the
            sibling cell in this file and the TF API.)
          bias_initializer: initializer for the bias vectors.
          name: string, name of the layer.
          dtype: default dtype of the layer.
          **kwargs: forwarded to the base layer constructor.
        """
        super(GRUCell, self).__init__(_reuse=reuse,
                                      name=name,
                                      dtype=dtype,
                                      **kwargs)
        _check_supported_dtypes(self.dtype)

        if context.executing_eagerly() and context.num_gpus() > 0:
            logging.warn("This is not optimized for performance.")
        # Inputs must be 2-D: [batch, features].
        self.input_spec = input_spec.InputSpec(ndim=2)
        self._num_units = num_units
        if activation:
            self._activation = activations.get(activation)
        else:
            self._activation = math_ops.tanh
        self._kernel_initializer = initializers.get(kernel_initializer)
        self._bias_initializer = initializers.get(bias_initializer)
Example #2
0
 def build(self, inputs_shape):
     """Create the gate and candidate weights/biases once the input depth is known.

     Builds four variables: gate kernel/bias (for the reset and update
     gates, hence the `2 *` factor) and candidate kernel/bias. Both
     kernels act on the concatenation of the inputs and the recurrent
     state, so their first dimension is `input_depth + num_units`.

     Args:
       inputs_shape: shape of the inputs; the last dimension must be known.

     Raises:
       ValueError: if the last dimension of `inputs_shape` is unknown.
     """
     if inputs_shape[-1] is None:
         raise ValueError(
             "Expected inputs.shape[-1] to be known, saw shape: %s" %
             str(inputs_shape))
     _check_supported_dtypes(self.dtype)
     # Use the full input depth; subtracting from it would misalign the
     # kernels with the actual [inputs, state] concatenation.
     input_depth = inputs_shape[-1]
     self._gate_kernel = self.add_variable(
         "gates/%s" % _WEIGHTS_VARIABLE_NAME,
         shape=[input_depth + self._num_units, 2 * self._num_units],
         initializer=self._kernel_initializer)
     # Gate biases default to 1.0 so the gates start mostly open.
     self._gate_bias = self.add_variable(
         "gates/%s" % _BIAS_VARIABLE_NAME,
         shape=[2 * self._num_units],
         initializer=(self._bias_initializer
                      if self._bias_initializer is not None else
                      init_ops.constant_initializer(1.0, dtype=self.dtype)))
     self._candidate_kernel = self.add_variable(
         "candidate/%s" % _WEIGHTS_VARIABLE_NAME,
         shape=[input_depth + self._num_units, self._num_units],
         initializer=self._kernel_initializer)
     # Candidate biases default to zero.
     self._candidate_bias = self.add_variable(
         "candidate/%s" % _BIAS_VARIABLE_NAME,
         shape=[self._num_units],
         initializer=(self._bias_initializer if self._bias_initializer
                      is not None else init_ops.zeros_initializer(
                          dtype=self.dtype)))
    def __init__(self,
                 num_units,
                 activation=None,
                 reuse=None,
                 kernel_initializer=None,
                 bias_initializer=None,
                 name=None,
                 dtype=None,
                 **kwargs):
        """Configure the continuous LSTM cell.

        Args:
          num_units: int, the number of units in the cell.
          activation: activation function for the candidate state; falls
            back to `tanh` when not supplied.
          reuse: whether to reuse variables in an existing scope.
          kernel_initializer: initializer for the weight matrices.
          bias_initializer: initializer for the bias vectors.
          name: string, name of the layer.
          dtype: default dtype of the layer.
          **kwargs: forwarded to the base layer constructor.
        """
        super(ContinuousLSTMCell, self).__init__(_reuse=reuse,
                                                 name=name,
                                                 dtype=dtype,
                                                 **kwargs)
        _check_supported_dtypes(self.dtype)

        if context.executing_eagerly() and context.num_gpus() > 0:
            logging.warn(
                "%s: Note that this cell is not optimized for performance. "
                "Please use tf.contrib.cudnn_rnn.CudnnGRU for better "
                "performance on GPU.", self)
        # This cell only accepts 2-D inputs: [batch, features].
        self.input_spec = input_spec.InputSpec(ndim=2)

        self._num_units = num_units
        # Truthy `activation` resolves through the registry, otherwise tanh.
        self._activation = (activations.get(activation)
                            if activation else math_ops.tanh)
        self._kernel_initializer = initializers.get(kernel_initializer)
        self._bias_initializer = initializers.get(bias_initializer)