Code example #1
File: layers.py Project: manzar96/synesthesia
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              trainable=True,
              name=None,
              **kwargs):
     super(MultiInputDense, self).__init__(trainable=trainable,
                                           name=name,
                                           **kwargs)
     self.bias = None
     self.units = units
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_regularizer = kernel_regularizer
     self.bias_regularizer = bias_regularizer
     self.activity_regularizer = activity_regularizer
     self.kernels = []
     self.input_spec = [
         base.InputSpec(min_ndim=2),
         base.InputSpec(min_ndim=2)
     ]
Code example #2
File: noisy_dense_impl.py Project: wenkesj/alchemy
 def __init__(self,
              num_units,
              sigma0=0.5,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              kernel_trainable=True,
              trainable=True,
              name=None,
              **kwargs):
     super(NoisyDense,
           self).__init__(trainable=trainable,
                          name=name,
                          activity_regularizer=activity_regularizer,
                          **kwargs)
     self.num_units = num_units
     self.sigma0 = ops.convert_to_tensor(sigma0, dtype=dtypes.float32)
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_regularizer = kernel_regularizer
     self.bias_regularizer = bias_regularizer
     self.kernel_constraint = kernel_constraint
     self.bias_constraint = bias_constraint
     self.kernel_trainable = kernel_trainable
     self.input_spec = base.InputSpec(min_ndim=2)
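
For context: the `sigma0` argument matches the NoisyNet recipe (Fortunato et al.), where weights are perturbed by learned noise on every forward pass. A minimal NumPy sketch of that sampling step, with illustrative names not taken from the project:

import numpy as np

def noisy_dense_sample(mu_w, sigma_w, x, rng):
    # w = mu + sigma * eps, with fresh noise eps per forward pass
    eps = rng.standard_normal(mu_w.shape)
    return x @ (mu_w + sigma_w * eps)

rng = np.random.default_rng(0)
in_dim, units = 4, 3
mu = 0.1 * rng.standard_normal((in_dim, units))
sigma = np.full((in_dim, units), 0.5 / np.sqrt(in_dim))  # sigma0 / sqrt(fan-in)
y = noisy_dense_sample(mu, sigma, rng.standard_normal((2, in_dim)), rng)
print(y.shape)  # (2, 3)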
Code example #3
    def __init__(
            self,
            num_units,
            W_gate,
            b_gate,
            W_cand,
            b_cand,
            # Non-intensive (Edit on 9/28)
            # g_percmask,
            # c_percmask,
            percinc,
            activation=None,
            reuse=None,
            kernel_initializer=None,
            bias_initializer=None,
            name=None):
        super(StochasticPercincBinaryGRUCell, self).__init__(_reuse=reuse,
                                                             name=name)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._num_units = num_units
        self._activation = activation or math_ops.tanh
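        # Note: the initializer arguments below are accepted but left unused;
        # the gate and candidate weights/biases are injected directly via
        # W_gate, b_gate, W_cand and b_cand.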
        self._kernel_initializer = None
        self._bias_initializer = None
        self._gate_kernel = W_gate
        self._gate_bias = b_gate
        self._candidate_kernel = W_cand
        self._candidate_bias = b_cand
        self.percinc = percinc
Code example #4
 def build(self, input_shape):
     input_shape = tensor_shape.TensorShape(input_shape)
     if input_shape[-1].value is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     self.input_spec = base.InputSpec(min_ndim=2,
                                      axes={-1: input_shape[-1].value})
     self.kernel = self.add_variable(
         'kernel',
         shape=[input_shape[-1].value, self.units],
         initializer=self.kernel_initializer,
         regularizer=self.kernel_regularizer,
         constraint=self.kernel_constraint,
         dtype=self.dtype,
         trainable=True)
     if self.use_bias:
         self.bias = self.add_variable('bias',
                                       shape=[
                                           self.units,
                                       ],
                                       initializer=self.bias_initializer,
                                       regularizer=self.bias_regularizer,
                                       constraint=self.bias_constraint,
                                       dtype=self.dtype,
                                       trainable=True)
     else:
         self.bias = None
     self.built = True
Code example #5
File: gdn.py Project: MandeepSinghthakur/compression
    def build(self, input_shape):
        channel_axis = self._channel_axis()
        input_shape = tensor_shape.TensorShape(input_shape)
        num_channels = input_shape[channel_axis].value
        if num_channels is None:
            raise ValueError("The channel dimension of the inputs to `GDN` "
                             "must be defined.")
        self._input_rank = input_shape.ndims
        self.input_spec = base.InputSpec(ndim=input_shape.ndims,
                                         axes={channel_axis: num_channels})

        self.beta = self._beta_parameterization(name="beta",
                                                shape=[num_channels],
                                                dtype=self.dtype,
                                                getter=self.add_variable,
                                                initializer=init_ops.Ones())

        self.gamma = self._gamma_parameterization(
            name="gamma",
            shape=[num_channels, num_channels],
            dtype=self.dtype,
            getter=self.add_variable,
            initializer=init_ops.Identity(gain=self._gamma_init))

        self.built = True
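
The beta and gamma variables built here feed the generalized divisive normalization transform of Ballé et al. A NumPy sketch of the standard formulation, y_i = x_i / sqrt(beta_i + sum_j gamma_ji * x_j^2), under the initializations used above:

import numpy as np

def gdn(x, beta, gamma):
    # x: (batch, channels); divisive normalization across channels
    return x / np.sqrt(beta + (x ** 2) @ gamma)

channels = 3
x = np.random.default_rng(0).standard_normal((2, channels))
beta = np.ones(channels)        # matches init_ops.Ones()
gamma = 0.1 * np.eye(channels)  # matches init_ops.Identity(gain=gamma_init)
print(gdn(x, beta, gamma).shape)  # (2, 3)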
Code example #6
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        input_dim = input_shape[-1].value
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        self.kernel = self.add_variable(name="kernel",
                                        shape=kernel_shape,
                                        initializer=self.kernel_initializer,
                                        trainable=True,
                                        dtype=self.dtype)
        if self.use_wn:
            self.kernel = tf_utils.weight_norm(self.kernel, self.name)
        img_kernel = tf.reshape(self.kernel, shape=(-1, self.filters))
        tf_utils.plot_2d_tensor(img_kernel, "%s_kernel" % self.name)
        tf.summary.histogram("%s_kernel" % self.name, self.kernel)

        if self.add_bias:
            self.bias = self.add_variable(name="bias",
                                          shape=(self.filters, ),
                                          initializer=tf.zeros_initializer(),
                                          trainable=True,
                                          dtype=self.dtype)
            tf.summary.histogram("%s_bias" % self.name, self.bias)
        else:
            self.bias = None
        self.input_spec = base.InputSpec(ndim=self.rank + 2,
                                         axes={-1: input_dim})

        self.built = True
Code example #7
File: nn.py Project: mverzett/DeepHGCal
 def __init__(self,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              **kwargs):
   super(FilterWiseDense, self).__init__(
       trainable=trainable,
       name=name,
       activity_regularizer=activity_regularizer,
       **kwargs)
   self.activation = activation
   self.use_bias = use_bias
   self.kernel_initializer = kernel_initializer
   self.bias_initializer = bias_initializer
   self.kernel_regularizer = kernel_regularizer
   self.bias_regularizer = bias_regularizer
   self.kernel_constraint = kernel_constraint
   self.bias_constraint = bias_constraint
   self.input_spec = base.InputSpec(min_ndim=2)
Code example #8
File: layers.py Project: manzar96/synesthesia
 def build(self, input_shapes):
     input_shapes = [
         tensor_shape.TensorShape(input_shape)
         for input_shape in input_shapes
     ]
     for i, input_shape in enumerate(input_shapes):
         if input_shape[-1].value is None:
             raise ValueError('The last dimension of the inputs to '
                              '`MultiInputDense` should be defined.'
                              ' Found `None`.')
         self.input_spec[i] = base.InputSpec(
             min_ndim=2, axes={-1: input_shape[-1].value})
         self.kernels.append(
             self.add_variable('kernel_input_{}'.format(i),
                               shape=[input_shape[-1].value, self.units],
                               initializer=self.kernel_initializer,
                               regularizer=self.kernel_regularizer,
                               dtype=self.dtype,
                               trainable=True))
     if self.use_bias:
         self.bias = self.add_variable('bias',
                                       shape=[
                                           self.units,
                                       ],
                                       initializer=self.bias_initializer,
                                       regularizer=self.bias_regularizer,
                                       dtype=self.dtype,
                                       trainable=True)
     else:
         self.bias = None
     self.built = True
Code example #9
    def __init__(self,
                 num_spatial,
                 num_output=10,
                 depth=1,
                 relu_units=5,
                 gauss_units=5,
                 initializer=None,
                 name=None):
        """Initialize the parameters for an LSTM cell.

        Args:
          initializer: (optional) The initializer to use for the weight and
            projection matrices.
          name: String, the name of the layer. Layers with the same name will
            share weights, but to avoid mistakes we require reuse=True in such
            cases.

          When restoring from CudnnLSTM-trained checkpoints, use
          `CudnnCompatibleLSTMCell` instead.
        """
        super(JanSparseCell2, self).__init__(_reuse=False, name=name)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._initializer = initializer
        self._num_spatial = num_spatial
        self._num_output = num_output
        self._output_size = num_output

        self._relu_units = relu_units
        self._gauss_units = gauss_units
        self._depth = depth

        self._first = tf.layers.Dense(units=num_output * 10)
Code example #10
File: rnn_cell.py Project: xiaohanghang/Nabu-MSSS
    def __init__(self,
                 num_capsules,
                 capsule_dim,
                 routing_iters,
                 activation=None,
                 input_probability_fn=None,
                 recurrent_probability_fn=None,
                 kernel_initializer=None,
                 logits_initializer=None,
                 reuse=None,
                 name=None):
        super(RecCapsuleCell, self).__init__(_reuse=reuse, name=name)

        #For the moment expecting inputs to be 3-dimensional at every time step.
        #[batch_size x num_in_capsules X dim_in_capsules]
        self.input_spec = base_layer.InputSpec(ndim=3)

        self.num_capsules = num_capsules
        self.capsule_dim = capsule_dim
        self.kernel_initializer = kernel_initializer or capsule_initializer()
        self.logits_initializer = logits_initializer or tf.zeros_initializer()
        self.routing_iters = routing_iters
        self._activation = activation or ops.squash
        self.input_probability_fn = input_probability_fn or tf.nn.softmax
        self.recurrent_probability_fn = recurrent_probability_fn or tf.nn.sigmoid
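
The `ops.squash` default refers to the capsule squashing nonlinearity of Sabour et al.; a NumPy sketch of that standard definition, assuming squashing along the capsule-dimension axis:

import numpy as np

def squash(s, axis=-1, eps=1e-8):
    # Scales each capsule vector's norm into [0, 1) while keeping its direction.
    sq_norm = np.sum(s ** 2, axis=axis, keepdims=True)
    return (sq_norm / (1.0 + sq_norm)) * s / np.sqrt(sq_norm + eps)

caps = np.random.default_rng(0).standard_normal((2, 5, 8))
out = squash(caps)  # (batch_size, num_in_capsules, dim_in_capsules)
print(np.linalg.norm(out, axis=-1).max() < 1.0)  # True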
Code example #11
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=None,
              kernel_rho_initializer=init_ops.constant_initializer(-3),
              bias_rho_initializer=init_ops.constant_initializer(-3),
              kernel_prior_density=gaussian_mixture_density(1, 0.1, 0),
              bias_prior_density=gaussian_mixture_density(1, 0.1, 0),
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              **kwargs):
     super(BayesianDense,
           self).__init__(trainable=trainable,
                          name=name,
                          activity_regularizer=activity_regularizer,
                          **kwargs)
     self.units = units
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_rho_initializer = kernel_rho_initializer
     self.bias_rho_initializer = bias_rho_initializer
     self.kernel_prior_density = kernel_prior_density
     self.bias_prior_density = bias_prior_density
     self.kernel_constraint = kernel_constraint
     self.bias_constraint = bias_constraint
     self.input_spec = base.InputSpec(min_ndim=2)
Code example #12
File: dsrnn.py Project: samersaabjr/DSRNN
    def __init__(self,
                 num_units,
                 c_n=1,
                 activation=None,
                 reuse=None,
                 name=None,
                 dtype=None,
                 **kwargs):
        super(DSRNNCell, self).__init__(_reuse=reuse,
                                        name=name,
                                        dtype=dtype,
                                        **kwargs)
        if context.executing_eagerly() and context.num_gpus() > 0:
            logging.warn(
                "%s: Note that this cell is not optimized for performance. "
                "Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better "
                "performance on GPU.", self)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._c_n = c_n
        if (not isinstance(self._c_n, int)) or (self._c_n <= 0):
            raise ValueError("Expected integer c_n > 0")
        self._num_units = num_units

        if activation:
            self._activation = activations.get(activation)
        else:
            self._activation = math_ops.tanh
Code example #13
    def __init__(self,
                 num_units,
                 forget_bias=1.0,
                 activation=None,
                 reuse=None,
                 name=None,
                 dimensions=2):
        """Initialize the multi dimensional LSTM cell.
        Args:
          num_units: int, The number of units in the LSTM cell.
          forget_bias: float, The bias added to forget gates (see above).
            Must be set to `0.0` manually when restoring from CudnnLSTM-trained
            checkpoints.
          activation: Activation function of the inner states.  Default: `tanh`.
          reuse: (optional) Python boolean describing whether to reuse variables
            in an existing scope.  If not `True`, and the existing scope already has
            the given variables, an error is raised.
          name: String, the name of the layer. Layers with the same name will
            share weights, but to avoid mistakes we require reuse=True in such
            cases.
          When restoring from CudnnLSTM-trained checkpoints, must use
          `CudnnCompatibleLSTMCell` instead.
        """
        super(MDLSTMCell, self).__init__(_reuse=reuse, name=name)
        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._num_units = num_units
        self._dimensions = dimensions
        self._forget_bias = forget_bias
        self._activation = activation or math_ops.tanh
Code example #14
    def __init__(self,
                 num_units,
                 num_slots=1,
                 num_heads=16,
                 initializer=None,
                 forget_bias=1.0,
                 reuse=None,
                 name=None,
                 dtype=None,
                 **kwargs):
        super(RMCell, self).__init__(_reuse=reuse,
                                     name=name,
                                     dtype=dtype,
                                     **kwargs)
        if context.executing_eagerly() and context.num_gpus() > 0:
            logging.warn(
                "%s: Note that this cell is not optimized for performance. "
                "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
                "performance on GPU.", self)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._num_units = num_units
        self._num_slots = num_slots
        self._num_heads = num_heads
        self._initializer = initializers.get(initializer)
        self._forget_bias = forget_bias
        self._activation = math_ops.tanh

        self._state_size = num_units
        self._output_size = num_units
Code example #15
 def build(self, input_shape):
     from tensorflow.python.layers import base
     from tensorflow.python.framework import tensor_shape
     input_shape = tensor_shape.TensorShape(input_shape)
     if input_shape[-1].value is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     self.input_spec = base.InputSpec(min_ndim=2,
                                      axes={-1: input_shape[-1].value})
     self.kernel = self.add_variable('kernel',
                                     shape=[input_shape[-1].value, self.units],
                                     initializer=self.kernel_initializer,
                                     regularizer=self.kernel_regularizer,
                                     constraint=self.kernel_constraint,
                                     dtype=self.dtype,
                                     trainable=True)
     if self.mode == tf.estimator.ModeKeys.TRAIN:
         if self.mask is None:
             mask = tf.ones_like(self.kernel)
             self.mask = tf.nn.dropout(mask, keep_prob=self.keep_prob) * self.keep_prob
         self.kernel = self.kernel * self.mask
     if self.use_bias:
         self.bias = self.add_variable('bias',
                                       shape=[self.units, ],
                                       initializer=self.bias_initializer,
                                       regularizer=self.bias_regularizer,
                                       constraint=self.bias_constraint,
                                       dtype=self.dtype,
                                       trainable=True)
     else:
         self.bias = None
     self.built = True
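
The TRAIN branch above freezes a DropConnect-style mask over the kernel: `tf.nn.dropout` on a tensor of ones returns 1/keep_prob where kept and 0 where dropped, so multiplying by keep_prob recovers a {0, 1} mask. A NumPy sketch of the equivalent masking:

import numpy as np

rng = np.random.default_rng(0)
kernel = rng.standard_normal((4, 3)).astype(np.float32)
keep_prob = 0.8
mask = (rng.random(kernel.shape) < keep_prob).astype(np.float32)
masked_kernel = kernel * mask  # zeroes individual weights, not whole units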
Code example #16
 def build(self, input_shape):
     input_shape = tensor_shape.TensorShape(input_shape)
     if input_shape[-1].value is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     self.input_spec = base.InputSpec(min_ndim=2,
                                      axes={-1: input_shape[-1].value})
     kernel = self.add_variable('kernel',
                                shape=[input_shape[-1].value, self.units],
                                initializer=self.kernel_initializer,
                                regularizer=self.kernel_regularizer,
                                constraint=self.kernel_constraint,
                                dtype=self.dtype,
                                trainable=True)
     if self.weight_norm:
         self.g = self.add_variable("wn/g",
                                    shape=(self.units, ),
                                    initializer=init_ops.ones_initializer(),
                                    dtype=kernel.dtype,
                                    trainable=True)
         self.kernel = nn_impl.l2_normalize(kernel, dim=0) * self.g
     else:
         self.kernel = kernel
     if self.use_bias:
         self.bias = self.add_variable('bias',
                                       shape=(self.units, ),
                                       initializer=self.bias_initializer,
                                       regularizer=self.bias_regularizer,
                                       constraint=self.bias_constraint,
                                       dtype=self.dtype,
                                       trainable=True)
     else:
         self.bias = None
     self.built = True
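
Here `nn_impl.l2_normalize(kernel, dim=0)` normalizes each kernel column (one per output unit), so the effective weight is w = g * v / ||v||, i.e. weight normalization in the sense of Salimans & Kingma. A NumPy sketch:

import numpy as np

def weight_norm_kernel(v, g):
    # w[:, j] = g[j] * v[:, j] / ||v[:, j]||: direction from v, scale from g
    return g * v / np.linalg.norm(v, axis=0, keepdims=True)

v = np.random.default_rng(0).standard_normal((4, 3))
g = np.ones(3)  # matches init_ops.ones_initializer() above
w = weight_norm_kernel(v, g)
print(np.allclose(np.linalg.norm(w, axis=0), g))  # True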
Code example #17
File: lateral_impl.py Project: wenkesj/alchemy
 def __init__(self,
              allow_self_connections=False,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              kernel_trainable=True,
              trainable=True,
              name=None,
              **kwargs):
     super(Lateral,
           self).__init__(trainable=trainable,
                          name=name,
                          activity_regularizer=activity_regularizer,
                          **kwargs)
     self.allow_self_connections = ops.convert_to_tensor(
         allow_self_connections, dtype=dtypes.bool)
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_regularizer = kernel_regularizer
     self.bias_regularizer = bias_regularizer
     self.kernel_constraint = kernel_constraint
     self.bias_constraint = bias_constraint
     self.kernel_trainable = kernel_trainable
     self.input_spec = base.InputSpec(min_ndim=2)
Code example #18
File: lateral_impl.py Project: wenkesj/alchemy
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if input_shape[-1].value is None:
            raise ValueError('The last dimension of the inputs to `Dense` '
                             'should be defined. Found `None`.')
        input_dim = input_shape[-1].value
        self.input_spec = base.InputSpec(min_ndim=2, axes={-1: input_dim})
        self.kernel = self.add_variable('kernel',
                                        shape=[input_dim, input_dim],
                                        initializer=self.kernel_initializer,
                                        regularizer=self.kernel_regularizer,
                                        constraint=self.kernel_constraint,
                                        dtype=self.dtype,
                                        trainable=self.kernel_trainable)

        self.kernel = control_flow_ops.cond(
            self.allow_self_connections,
            true_fn=lambda: self.kernel,
            false_fn=lambda: gen_array_ops.matrix_set_diag(
                self.kernel, array_ops.zeros(input_dim)))

        if self.use_bias:
            self.bias = self.add_variable('bias',
                                          shape=[
                                              input_dim,
                                          ],
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          constraint=self.bias_constraint,
                                          dtype=self.dtype,
                                          trainable=True)
        else:
            self.bias = None
        self.built = True
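
The `matrix_set_diag` branch zeroes the kernel's diagonal so that a unit never feeds back into itself when self-connections are disallowed; the NumPy equivalent:

import numpy as np

kernel = np.random.default_rng(0).standard_normal((4, 4))
np.fill_diagonal(kernel, 0.0)  # same effect as matrix_set_diag(kernel, zeros)
assert np.all(np.diag(kernel) == 0.0)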
Code example #19
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              weight_norm=True,
              mean_only_batch_norm=True,
              name=None,
              **kwargs):
     super(Dense, self).__init__(trainable=trainable,
                                 name=name,
                                 activity_regularizer=activity_regularizer,
                                 **kwargs)
     self.units = units
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_regularizer = kernel_regularizer
     self.bias_regularizer = bias_regularizer
     self.kernel_constraint = kernel_constraint
     self.bias_constraint = bias_constraint
     self.input_spec = base.InputSpec(min_ndim=2)
      self.weight_norm = weight_norm
      self.mean_only_batch_norm = mean_only_batch_norm
Code example #20
    def __init__(self,
                 num_units,
                 conv_matrix,
                 n_nodes,
                 recurrent_min_abs=0,
                 recurrent_max_abs=None,
                 recurrent_kernel_initializer=None,
                 input_kernel_initializer=None,
                 activation=nn_ops.relu,
                 projection_fn=None,
                 reuse=None,
                 name=None):
        super(GConvIndRNNCell, self).__init__(_reuse=reuse, name=name)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._num_units = num_units
        self._conv_matrix = conv_matrix
        self._n_nodes = n_nodes
        self._recurrent_min_abs = recurrent_min_abs
        self._recurrent_max_abs = recurrent_max_abs
        self._recurrent_kernel_initializer = recurrent_kernel_initializer
        self._input_kernel_initializer = input_kernel_initializer
        self._activation = activation
        self._projection_fn = projection_fn
Code example #21
    def build(self, input_shape):
        if input_shape[-1].value is None:
            raise ValueError('The last dimension of the inputs to `GCN` '
                             'should be defined. Found `None`.')
        self._input_depth = input_shape[-1].value
        self._n_nodes = input_shape[-2].value
        self._input_spec = base.InputSpec(min_ndim=2,
                                          axes={-1: input_shape[-1].value})

        self._conv_kernel = self.add_variable(
            "conv_kernel", [self._input_depth, self._num_units],
            dtype=self.dtype,
            initializer=self._kernel_initializer,
            regularizer=self._kernel_regularizer,
            trainable=True)

        if self._use_bias:
            self._bias = self.add_variable("bias",
                                           shape=[self._num_units],
                                           initializer=self._bias_initializer,
                                           regularizer=self._bias_regularizer,
                                           trainable=True)
        else:
            self._bias = None
        self.built = True
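
The `conv_kernel` built here is the per-layer weight of a graph convolution. Assuming the usual propagation rule H' = A_hat @ H @ W (Kipf & Welling), a NumPy sketch of the call this build step supports (A_hat is a placeholder normalized adjacency, not the project's actual input):

import numpy as np

rng = np.random.default_rng(0)
n_nodes, input_depth, num_units = 5, 8, 4
a_hat = np.eye(n_nodes)  # placeholder normalized adjacency matrix
h = rng.standard_normal((n_nodes, input_depth))
w = rng.standard_normal((input_depth, num_units))
out = a_hat @ h @ w  # aggregate over neighbors, then project
print(out.shape)  # (5, 4)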
Code example #22
File: MultiSparse.py Project: kangtianyu/RINC
 def __init__(self,
              units,
              smatrix,
              smooth_num,
              activation=None,
              use_bias=False,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              trainable=True,
              name=None,
              **kwargs):
     super(MultiSparse,
           self).__init__(trainable=trainable,
                          name=name,
                          activity_regularizer=activity_regularizer,
                          **kwargs)
     self.units = units
     self.smatrix = smatrix
     self.smooth_num = smooth_num
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_regularizer = kernel_regularizer
     self.bias_regularizer = bias_regularizer
     self.kernel_constraint = kernel_constraint
     self.bias_constraint = bias_constraint
     self.input_spec = base.InputSpec(min_ndim=2)
Code example #23
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              trainable=True,
              name=None,
              **kwargs):
   super(MaskedFullyConnected, self).__init__(
       trainable=trainable,
       name=name,
       activity_regularizer=activity_regularizer,
       **kwargs)
   self.units = units
   self.activation = activation
   self.use_bias = use_bias
   self.kernel_initializer = kernel_initializer
   self.bias_initializer = bias_initializer
   self.kernel_regularizer = kernel_regularizer
   self.bias_regularizer = bias_regularizer
   self.input_spec = base.InputSpec(min_ndim=2)
Code example #24
 def build(self, input_shape):
     input_shape = tf.TensorShape(input_shape)
     in_size = input_shape.with_rank_at_least(2)[-1].value
     self.input_spec = base.InputSpec(min_ndim=2, axes={-1: in_size})
     self.kernel_mu = self.add_variable('posterior_kernel_mu',
                                        shape=[in_size, self.units],
                                        initializer=self.mu_initializer,
                                        dtype=self.dtype,
                                        trainable=True)
     self.kernel_rho = self.add_variable('posterior_kernel_rho',
                                         shape=[in_size, self.units],
                                         initializer=self.rho_initializer,
                                         dtype=self.dtype,
                                         trainable=True)
     if self.use_bias:
         self.bias_mu = self.add_variable(
             'posterior_bias_mu',
             shape=[
                 self.units,
             ],
             initializer=init.zeros_initializer(),
             dtype=self.dtype,
             trainable=True)
         self.bias_rho = self.add_variable('posterior_bias_rho',
                                           shape=[
                                               self.units,
                                           ],
                                           initializer=self.rho_initializer)
     else:
         self.bias_mu = None
         self.bias_rho = None
     self.built = True
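
The mu/rho pairs above are the usual Bayes-by-Backprop posterior parameterization: weights are sampled as w = mu + softplus(rho) * eps, which is why a rho initializer of -3 (as in code example #11) yields a small initial sigma, softplus(-3) ≈ 0.049. A NumPy sketch, assuming the softplus parameterization:

import numpy as np

def sample_weights(mu, rho, rng):
    # Reparameterized sample; softplus keeps sigma positive.
    sigma = np.log1p(np.exp(rho))
    return mu + sigma * rng.standard_normal(mu.shape)

rng = np.random.default_rng(0)
mu = np.zeros((4, 3))
rho = np.full((4, 3), -3.0)  # near-deterministic start
print(sample_weights(mu, rho, rng).std())  # ~0.05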
Code example #25
  def __init__(self, num_units, forget_bias=1.0,
               state_is_tuple=True, activation=None, reuse=None, name=None):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
        Must be set to `0.0` manually when restoring from CudnnLSTM-trained
        checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  The latter behavior will soon be deprecated.
      activation: Activation function of the inner states.  Default: `tanh`.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases.

      When restoring from CudnnLSTM-trained checkpoints, must use
      `CudnnCompatibleLSTMCell` instead.
    """
    super(BasicLSTMCell, self).__init__(_reuse=reuse, name=name)
    if not state_is_tuple:
      logging.warn("%s: Using a concatenated state is slower and will soon be "
                   "deprecated.  Use state_is_tuple=True.", self)

    # Inputs must be 2-dimensional.
    self.input_spec = base_layer.InputSpec(ndim=2)

    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation or math_ops.tanh
Code example #26
    def __init__(self,
                 num_units,
                 recurrent_min_abs=0,
                 recurrent_max_abs=None,
                 input_initializer=None,
                 recurrent_initializer=None,
                 activation=None,
                 reuse=None,
                 name=None,
                 batch_norm=False,
                 in_training=False,
                 layer_idx=0):
        super(IndRNNCell, self).__init__(_reuse=reuse, name=name)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._num_units = num_units
        self._recurrent_min_abs = recurrent_min_abs
        self._recurrent_max_abs = recurrent_max_abs
        self._recurrent_max_abs_tensor = tf.cast(
            tf.constant(np.ones((num_units, num_units)) * self._recurrent_max_abs),
            dtype=tf.float32)
        self._recurrent_initializer = recurrent_initializer
        self._input_initializer = input_initializer
        self._activation = activation or nn_ops.relu

        self._batch_norm = batch_norm
        self._in_training = in_training
        self._layer_idx = layer_idx
        self.topdown = True
Code example #27
    def __init__(self,
                 num_units,
                 forget_bias=1.0,
                 cell_clip=None,
                 use_peephole=False,
                 reuse=None,
                 name="lstm_fused_cell"):
        """Initialize the LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      cell_clip: clip the cell to this value. Default is no cell clipping.
      use_peephole: Whether to use peephole connections or not.
      reuse: (optional) boolean describing whether to reuse variables in an
        existing scope.  If not `True`, and the existing scope already has the
        given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases.  By default this is "lstm_cell", for variable-name compatibility
        with `tf.nn.rnn_cell.LSTMCell`.
    """
        super(LSTMBlockFusedCell, self).__init__(_reuse=reuse, name=name)
        self._num_units = num_units
        self._forget_bias = forget_bias
        self._cell_clip = cell_clip if cell_clip is not None else -1
        self._use_peephole = use_peephole

        # Inputs must be 3-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=3)
Code example #28
  def build(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = self._channel_axis
    input_channels = input_shape[channel_axis].value
    if input_channels is None:
      raise ValueError("The channel dimension of the inputs must be defined.")
    kernel_shape = self.kernel_support + (input_channels, self.filters)
    if self.channel_separable:
      output_channels = self.filters * input_channels
    else:
      output_channels = self.filters

    if self.kernel_parameterizer is None:
      getter = self.add_variable
    else:
      getter = functools.partial(
          self.kernel_parameterizer, getter=self.add_variable)
    self._kernel = getter(
        name="kernel", shape=kernel_shape, dtype=self.dtype,
        initializer=self.kernel_initializer,
        regularizer=self.kernel_regularizer)

    if self.bias_parameterizer is None:
      getter = self.add_variable
    else:
      getter = functools.partial(
          self.bias_parameterizer, getter=self.add_variable)
    self._bias = None if not self.use_bias else getter(
        name="bias", shape=(output_channels,), dtype=self.dtype,
        initializer=self.bias_initializer, regularizer=self.bias_regularizer)

    self.input_spec = base.InputSpec(
        ndim=self._rank + 2, axes={channel_axis: input_channels})

    super(_SignalConv, self).build(input_shape)
Code example #29
File: gru_ops.py Project: vinegreti2010/CFRFServers
  def __init__(self,
               num_units=None,
               cell_size=None,
               reuse=None,
               name="gru_cell"):
    """Initialize the Block GRU cell.

    Args:
      num_units: int, The number of units in the GRU cell.
      cell_size: int, The old (deprecated) name for `num_units`.
      reuse: (optional) boolean describing whether to reuse variables in an
        existing scope.  If not `True`, and the existing scope already has the
        given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases.  By default this is "gru_cell", for variable-name compatibility
        with `tf.nn.rnn_cell.GRUCell`.

    Raises:
      ValueError: if both cell_size and num_units are not None;
        or both are None.
    """
    super(GRUBlockCell, self).__init__(_reuse=reuse, name=name)
    if (cell_size is None) == (num_units is None):
      raise ValueError(
          "Exactly one of num_units or cell_size must be provided.")
    if num_units is None:
      num_units = cell_size
    self._cell_size = num_units
    # Inputs must be 2-dimensional.
    self.input_spec = base_layer.InputSpec(ndim=2)
Code example #30
File: Gumble_lstm.py Project: kyzhouhzau/Gumble_lstm
    def __init__(self,
                 num_units,
                 bias=0.0,
                 hide_kernel_initializer=None,
                 input_kernel_initializer=None,
                 cell_kernel_initializer=None,
                 state_is_tuple=True,
                 cell_clip=True,
                 activation=None,
                 reuse=None,
                 name=None):
        super(GLSTM, self).__init__(_reuse=reuse, name=name)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)
        self._state_is_tuple = state_is_tuple
        self._num_units = num_units
        self._bias = bias
        self._cell_clip = cell_clip
        self._hide_initializer = hide_kernel_initializer
        self._cell_initializer = cell_kernel_initializer
        self._input_initializer = input_kernel_initializer

        self._activation = activation or nn_ops.relu
        self._state_size = (LSTMStateTuple(num_units, num_units)
                            if state_is_tuple else 2 * num_units)