Example #1
# Constructor of edward2's Conv2DReparameterization: a Keras Conv2D whose
# kernel defaults to a trainable normal posterior with a normal KL
# regularizer; string arguments are resolved through edward2's
# initializers/regularizers/constraints registries before delegating to
# the parent constructor.
def __init__(self,
             filters,
             kernel_size,
             strides=(1, 1),
             padding='valid',
             data_format=None,
             dilation_rate=(1, 1),
             activation=None,
             use_bias=True,
             kernel_initializer='trainable_normal',
             bias_initializer='zeros',
             kernel_regularizer='normal_kl_divergence',
             bias_regularizer=None,
             activity_regularizer=None,
             kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
  super(Conv2DReparameterization, self).__init__(
      filters=filters,
      kernel_size=kernel_size,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilation_rate,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=initializers.get(kernel_initializer),
      bias_initializer=initializers.get(bias_initializer),
      kernel_regularizer=regularizers.get(kernel_regularizer),
      bias_regularizer=regularizers.get(bias_regularizer),
      activity_regularizer=regularizers.get(activity_regularizer),
      kernel_constraint=constraints.get(kernel_constraint),
      bias_constraint=constraints.get(bias_constraint),
      **kwargs)
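A minimal usage sketch (the `ed.layers` namespace is an assumption about the edward2 package layout; the layer otherwise behaves like a Keras Conv2D):

import edward2 as ed
import tensorflow as tf

conv = ed.layers.Conv2DReparameterization(filters=16, kernel_size=3)
outputs = conv(tf.random.normal([8, 32, 32, 3]))  # shape [8, 30, 30, 16]
# The default 'normal_kl_divergence' kernel regularizer adds a KL term to
# conv.losses, to be included in the training objective.
kl_penalty = sum(conv.losses)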
Example #2
def __init__(self,
             units,
             num_inducing,
             mean_fn=Zeros(),
             covariance_fn=ExponentiatedQuadratic(variance=1.,
                                                  lengthscale=1.),
             inducing_inputs_initializer='random_normal',
             inducing_outputs_initializer='trainable_normal',
             inducing_inputs_regularizer=None,
             inducing_outputs_regularizer='normal_kl_divergence',
             inducing_inputs_constraint=None,
             inducing_outputs_constraint=None,
             **kwargs):
  """Constructs layer.

  Args:
    units: integer, dimensionality of layer.
    num_inducing: integer, number of inducing points for the approximation.
    mean_fn: Mean function, a callable taking an inputs Tensor of shape
      [batch, ...] and returning a Tensor of shape [batch].
    covariance_fn: Covariance function, a callable taking two input Tensors
      of shape [batch_x1, ...] and [batch_x2, ...] respectively, and
      returning a positive semi-definite matrix of shape
      [batch_x1, batch_x2].
    inducing_inputs_initializer: Initializer for the inducing inputs.
    inducing_outputs_initializer: Initializer for the inducing outputs.
    inducing_inputs_regularizer: Regularizer function applied to the
      inducing inputs.
    inducing_outputs_regularizer: Regularizer function applied to the
      inducing outputs.
    inducing_inputs_constraint: Constraint function applied to the
      inducing inputs.
    inducing_outputs_constraint: Constraint function applied to the
      inducing outputs.
    **kwargs: kwargs passed to parent class.
  """
  super(SparseGaussianProcess, self).__init__(
      units=units,
      mean_fn=mean_fn,
      covariance_fn=covariance_fn,
      conditional_inputs=None,
      conditional_outputs=None,
      **kwargs)
  self.num_inducing = num_inducing
  self.inducing_inputs_initializer = initializers.get(
      inducing_inputs_initializer)
  self.inducing_outputs_initializer = initializers.get(
      inducing_outputs_initializer)
  self.inducing_inputs_regularizer = regularizers.get(
      inducing_inputs_regularizer)
  self.inducing_outputs_regularizer = regularizers.get(
      inducing_outputs_regularizer)
  self.inducing_inputs_constraint = constraints.get(
      inducing_inputs_constraint)
  self.inducing_outputs_constraint = constraints.get(
      inducing_outputs_constraint)
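Given the defaults above, only `units` and `num_inducing` are required. A hedged construction sketch (again assuming the `ed.layers` namespace):

import edward2 as ed
import tensorflow as tf

gp = ed.layers.SparseGaussianProcess(units=10, num_inducing=64)
outputs = gp(tf.random.normal([32, 128]))  # predictions for a batch of 32
# The 'normal_kl_divergence' regularizer on the inducing outputs adds the
# variational KL term to gp.losses.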
Example #3
def testTrainableNormal(self):
    shape = (100,)
    # TrainableNormal is expected to have variance 1/shape[0] because its
    # scale defaults to a fan-in-scaled normal std initializer.
    initializer = initializers.get('trainable_normal')
    normal = initializer(shape)
    self.evaluate(tf.global_variables_initializer())
    loc_value, scale_value = self.evaluate([
        # Get distribution of rv -> get distribution of Independent.
        normal.distribution.distribution.loc,
        normal.distribution.distribution.scale
    ])
    fan_in = shape[0]
    target_scale = 1.
    target_scale /= max(1., fan_in)
    target_scale = math.sqrt(target_scale)

    self.assertAllClose(loc_value, np.zeros(shape), atol=1e-4)
    # Tolerance is larger because the scale itself is drawn from a normal
    # std initializer; here its std is around 0.01 (0.1 * target_scale).
    self.assertAllClose(scale_value,
                        target_scale * np.ones(shape),
                        atol=5e-2)

    # Test that the TrainableNormal initializer yields the specified shape.
    normal_value = self.evaluate(normal)
    self.assertAllEqual(normal_value.shape, shape)
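The target scale in this test is plain fan-in scaling; for shape (100,) it reduces to the following arithmetic:

import math

fan_in = 100                                     # shape[0]
target_scale = math.sqrt(1. / max(1., fan_in))   # sqrt(1/100) = 0.1
# The atol=5e-2 above leaves room for the randomness of the scale's own
# initializer, whose std is roughly 0.1 * target_scale = 0.01.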
Example #4
# Constructor of edward2's DenseVariationalDropout: a Dense layer whose
# kernel defaults to a trainable normal with a log-uniform KL regularizer
# (the variational-dropout pairing); string arguments are resolved through
# edward2's registries before delegating to the parent constructor.
def __init__(self,
             units,
             activation=None,
             use_bias=True,
             kernel_initializer='trainable_normal',
             bias_initializer='zero',
             kernel_regularizer='log_uniform_kl_divergence',
             bias_regularizer=None,
             activity_regularizer=None,
             **kwargs):
  super(DenseVariationalDropout, self).__init__(
      units=units,
      activation=activation,
      use_bias=use_bias,
      kernel_initializer=initializers.get(kernel_initializer),
      bias_initializer=initializers.get(bias_initializer),
      kernel_regularizer=regularizers.get(kernel_regularizer),
      bias_regularizer=regularizers.get(bias_regularizer),
      activity_regularizer=regularizers.get(activity_regularizer),
      **kwargs)
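A short usage sketch under the same `ed.layers` assumption; note the defaults already select the trainable normal kernel and log-uniform KL shown above:

import edward2 as ed
import tensorflow as tf

layer = ed.layers.DenseVariationalDropout(units=64, activation='relu')
outputs = layer(tf.random.normal([16, 32]))  # shape [16, 64]
kl_penalty = sum(layer.losses)  # log-uniform KL term for the loss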
Example #5
# Constructor of edward2's LSTMCellReparameterization: an LSTM cell whose
# input and recurrent kernels default to trainable normal posteriors with
# normal KL regularizers; string arguments are resolved through edward2's
# registries before delegating to the parent constructor.
def __init__(self,
             units,
             activation='tanh',
             recurrent_activation='hard_sigmoid',
             use_bias=True,
             kernel_initializer='trainable_normal',
             recurrent_initializer='trainable_normal',
             bias_initializer='zeros',
             unit_forget_bias=True,
             kernel_regularizer='normal_kl_divergence',
             recurrent_regularizer='normal_kl_divergence',
             bias_regularizer=None,
             kernel_constraint=None,
             recurrent_constraint=None,
             bias_constraint=None,
             dropout=0.,
             recurrent_dropout=0.,
             implementation=1,
             **kwargs):
  super(LSTMCellReparameterization, self).__init__(
      units=units,
      activation=activation,
      recurrent_activation=recurrent_activation,
      use_bias=use_bias,
      kernel_initializer=initializers.get(kernel_initializer),
      recurrent_initializer=initializers.get(recurrent_initializer),
      bias_initializer=initializers.get(bias_initializer),
      unit_forget_bias=unit_forget_bias,
      kernel_regularizer=regularizers.get(kernel_regularizer),
      recurrent_regularizer=regularizers.get(recurrent_regularizer),
      bias_regularizer=regularizers.get(bias_regularizer),
      kernel_constraint=constraints.get(kernel_constraint),
      recurrent_constraint=constraints.get(recurrent_constraint),
      bias_constraint=constraints.get(bias_constraint),
      dropout=dropout,
      recurrent_dropout=recurrent_dropout,
      implementation=implementation,
      **kwargs)
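Because this is a cell rather than a full layer, it is wrapped in `tf.keras.layers.RNN` in the usual Keras way (sketch; `ed.layers` location assumed):

import edward2 as ed
import tensorflow as tf

cell = ed.layers.LSTMCellReparameterization(units=32)
lstm = tf.keras.layers.RNN(cell, return_sequences=True)
outputs = lstm(tf.random.normal([4, 10, 8]))  # shape [4, 10, 32]
# KL terms from the kernel and recurrent kernel accumulate in lstm.losses.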
Example #6
def testTrainableHalfCauchy(self):
    shape = (3,)
    initializer = initializers.get('trainable_half_cauchy')
    half_cauchy = initializer(shape)
    self.evaluate(tf.global_variables_initializer())
    loc_value, scale_value = self.evaluate([
        # Get distribution of rv -> get distribution of Independent.
        half_cauchy.distribution.distribution.loc,
        half_cauchy.distribution.distribution.scale
    ])
    self.assertAllClose(loc_value, np.zeros(shape), atol=1e-4)
    self.assertAllClose(scale_value, np.ones(shape), atol=1e-4)

    # Half-Cauchy samples are non-negative by construction.
    half_cauchy_value = self.evaluate(half_cauchy)
    self.assertAllEqual(half_cauchy_value.shape, shape)
    self.assertAllGreaterEqual(half_cauchy_value, 0.)
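For reference, the initializer under test can be used directly in the same way as 'trainable_normal' above (module path is an assumption; the tests' TF1-style variable initialization applies):

from edward2.tensorflow import initializers  # assumed module path

half_cauchy = initializers.get('trainable_half_cauchy')((5,))
# Yields a non-negative random variable of shape (5,), matching the
# assertAllGreaterEqual check in the test.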