Example #1
    def testRegularizersInRegularizationLosses(self, active_final, use_bias,
                                               use_dropout):
        if use_bias:
            regularizers = {
                "w": contrib_layers.l1_regularizer(scale=0.5),
                "b": contrib_layers.l2_regularizer(scale=0.5)
            }
        else:
            regularizers = {"w": contrib_layers.l1_regularizer(scale=0.5)}

        inputs = tf.random_normal(dtype=tf.float32,
                                  shape=[self.batch_size, self.input_size])
        mlp = snt.nets.MLP(name=self.module_name,
                           output_sizes=self.output_sizes,
                           regularizers=regularizers,
                           use_dropout=use_dropout)
        mlp(inputs)

        graph_regularizers = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        self.assertEqual(len(graph_regularizers), 3 * (2 if use_bias else 1))
        if not tf.executing_eagerly():
            self.assertRegexpMatches(graph_regularizers[0].name,
                                     ".*l1_regularizer.*")
            if use_bias:
                self.assertRegexpMatches(graph_regularizers[1].name,
                                         ".*l2_regularizer.*")
Example #2
    def prepare(self):
        """ Setup the weight initalizers and regularizers. """
        config = self.config

        self.conv_kernel_initializer = layers.xavier_initializer()

        if self.train_cnn and config.conv_kernel_regularizer_scale > 0:
            self.conv_kernel_regularizer = layers.l2_regularizer(
                scale=config.conv_kernel_regularizer_scale)
        else:
            self.conv_kernel_regularizer = None

        if self.train_cnn and config.conv_activity_regularizer_scale > 0:
            self.conv_activity_regularizer = layers.l1_regularizer(
                scale=config.conv_activity_regularizer_scale)
        else:
            self.conv_activity_regularizer = None

        self.fc_kernel_initializer = tf.random_uniform_initializer(
            minval=-config.fc_kernel_initializer_scale,
            maxval=config.fc_kernel_initializer_scale)

        if self.is_train and config.fc_kernel_regularizer_scale > 0:
            self.fc_kernel_regularizer = layers.l2_regularizer(
                scale=config.fc_kernel_regularizer_scale)
        else:
            self.fc_kernel_regularizer = None

        if self.is_train and config.fc_activity_regularizer_scale > 0:
            self.fc_activity_regularizer = layers.l1_regularizer(
                scale=config.fc_activity_regularizer_scale)
        else:
            self.fc_activity_regularizer = None
Example #3
  def testRegularizersInRegularizationLosses(self, transpose, use_bias):
    if transpose:
      module = functools.partial(snt.nets.ConvNet2DTranspose,
                                 output_shapes=[[100, 100]])
    else:
      module = snt.nets.ConvNet2D
    if use_bias:
      regularizers = {
          "w": contrib_layers.l1_regularizer(scale=0.5),
          "b": contrib_layers.l2_regularizer(scale=0.5)
      }
    else:
      regularizers = {"w": contrib_layers.l1_regularizer(scale=0.5)}

    model = module(output_channels=self.output_channels,
                   kernel_shapes=self.kernel_shapes,
                   strides=self.strides,
                   paddings=self.paddings,
                   use_bias=use_bias,
                   regularizers=regularizers)

    input_to_net = tf.random_normal(dtype=tf.float32, shape=(1, 100, 100, 3))
    model(input_to_net)

    regularizers = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
    expected_num_regularizers = 3 * (2 if use_bias else 1)
    self.assertLen(regularizers, expected_num_regularizers)
    if not tf.executing_eagerly():
      self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
      if use_bias:
        self.assertRegexpMatches(regularizers[1].name, ".*l2_regularizer.*")
Example #4
    def prepare(self):
        config = self.config
        self.conv_kernel_initializer = layers.xavier_initializer()
        if config.conv_kernel_regularizer_scale > 0:
            self.conv_kernel_regularizer = layers.l2_regularizer(
                scale=config.conv_kernel_regularizer_scale)
        else:
            self.conv_kernel_regularizer = None
        if config.conv_activity_regularizer_scale > 0:
            self.conv_activity_regularizer = layers.l1_regularizer(
                scale=config.conv_activity_regularizer_scale)
        else:
            self.conv_activity_regularizer = None

        self.fc_kernel_initializer = tf.random_uniform_initializer(
            minval=-config.fc_kernel_initializer_scale,
            maxval=config.fc_kernel_initializer_scale)
        if self.trainable and config.fc_kernel_regularizer_scale > 0:
            self.fc_kernel_regularizer = layers.l2_regularizer(
                scale=config.fc_kernel_regularizer_scale)
        else:
            self.fc_kernel_regularizer = None
        if self.trainable and config.fc_activity_regularizer_scale > 0:
            self.fc_activity_regularizer = layers.l1_regularizer(
                scale=config.fc_activity_regularizer_scale)
        else:
            self.fc_activity_regularizer = None
Example #5
  def testRegularizersInRegularizationLosses(self, offset, scale):
    regularizers = {}
    if offset:
      regularizers["beta"] = contrib_layers.l1_regularizer(scale=0.5)
    if scale:
      regularizers["gamma"] = contrib_layers.l2_regularizer(scale=0.5)

    inputs_shape = [10, 10]
    inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape)
    bn = snt.BatchNormV2(
        offset=offset,
        scale=scale,
        regularizers=regularizers)
    self.assertEqual(bn.regularizers, regularizers)
    bn(inputs, is_training=True)

    graph_regularizers = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
    if not offset and not scale:
      self.assertFalse(graph_regularizers)
    if offset and not scale:
      self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
    if scale and not offset:
      self.assertRegexpMatches(graph_regularizers[0].name, ".*l2_regularizer.*")
    if scale and offset:
      self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
      self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*")
Example #6
    def testRegularizers(self, trainable, state_size):
        batch_size = 6

        # Set the attribute on the class since we can't set properties of
        # abstract classes.
        snt.RNNCore.state_size = state_size
        flat_state_size = nest.flatten(state_size)
        core = snt.RNNCore(name="dummy_core")
        flat_regularizer = ([contrib_layers.l1_regularizer(scale=0.5)] *
                            len(flat_state_size))
        trainable_regularizers = nest.pack_sequence_as(
            structure=state_size, flat_sequence=flat_regularizer)

        core.initial_state(batch_size,
                           dtype=tf.float32,
                           trainable=trainable,
                           trainable_regularizers=trainable_regularizers)

        graph_regularizers = tf.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES)
        if not trainable:
            self.assertFalse(graph_regularizers)
        else:
            self.assertEqual(len(graph_regularizers), len(flat_state_size))
            if not tf.executing_eagerly():
                for i in range(len(flat_state_size)):
                    self.assertRegexpMatches(graph_regularizers[i].name,
                                             ".*l1_regularizer.*")
Example #7
def GetVaribleWithRegularization(Shape,
                                 Init,
                                 Name,
                                 Regular='L2',
                                 RegularScale=None,
                                 Device='cpu'):
    if RegularScale is not None:
        if Regular == 'L2':
            Regularizer = tfclayers.l2_regularizer(RegularScale)
        elif Regular == 'L1':
            Regularizer = tfclayers.l1_regularizer(RegularScale)
        if Device == 'cpu':
            with tf.device('/cpu:0'):
                Dtype = tf.float32
                Variable = tf.get_variable(Name,
                                           Shape,
                                           Dtype,
                                           Init,
                                           regularizer=Regularizer)
                return Variable
        elif Device == 'gpu':
            with tf.device('/gpu:0'):
                Dtype = tf.float32
                Variable = tf.get_variable(Name,
                                           Shape,
                                           Dtype,
                                           Init,
                                           regularizer=Regularizer)
                return Variable
    else:
        return GetVarible(Shape, Init, Name, Device)
Example #8
    def __init__(self, config, params):
        self.input_features_slicer = config.get_as_slice("FEATURES", "columns")

        #self.l1_reg = [config.getfloat("TRAINING", "l1_regularization", fallback=0.0)]
        #self.l2_reg = [config.getfloat("TRAINING", "l2_regularization", fallback=0.0)]
        self.l1_reg = params['l1_reg']
        self.l2_reg = params['l2_reg']
        self.learning_rate = params['learning_rate']
        self.l1_l2_regularizer = lambda t: tf.add(
            l1_regularizer(self.l1_reg)(t),
            l2_regularizer(self.l2_reg)(t))
        #self.num_hidden_units = config.getint("NETWORK", "layer_size")
        #self.num_layers = config.getint("NETWORK", "num_layers")
        self.num_hidden_units = params['layer_size']
        self.num_layers = params['num_layers']
        #self.learning_rate = config.getfloat("TRAINING", "learning_rate")
        self.is_residual = config.getboolean("TRAINING",
                                             "residual",
                                             fallback=False)

        self.batch_norm = config.getboolean("NETWORK",
                                            "batch_norm",
                                            fallback=True)

        self.optimizer = config.get("TRAINING", "optimizer")

        self.config_task_sections = get_task_sections(config)

        self.add_placeholders()
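Example #8 builds the combined L1+L2 penalty by hand inside a lambda. A roughly equivalent sketch using the built-in contrib helpers (the scales below are illustrative, not taken from the example's params):

import tensorflow as tf
from tensorflow.contrib import layers

combined = layers.sum_regularizer([
    layers.l1_regularizer(scale=0.01),
    layers.l2_regularizer(scale=0.001),
])
# layers.l1_l2_regularizer(scale_l1=0.01, scale_l2=0.001) is another option.
t = tf.ones([3, 3])
penalty = combined(t)  # 0.01 * sum(|t|) + 0.001 * 0.5 * sum(t ** 2)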
Example #9
    def define_graph(self):
        with tf.variable_scope('activations'):
            self.x = tf.placeholder(name='x',
                                    dtype=tf.float32,
                                    shape=[None, self.D])
            h = self.x
            self.tensors = [h]
            for i in range(len(self.Ws)):
                W = self.Ws[i]
                b = self.bs[i]
                h = tf.matmul(h, W) + b
                if b.shape[0] == self.d:
                    h = tf.identity(h, name='h_' + str(i + 1))
                elif b.shape[0] == 1:
                    h = tf.identity(h, name='y')
                    self.y = h
                else:
                    h = tf.nn.relu(h, name='h_' + str(i + 1))
                self.tensors.append(h)
        self.ytrue = tf.placeholder(name='ytrue',
                                    dtype=tf.float32,
                                    shape=[None, 1])

        with tf.variable_scope('optimizer'):
            self.mse = tf.reduce_mean(tf.squared_difference(
                self.y, self.ytrue))
            self.l1reg = l1_regularizer(self.lmbda)
            self.l2reg = l2_regularizer(self.lmbda)
            self.regparams = tf.add_n(
                [self.l1reg(W) + self.l2reg(W) for W in self.Ws])
            self.loss = self.mse + self.regparams
            self.opt = tf.train.AdamOptimizer(self.baselr)
            self.step = self.opt.minimize(self.loss)
Example #10
  def testInvalidRegularizationParameters(self):
    with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
      snt.BatchNormV2(
          regularizers={"not_gamma": contrib_layers.l1_regularizer(0.5)})

    err = "Regularizer for 'gamma' is not a callable function"
    with self.assertRaisesRegexp(TypeError, err):
      snt.BatchNormV2(regularizers={"gamma": tf.zeros([1, 2, 3])})
Example #11
    def testRegularizersInRegularizationLosses(self):
        regularizer = contrib_layers.l1_regularizer(scale=0.5)
        embed = snt.Embed(vocab_size=self._vocab_size,
                          embed_dim=self._embed_dim,
                          regularizers={"embeddings": regularizer})
        embed(tf.convert_to_tensor(self._ids))

        regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.assertRegexpMatches(regularizers[0].name, ".*l1_regularizer.*")
Example #12
  def testInvalidRegularizationParameters(self):
    with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
      snt.nets.AlexNetMini(
          regularizers={"not_w": contrib_layers.l1_regularizer(scale=0.5)})

    err = "Regularizer for 'w' is not a callable function"
    with self.assertRaisesRegexp(TypeError, err):
      snt.nets.AlexNetMini(
          regularizers={"w": tf.zeros([1, 2, 3])})
Example #13
    def train_op(self):
        name = None

        with tf.variable_scope(name, default_name='train_op'):
            global_step = tf.Variable(0, trainable=False)
            global_step_add_1 = tf.assign_add(global_step, 1)
            lr_initial = 0.01
            lr_decay = tf.train.exponential_decay(lr_initial,
                                                  global_step,
                                                  decay_steps=1000,
                                                  decay_rate=0.999)
            tf.summary.scalar("lr", lr_decay)

            add_to_weights()

            # regularization
            l1_norm = layers.l1_regularizer(0.001)
            l2_norm_ = layers.l2_regularizer(0.001)
            l2_norm = layers.sum_regularizer([l1_norm, l2_norm_])

            regularization = layers.apply_regularization(
                l2_norm, tf.get_collection(tf.GraphKeys.WEIGHTS))

            if self.logits is None:
                raise ValueError("you must call inference first!")
            self.loss = tf.reduce_mean(
                tf.nn.ctc_loss(
                    self.sparse_labels,
                    self.logits,
                    sequence_length=self.seq_lengths)) + regularization
            tf.summary.scalar("loss", self.loss)
            self.edit_distance = tf.reduce_mean(
                tf.edit_distance(tf.cast(self.prediction[0], tf.int32),
                                 self.sparse_labels))
            tf.summary.scalar("edit_distance", self.edit_distance)
            # lr 0.01 0.002
            with tf.control_dependencies([global_step_add_1]):
                #opt = tf.train.RMSPropOptimizer(0.01, momentum=0.99)
                opt = tf.train.GradientDescentOptimizer(lr_decay)
                gradients = tf.gradients(self.loss, tf.trainable_variables())
                # clip gradients to avoid exploding gradients
                gradients = [
                    tf.clip_by_value(gradient, -1, 1) for gradient in gradients
                ]
                self.optimizer = opt.apply_gradients(
                    zip(gradients, tf.trainable_variables()))

            with tf.name_scope('gradients_summary'):
                for gradient in gradients:
                    tf.summary.histogram(gradient.name, gradient)
            with tf.name_scope('value_summary'):
                for val in tf.trainable_variables():
                    tf.summary.histogram(val.name, val)
            self.merge_summary = tf.summary.merge_all()
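The add_to_weights() call in this example is not shown; whatever it does, apply_regularization only sees variables that have been placed in tf.GraphKeys.WEIGHTS. A hedged sketch of the kind of bookkeeping it presumably performs:

import tensorflow as tf

w = tf.get_variable("conv1_w", shape=[3, 3, 1, 64])
tf.add_to_collection(tf.GraphKeys.WEIGHTS, w)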
Example #14
    def testInvalidRegularizationParameters(self):
        regularizer = contrib_layers.l1_regularizer(scale=0.5)
        with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
            snt.Embed(vocab_size=self._vocab_size,
                      embed_dim=self._embed_dim,
                      regularizers={"not_embeddings": regularizer})

        err = "Regularizer for 'embeddings' is not a callable function"
        with self.assertRaisesRegexp(TypeError, err):
            snt.Embed(vocab_size=self._vocab_size,
                      embed_dim=self._embed_dim,
                      regularizers={"embeddings": tf.zeros([1, 2, 3])})
Example #15
    def testInvalidRegularizationParameters(self):
        regularizer = contrib_layers.l1_regularizer(scale=0.5)
        with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"):
            self.setUpWithNumOutputClasses(1)
            snt.nets.Dilation(num_output_classes=self._num_output_classes,
                              regularizers={"not_w": regularizer})

        err = "Regularizer for 'w' is not a callable function"
        with self.assertRaisesRegexp(TypeError, err):
            self.setUpWithNumOutputClasses(1)
            snt.nets.Dilation(num_output_classes=self._num_output_classes,
                              regularizers={"w": tf.zeros([1, 2, 3])})
Example #16
    def prepare(self):
        """ Setup the weight initalizers and regularizers. """
        config = self.config

        self.conv_kernel_initializer = layers.xavier_initializer()
        # Xavier initialization is designed to keep the scale of the gradients
        # roughly the same in all layers. From the paper "Understanding the
        # difficulty of training deep feedforward neural networks" (AISTATS).

        # L1 regularizer: Lasso regression (sum of absolute parameter values)
        # L2 regularizer: Ridge regression (sum of squared parameter values)
        if self.train_cnn and config.conv_kernel_regularizer_scale > 0:
            self.conv_kernel_regularizer = layers.l2_regularizer(
                scale=config.conv_kernel_regularizer_scale)
        else:
            self.conv_kernel_regularizer = None

        if self.train_cnn and config.conv_activity_regularizer_scale > 0:
            self.conv_activity_regularizer = layers.l1_regularizer(
                scale=config.conv_activity_regularizer_scale)
        else:
            self.conv_activity_regularizer = None

        self.fc_kernel_initializer = tf.random_uniform_initializer(
            minval=-config.fc_kernel_initializer_scale,
            maxval=config.fc_kernel_initializer_scale)

        if self.is_train and config.fc_kernel_regularizer_scale > 0:
            self.fc_kernel_regularizer = layers.l2_regularizer(
                scale=config.fc_kernel_regularizer_scale)
        else:
            self.fc_kernel_regularizer = None

        if self.is_train and config.fc_activity_regularizer_scale > 0:
            self.fc_activity_regularizer = layers.l1_regularizer(
                scale=config.fc_activity_regularizer_scale)
        else:
            self.fc_activity_regularizer = None
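As the comments in this example note, the contrib regularizers implement the Lasso and Ridge penalties. A small numeric sketch (arbitrary scale and tensor) of what they return; l2_regularizer includes the conventional 1/2 factor from tf.nn.l2_loss:

import tensorflow as tf
from tensorflow.contrib import layers

t = tf.constant([[1.0, -2.0], [3.0, -4.0]])
l1 = layers.l1_regularizer(scale=0.1)(t)  # 0.1 * (1 + 2 + 3 + 4) = 1.0
l2 = layers.l2_regularizer(scale=0.1)(t)  # 0.1 * (1 + 4 + 9 + 16) / 2 = 1.5
with tf.Session() as sess:
    print(sess.run([l1, l2]))  # [1.0, 1.5]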
Example #17
def _get_regularizer(regularizer, scale=0):
    if callable(regularizer):
        return regularizer(scale)
    if isinstance(regularizer, list):
        r_list = []
        for reg in regularizer:
            r_list.append(_get_regularizer(reg, scale))
        return tflayers.sum_regularizer(r_list)

    elif regularizer == 'l1':
        return tflayers.l1_regularizer(scale)
    elif regularizer == 'l2':
        return tflayers.l2_regularizer(scale)
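A hedged usage sketch for _get_regularizer (it assumes the same tflayers/tf imports as the helper itself; names and scales are illustrative): a string picks one penalty, a list is merged through sum_regularizer, and a callable is treated as a regularizer factory:

w_reg = _get_regularizer('l2', scale=0.001)
combined_reg = _get_regularizer(['l1', 'l2'], scale=0.01)
factory_reg = _get_regularizer(tflayers.l1_regularizer, scale=0.5)
w = tf.get_variable('w', shape=[10, 10], regularizer=combined_reg)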
Example #18
    def __init__(self, trainable, is_train, hparams):

        self.trainable = trainable
        self.is_train = is_train
        self.hparams = hparams

        self.conv_kernel_initializer = layers.xavier_initializer()

        if trainable and hparams.conv_kernel_regularizer_scale > 0:
            self.conv_kernel_regularizer = layers.l2_regularizer(
                scale=hparams.conv_kernel_regularizer_scale)
        else:
            self.conv_kernel_regularizer = None

        if trainable and hparams.conv_activity_regularizer_scale > 0:
            self.conv_activity_regularizer = layers.l1_regularizer(
                scale=hparams.conv_activity_regularizer_scale)
        else:
            self.conv_activity_regularizer = None

        self.fc_kernel_initializer = tf.random_uniform_initializer(
            minval=-hparams.dense_kernel_initializer_scale,
            maxval=hparams.dense_kernel_initializer_scale)

        if trainable and hparams.dense_kernel_regularizer_scale > 0:
            self.fc_kernel_regularizer = layers.l2_regularizer(
                scale=hparams.dense_kernel_regularizer_scale)
        else:
            self.fc_kernel_regularizer = None

        if trainable and hparams.dense_activity_regularizer_scale > 0:
            self.fc_activity_regularizer = layers.l1_regularizer(
                scale=hparams.dense_activity_regularizer_scale)
        else:
            self.fc_activity_regularizer = None

        self.dense_drop_rate = hparams.dense_drop_rate
Example #19
  def testRegularizersInRegularizationLosses(self):
    regularizers = {
        "gamma": contrib_layers.l1_regularizer(scale=0.5),
        "beta": contrib_layers.l2_regularizer(scale=0.5),
    }

    inputs = tf.placeholder(tf.float32, shape=[None, 10])
    ln = snt.LayerNorm(regularizers=regularizers)
    self.assertEqual(ln.regularizers, regularizers)
    ln(inputs)

    graph_regularizers = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)
    self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*")
    self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*")
Example #20
    def setUp(self):
        super(MLPTest, self).setUp()

        self.output_sizes = [11, 13, 17]
        self.batch_size = 5
        self.input_size = 7
        self.module_name = "mlp"
        self.initializers = {
            "w": tf.truncated_normal_initializer(stddev=1.0),
        }
        self.regularizers = {
            "w": contrib_layers.l1_regularizer(scale=0.1),
        }
        self.partitioners = {
            "w": tf.fixed_size_partitioner(num_shards=2),
        }
Example #21
def conv(data,
         kernel_shape,
         activation,
         name,
         dropout=None,
         regularizer=None,
         reg_val=0):
    """ Convolution layer.
    Parameters:
        data: The input data.
        kernel_shape: The kernel_shape of current convolutional layer.
        activation: The activation function.
        name: The name of current layer.
        dropout: Whether do the dropout work.
        regularizer: Whether use the L2 or L1 regularizer.
        reg_val: regularizer value.
    Return:
        conv_out: The output of current layer.
    """
    if regularizer == 'L1':
        regularizer = layers.l1_regularizer(reg_val)
    elif regularizer == 'L2':
        regularizer = layers.l2_regularizer(reg_val)

    with tf.name_scope(name):
        # Convolution layer 1.
        with tf.variable_scope('conv_weights', regularizer=regularizer):
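            # NOTE: a scope-level regularizer only applies to variables created
            # with tf.get_variable; the tf.Variable call below is not affected by it.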
            conv_weights = tf.Variable(
                tf.truncated_normal(kernel_shape, stddev=0.1,
                                    dtype=tf.float32))
            variable_summaries(conv_weights)
        with tf.variable_scope('conv_bias'):
            conv_biases = tf.Variable(
                tf.constant(0.0, dtype=tf.float32, shape=[kernel_shape[3]]))
        with tf.name_scope('conv'):
            conv = tf.nn.conv2d(data,
                                conv_weights,
                                strides=[1, 1, 1, 1],
                                padding='SAME')
        with tf.name_scope('activation'):
            conv_out = activation(tf.nn.bias_add(conv, conv_biases))
            variable_summaries(conv_out)
        if dropout is not None:
            with tf.name_scope('dropout'):
                conv_out = tf.nn.dropout(conv_out, dropout)

        return conv_out
Example #22
def inference(data, keep_prob, sample_size, training=True, reuse=False, output_name='prediction'):
    """
    Define the deep neural network used for inference
    """

    # split the input into the 1080-element laser scan and the 3-element goal
    laser = tf.slice(data, [0,0], [sample_size,1080])
    goal = tf.slice(data, [0,1080], [sample_size,3])

    laser = tf.reshape(laser, [sample_size, 1, 1080, 1])
    hidden_1 = conv2d(laser, 64, [1,7], stride=3, normalizer_fn=batch_norm,
            weights_initializer=xavier_initializer_conv2d(), 
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='layer_scope_1')
    hidden_1 = contrib.layers.max_pool2d(hidden_1, [1,3],[1,3], 'SAME')
    hidden_2 = conv2d(hidden_1, 64, [1,3], normalizer_fn=batch_norm,
            weights_initializer=xavier_initializer_conv2d(),
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='layer_scope_2')
    hidden_3 = conv2d(hidden_2, 64, [1,3], activation_fn=None, normalizer_fn=batch_norm,
            weights_initializer=xavier_initializer_conv2d(), 
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='layer_scope_3')
    hidden_3 = tf.nn.relu(hidden_3 + hidden_1)
    hidden_4 = conv2d(hidden_3, 64, [1,3], normalizer_fn=batch_norm,
            weights_initializer=xavier_initializer_conv2d(),
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='layer_scope_4')
    hidden_5 = conv2d(hidden_4, 64, [1,3], activation_fn=None, normalizer_fn=batch_norm,
            weights_initializer=xavier_initializer_conv2d(), 
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='layer_scope_5')
    hidden_5 = tf.nn.relu(hidden_5 + hidden_3)

    pooling = contrib.layers.avg_pool2d(hidden_5, [1,3],[1,3], 'SAME')
    pooling = contrib.layers.flatten(pooling)
    combined = tf.concat(1,[pooling, goal])
    fc_5 = fully_connected(combined, 1024, weights_initializer=xavier_initializer(),
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='fc_scope_5')
    fc_6 = fully_connected(fc_5, 1024, weights_initializer=xavier_initializer(),
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='fc_scope_6')
    fc_7 = fully_connected(fc_6, 512, weights_initializer=xavier_initializer(),
            weights_regularizer=l1_regularizer(0.001), reuse=reuse, trainable=training, scope='fc_scope_7')
    prediction = fully_connected(fc_7, CMD_SIZE, activation_fn=None, reuse=reuse, trainable=training, scope='layer_scope_pred')

    prediction = tf.identity(prediction, name=output_name)

    return prediction
Example #23
  def testRegularizersInRegularizationLosses(self):
    regularizers = {
        "w": contrib_layers.l1_regularizer(scale=0.5),
        "b": contrib_layers.l2_regularizer(scale=0.5)
    }

    alex_net = snt.nets.AlexNetMini(
        regularizers=regularizers, name="alexnet1")

    input_shape = [alex_net._min_size, alex_net._min_size, 3]
    inputs = tf.ones(dtype=tf.float32, shape=[1] + input_shape)
    alex_net(inputs)

    graph_regularizers = tf.get_collection(
        tf.GraphKeys.REGULARIZATION_LOSSES)

    alex_net_conv_layers = len(alex_net.conv_modules)
    self.assertEqual(len(graph_regularizers), 2 * alex_net_conv_layers)
Example #24
    def prepare(self):
        config = self.config

        self.fc_kernel_initializer = tf.random_uniform_initializer(
            minval=-config.fc_kernel_initializer_scale,
            maxval=config.fc_kernel_initializer_scale)

        if self.is_train and config.fc_kernel_regularizer_scale > 0:
            self.fc_kernel_regularizer = layers.l2_regularizer(
                scale=config.fc_kernel_regularizer_scale)
        else:
            self.fc_kernel_regularizer = None

        if self.is_train and config.fc_activity_regularizer_scale > 0:
            self.fc_activity_regularizer = layers.l1_regularizer(
                scale=config.fc_activity_regularizer_scale)
        else:
            self.fc_activity_regularizer = None
Example #25
def _build_regularizer(regularizer):
  """Builds a regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
  if regularizer_oneof == 'l1_regularizer':
    return layers.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
  if regularizer_oneof == 'l2_regularizer':
    return layers.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
  raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
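A hedged usage sketch for _build_regularizer; the proto module path below is assumed from the docstring's hyperparams_pb2 type name and may differ in the actual project:

from google.protobuf import text_format
from object_detection.protos import hyperparams_pb2  # assumed module path

hyperparams = hyperparams_pb2.Hyperparams()
text_format.Merge('regularizer { l1_regularizer { weight: 0.001 } }', hyperparams)
reg_fn = _build_regularizer(hyperparams.regularizer)  # layers.l1_regularizer(0.001)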
Example #26
def hidden(data,
           activation,
           name,
           hidden_units,
           dropout=None,
           regularizer=None,
           reg_val=None):
    """ Hidden layer.
    Parameters:
        data: The input data.
        activation: The activation function.
        name: The layer's name.
        hidden_units: Number of hidden_out units.
        dropout: Whether do the dropout job.
        regularizer: Whether use the L2 or L1 regularizer.
        reg_val: regularizer value.
    Return:
        hidden_out: Output of current layer.
    """
    if regularizer == 'L1':
        regularizer = layers.l1_regularizer(reg_val)
    elif regularizer == 'L2':
        regularizer = layers.l2_regularizer(reg_val)

    with tf.name_scope(name):
        # Fully connected layer; xw_plus_b adds the biases with broadcasting.
        with tf.variable_scope('fc_weights', regularizer=regularizer):
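            # NOTE: as in conv() above, the scope regularizer only affects
            # tf.get_variable; this tf.Variable is not regularized through it.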
            input_units = int(data.shape[1])
            fc_weights = tf.Variable(  # fully connected weights.
                tf.truncated_normal([input_units, hidden_units],
                                    stddev=0.1,
                                    dtype=tf.float32))
            variable_summaries(fc_weights)
        with tf.name_scope('fc_bias'):
            fc_biases = tf.Variable(
                tf.constant(0.0, dtype=tf.float32, shape=[hidden_units]))
            variable_summaries(fc_biases)
        with tf.name_scope('activation'):
            hidden_out = activation(
                tf.nn.xw_plus_b(data, fc_weights, fc_biases))
            variable_summaries(hidden_out)
        if dropout is not None:
            hidden_out = tf.nn.dropout(hidden_out, dropout)
        return hidden_out
Example #27
  def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    self.device_assigner = (
        device_assigner or tensor_forest.RandomForestDeviceAssigner())

    self.params = params

    self.optimizer = optimizer_class(self.params.learning_rate)

    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
      self.regularizer = layers.l1_regularizer(
          self.params.regularization_strength)
    elif params.regularization == "l2":
      self.regularizer = layers.l2_regularizer(
          self.params.regularization_strength)
Example #28
    def __init__(self,
                 params,
                 device_assigner=None,
                 optimizer_class=adagrad.AdagradOptimizer,
                 **kwargs):

        self.device_assigner = (device_assigner
                                or framework_variables.VariableDeviceChooser())

        self.params = params

        self.optimizer = optimizer_class(self.params.learning_rate)

        self.is_regression = params.regression

        self.regularizer = None
        if params.regularization == "l1":
            self.regularizer = layers.l1_regularizer(
                self.params.regularization_strength)
        elif params.regularization == "l2":
            self.regularizer = layers.l2_regularizer(
                self.params.regularization_strength)
Example #29
  def __init__(self,
               params,
               device_assigner=None,
               optimizer_class=adagrad.AdagradOptimizer,
               **kwargs):

    self.device_assigner = (
        device_assigner or framework_variables.VariableDeviceChooser())

    self.params = params

    self.optimizer = optimizer_class(self.params.learning_rate)

    self.is_regression = params.regression

    self.regularizer = None
    if params.regularization == "l1":
      self.regularizer = layers.l1_regularizer(
          self.params.regularization_strength)
    elif params.regularization == "l2":
      self.regularizer = layers.l2_regularizer(
          self.params.regularization_strength)
Example #30
    def testRegularizersInRegularizationLosses(self):
        w_regularizer = contrib_layers.l1_regularizer(scale=0.5)
        b_regularizer = contrib_layers.l2_regularizer(scale=0.5)
        self.setUpWithNumOutputClasses(1)
        dilation_mod = snt.nets.Dilation(
            num_output_classes=self._num_output_classes,
            regularizers={
                "w": w_regularizer,
                "b": b_regularizer
            })
        dilation_mod(tf.convert_to_tensor(self._images))

        regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

        # There are two regularizers per level
        layers_number = 8
        self.assertEqual(len(regularizers), 2 * layers_number)
        if not tf.executing_eagerly():
            for i in range(0, 2 * layers_number, 2):
                self.assertRegexpMatches(regularizers[i].name,
                                         ".*l1_regularizer.*")
                self.assertRegexpMatches(regularizers[i + 1].name,
                                         ".*l2_regularizer.*")
Example #31
def l1_regularizer(scale=1.0):
    return contrib_layers.l1_regularizer(scale=scale)
Example #32
def l1_regularizer(scale=1.0):
    return contrib_layers.l1_regularizer(scale=scale)