def __init__(self,
                 decay=1e-6,
                 affine_w_initializer=None,
                 affine_b_initializer=None,
                 acti_func='relu',
                 name='inet-affine'):
        """
        This network estimates affine transformations from
        a pair of moving and fixed images:

            Hu et al., Label-driven weakly-supervised learning for
            multimodal deformable image registration, arXiv:1711.01666
            https://arxiv.org/abs/1711.01666

        :param decay: L2 weight-decay factor applied to the convolution
            and affine layers
        :param affine_w_initializer: initializer for the affine output weights
        :param affine_b_initializer: initializer for the affine output biases
        :param acti_func: activation function used in the residual blocks
        :param name: layer name
        """

        BaseNet.__init__(self, name=name)

        self.fea = [4, 8, 16, 32, 64]
        self.k_conv = 3
        self.affine_w_initializer = affine_w_initializer
        self.affine_b_initializer = affine_b_initializer
        self.res_param = {
            'w_initializer': GlorotUniform.get_instance(''),
            'w_regularizer': regularizers.l2_regularizer(decay),
            'acti_func': acti_func}
        self.affine_param = {
            'w_regularizer': regularizers.l2_regularizer(decay),
            'b_regularizer': None}
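
A minimal sketch (illustrative, assuming a TF 1.x graph with tf.contrib available) of how a regularizer built this way takes effect: any variable created with it adds an l2 penalty to the REGULARIZATION_LOSSES collection, which is then summed into the training loss.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import regularizers

w_regularizer = regularizers.l2_regularizer(1e-6)
# Hypothetical variable; the blocks above pass the regularizer to their own
# variable creation in the same way.
w = tf.get_variable('w', shape=[3, 3, 3, 4, 8], regularizer=w_regularizer)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_regularization = tf.add_n(reg_losses)  # add this term to the task loss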
Example #2
    def test_3d_reg_shape(self):
        x = self.get_3d_input()

        unet_block_op = UNetBlock(
            'DOWNSAMPLE', (32, 64), (3, 3), with_downsample_branch=True,
            w_regularizer=regularizers.l2_regularizer(0.3))
        out_1, out_2 = unet_block_op(x, is_training=True)
        print(unet_block_op)
        print(out_1)
        print(out_2)

        unet_block_op = UNetBlock(
            'UPSAMPLE', (32, 64), (3, 3), with_downsample_branch=False,
            w_regularizer=regularizers.l2_regularizer(0.3))
        out_3, _ = unet_block_op(x, is_training=True)
        print(unet_block_op)
        print(out_3)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out_1 = sess.run(out_1)
            self.assertAllClose((2, 8, 8, 8, 64), out_1.shape)
            out_2 = sess.run(out_2)
            self.assertAllClose((2, 16, 16, 16, 64), out_2.shape)
            out_3 = sess.run(out_3)
            self.assertAllClose((2, 32, 32, 32, 64), out_3.shape)
Example #3
    def test_3d_reg_shape(self):
        x = self.get_3d_data()
        vnet_block_op = VNetBlock('DOWNSAMPLE', 2, 16, 8,
                                  w_regularizer=regularizers.l2_regularizer(
                                      0.2))
        out_1, out_2 = vnet_block_op(x, x)
        print(vnet_block_op)

        vnet_block_op = VNetBlock('UPSAMPLE', 2, 16, 8,
                                  w_regularizer=regularizers.l2_regularizer(
                                      0.2))
        out_3, out_4 = vnet_block_op(x, x)
        print(vnet_block_op)

        vnet_block_op = VNetBlock('SAME', 2, 16, 8,
                                  w_regularizer=regularizers.l2_regularizer(
                                      0.2))
        out_5, out_6 = vnet_block_op(x, x)
        print(vnet_block_op)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out_1 = sess.run(out_1)
            self.assertAllClose((2, 16, 16, 16, 16), out_1.shape)
            out_2 = sess.run(out_2)
            self.assertAllClose((2, 8, 8, 8, 8), out_2.shape)
            out_3 = sess.run(out_3)
            self.assertAllClose((2, 16, 16, 16, 16), out_3.shape)
            out_4 = sess.run(out_4)
            self.assertAllClose((2, 32, 32, 32, 8), out_4.shape)
            out_5 = sess.run(out_5)
            self.assertAllClose((2, 16, 16, 16, 16), out_5.shape)
            out_6 = sess.run(out_6)
            self.assertAllClose((2, 16, 16, 16, 8), out_6.shape)
Example #4
 def test_fc_2d_bias_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'with_bias': True,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_fc_output_shape(rank=2,
                                param_dict=input_param,
                                output_shape=(2, 10))
Example #5
 def test_fclayer_3d_bias_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'with_bn': False,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_fc_layer_output_shape(rank=3,
                                      param_dict=input_param,
                                      output_shape=(2, 10))
Example #6
 def test_fclayer_2d_bn_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'with_bias': False,
                    'with_bn': True,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_fc_layer_output_shape(rank=2,
                                      param_dict=input_param,
                                      output_shape=(2, 10),
                                      is_training=True)
Example #7
 def test_deconv_3d_bias_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': 3,
                    'stride': [2, 2, 1],
                    'with_bias': True,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_deconv_output_shape(rank=3,
                                    param_dict=input_param,
                                    output_shape=(2, 32, 32, 16, 10))
Example #8
 def test_convlayer_2d_bias_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': [3, 5],
                    'stride': [2, 1],
                    'with_bias': True,
                    'with_bn': False,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_conv_layer_output_shape(rank=2,
                                        param_dict=input_param,
                                        output_shape=(2, 8, 16, 10))
Example #9
 def test_convlayer_3d_bn_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': [5, 1, 2],
                    'stride': 1,
                    'with_bias': False,
                    'with_bn': True,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_conv_layer_output_shape(rank=3,
                                        param_dict=input_param,
                                        output_shape=(2, 16, 16, 16, 10),
                                        is_training=True)
Example #10
 def test_convlayer_2d_bn_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': [3, 5],
                    'stride': [2, 1],
                    'with_bias': False,
                    'feature_normalization': 'batch',
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_conv_layer_output_shape(rank=2,
                                        param_dict=input_param,
                                        output_shape=(2, 8, 16, 10),
                                        is_training=True)
Example #11
 def test_fclayer_3d_bn_reg_shape(self):
     input_param = {
         'n_output_chns': 10,
         'with_bias': False,
         'feature_normalization': 'batch',
         'w_regularizer': regularizers.l2_regularizer(0.5),
         'b_regularizer': regularizers.l2_regularizer(0.5)
     }
     self._test_fc_layer_output_shape(rank=3,
                                      param_dict=input_param,
                                      output_shape=(2, 10),
                                      is_training=True)
Example #12
File: MAML.py Project: xxyy1/MAML
    def item_scores(self):
        # (N_USER_IDS, 1, K)
        user = tf.expand_dims(tf.nn.embedding_lookup(self.user_embeddings,
                                                     self.score_user_ids),
                              1,
                              name='user_test')
        # (1, N_ITEM, K)
        item = tf.tile(tf.expand_dims(self.item_embeddings, 0),
                       [tf.shape(user)[0], 1, 1],
                       name='item_test')
        feature = tf.tile(tf.expand_dims(self.feature_projection, 0),
                          [tf.shape(user)[0], 1, 1],
                          name='feature_test')
        input = tf.concat([
            tf.reshape(tf.tile(user, [1, tf.shape(item)[1], 1]),
                       [-1, self.embed_dim]),
            tf.reshape(item, [-1, self.embed_dim]),
            tf.reshape(feature, [-1, self.embed_dim])
        ],
                          1,
                          name='input_test')

        with tf.variable_scope('dense'):
            hidden_layer = tf.layers.dense(
                inputs=tf.nn.l2_normalize(input, dim=1),
                units=5 * self.embed_dim,
                trainable=False,
                kernel_regularizer=regularizers.l2_regularizer(100.0),
                activation=tf.nn.tanh,
                name='hidden_layer',
                reuse=True)

            hidden_layer1 = tf.layers.dense(
                inputs=tf.nn.l2_normalize(hidden_layer, dim=1),
                units=1 * self.embed_dim,
                trainable=False,
                kernel_regularizer=regularizers.l2_regularizer(100.0),
                activation=tf.nn.relu,
                name='hidden_layer1',
                reuse=True)
            attention_layer_score = self.embed_dim * tf.nn.softmax(
                hidden_layer1, dim=-1)

        attention_reshape = tf.reshape(
            attention_layer_score, [-1, tf.shape(item)[1], self.embed_dim],
            name='attention_test')
        scores = -tf.reduce_sum(tf.squared_difference(
            tf.multiply(attention_reshape, user),
            tf.multiply(attention_reshape, item)),
                                2,
                                name="scores")
        top_n = tf.nn.top_k(scores, 10 + self.max_train_count, name='top_n')
        return top_n
Example #13
 def test_deconv_3d_bias_reg_shape(self):
     input_param = {
         'n_output_chns': 10,
         'kernel_size': 3,
         'stride': [2, 2, 1],
         'with_bias': True,
         'w_regularizer': regularizers.l2_regularizer(0.5),
         'b_regularizer': regularizers.l2_regularizer(0.5)
     }
     self._test_deconv_output_shape(rank=3,
                                    param_dict=input_param,
                                    output_shape=(2, 32, 32, 16, 10))
Example #14
 def test_convlayer_2d_bias_reg_shape(self):
     input_param = {
         'n_output_chns': 10,
         'kernel_size': [3, 5],
         'stride': [2, 1],
         'with_bias': True,
         'with_bn': False,
         'w_regularizer': regularizers.l2_regularizer(0.5),
         'b_regularizer': regularizers.l2_regularizer(0.5)
     }
     self._test_conv_layer_output_shape(rank=2,
                                        param_dict=input_param,
                                        output_shape=(2, 8, 16, 10))
Example #15
 def test_convlayer_3d_relu_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': [5, 1, 2],
                    'stride': [1, 2, 2],
                    'with_bias': False,
                    'feature_normalization': 'batch',
                    'acti_func': 'relu',
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'b_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_conv_layer_output_shape(rank=3,
                                        param_dict=input_param,
                                        output_shape=(2, 16, 8, 8, 10),
                                        is_training=True)
Example #16
 def test_convlayer_3d_bn_reg_shape(self):
     input_param = {
         'n_output_chns': 10,
         'kernel_size': [5, 1, 2],
         'stride': 1,
         'with_bias': False,
         'with_bn': True,
         'w_regularizer': regularizers.l2_regularizer(0.5),
         'b_regularizer': regularizers.l2_regularizer(0.5)
     }
     self._test_conv_layer_output_shape(rank=3,
                                        param_dict=input_param,
                                        output_shape=(2, 16, 16, 16, 10),
                                        is_training=True)
Example #17
def discriminator_fn(code, code2, hidden_units):

    kwargs = {
        'kernel_regularizer': l2_regularizer(1e-4),
        'bias_regularizer': l2_regularizer(1e-4)
    }
    h = code

    for hidden_unit in hidden_units:
        h = Dense(hidden_unit, activation=tf.nn.leaky_relu, **kwargs)(h)

    h = Dense(1, name='dis_out', **kwargs)(h)
    logits = h

    return logits
Example #18
    def test_2d_reg_shape(self):
        input_shape = (2, 32, 32, 1)
        x = tf.ones(input_shape)

        highres_layer = HighRes3DNet(
            num_classes=5,
            w_regularizer=regularizers.l2_regularizer(0.5),
            b_regularizer=regularizers.l2_regularizer(0.5))
        out = highres_layer(x, is_training=True)
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 5), out.shape)
Example #19
    def test_2d_reg_shape(self):
        input_shape = (2, 20, 20, 1)
        x = tf.ones(input_shape)

        holistic_net_instance = HolisticNet(
            num_classes=3,
            w_regularizer=regularizers.l2_regularizer(0.5),
            b_regularizer=regularizers.l2_regularizer(0.5))
        out = holistic_net_instance(x, is_training=False)
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with self.cached_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 20, 20, 3), out.shape)
Example #20
    def test_2d_reg_shape(self):
        input_shape = (2, 20, 20, 1)
        x = tf.ones(input_shape)

        holistic_net_instance = HolisticNet(
            num_classes=3,
            w_regularizer=regularizers.l2_regularizer(0.5),
            b_regularizer=regularizers.l2_regularizer(0.5))
        out = holistic_net_instance(x, is_training=False)
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 20, 20, 3), out.shape)
Example #21
    def test_2d_reg_shape(self):
        input_shape = (2, 57, 57, 1)
        x = tf.ones(input_shape)

        deepmedic_instance = DeepMedic(
            num_classes=160,
            w_regularizer=regularizers.l2_regularizer(0.5),
            b_regularizer=regularizers.l2_regularizer(0.5))
        out = deepmedic_instance(x, is_training=True)
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 9, 9, 160), out.shape)
Example #22
    def test_2d_reg_shape(self):
        input_shape = (2, 57, 57, 1)
        x = tf.ones(input_shape)

        deepmedic_instance = DeepMedic(
            num_classes=160,
            w_regularizer=regularizers.l2_regularizer(0.5),
            b_regularizer=regularizers.l2_regularizer(0.5))
        out = deepmedic_instance(x, is_training=True)
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 9, 9, 160), out.shape)
Example #23
def discriminator2(img,
                   training=True,
                   weight_decay=0.0001,
                   batch_norm_decay=0.997,
                   batch_norm_epsilon=1e-5,
                   batch_norm_scale=True):
    batch_norm_params = {
        'is_training': training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
    }

    disc = slim.conv2d(
        img,
        64, [4, 4], [2, 2],
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=lrelu,
        scope="convolutionstart")
    with arg_scope(
        [slim.conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            activation_fn=lrelu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):

        disc = slim.stack(disc,
                          slim.conv2d, [(128, [4, 4], [2, 2]),
                                        (256, [4, 4], [2, 2]),
                                        (512, [4, 4], [2, 2]),
                                        (1024, [4, 4], [2, 2])],
                          scope="convolution")

    disc = slim.conv2d(
        disc,
        1024, [1, 1],
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=lrelu,
        scope="convolutionend")

    disc = tf.reshape(disc, [batch_size, 4 * 4 * 1024])
    disc = slim.fully_connected(disc, 1, activation_fn=None, scope="logits")

    return disc
Example #24
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
    # NOTE 'is_training' here does not work because inside resnet it gets reset:
    # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
    'is_training': False,
    'decay': batch_norm_decay,
    'epsilon': batch_norm_epsilon,
    'scale': batch_norm_scale,
    'trainable': cfg.RESNET.BN_TRAIN,
    'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
      return arg_sc
Example #25
 def test_deconvlayer_3d_bias_reg_shape(self):
     input_param = {
         'n_output_chns': 10,
         'kernel_size': 3,
         'stride': 1,
         'with_bias': True,
         'feature_normalization': None,
         'w_regularizer': regularizers.l2_regularizer(0.5),
         'b_regularizer': regularizers.l2_regularizer(0.5)
     }
     self._test_deconv_layer_output_shape(rank=3,
                                          param_dict=input_param,
                                          output_shape=(2, 16, 16, 16, 10))
     self._test_deconv_layer_output_shape(rank=3,
                                          param_dict=input_param,
                                          output_shape=(2, 16, 16, 16, 10))
Example #26
File: vae.py Project: rn5l/rsc18
    def build_graph(self):
        self._construct_weights()

        saver, logits, KL = self.forward_pass()
        log_softmax_var = tf.nn.log_softmax(logits)

        neg_ll = -tf.reduce_mean(
            tf.reduce_sum(log_softmax_var * self.input_ph, axis=-1))
        # apply regularization to weights
        reg = l2_regularizer(self.lam)

        reg_var = apply_regularization(reg, self.weights_q + self.weights_p)
        # TensorFlow's l2 regularizer multiplies the squared l2 norm by 0.5,
        # so multiply by 2 to bring the penalty back to the usual scale
        neg_ELBO = neg_ll + self.anneal_ph * KL + 2 * reg_var

        train_op = tf.train.AdamOptimizer(self.lr).minimize(neg_ELBO)

        # add summary statistics
        tf.summary.scalar('negative_multi_ll', neg_ll)
        tf.summary.scalar('KL', KL)
        tf.summary.scalar('neg_ELBO_train', neg_ELBO)
        merged = tf.summary.merge_all()

        return saver, logits, neg_ELBO, train_op, merged
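
A small numeric check (assumed values, not part of the project) of the scaling noted in the comment inside build_graph above: l2_regularizer(scale) computes scale * 0.5 * sum(w**2), so doubling reg_var restores the conventional lam * ||w||^2 penalty.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import regularizers

w = tf.constant([1.0, 2.0, 3.0])
penalty = regularizers.l2_regularizer(0.1)(w)
with tf.Session() as sess:
    print(sess.run(penalty))        # 0.1 * 0.5 * (1 + 4 + 9) = 0.7
    print(sess.run(2.0 * penalty))  # 0.1 * (1 + 4 + 9) = 1.4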
Example #27
def high_order_module(inputs, order, scope):
    with tf.variable_scope(scope):
        in_channels = inputs.shape[3]
        inter_channels = in_channels // 8 * 2

        attention = 0
        with arg_scope([layers_lib.conv2d],
                       weights_regularizer=regularizers.l2_regularizer(0.0001),
                       weights_initializer=initializers.
                       variance_scaling_initializer(),
                       activation_fn=None,
                       biases_initializer=None):
            for i in range(order):
                out = 1.0
                for j in range(i + 1):
                    out *= layers_lib.conv2d(
                        inputs,
                        inter_channels,
                        1,
                        scope='hoa_conv_stage_1_order_%d_%d' % (i + 1, j + 1))
                out = tf.nn.relu(out)
                out = layers_lib.conv2d(
                    out,
                    in_channels,
                    1,
                    scope='hoa_conv_stage_2_conv_order_%d' % (i + 1))
                out = tf.nn.sigmoid(out)
                attention += out

        output = inputs * attention / order

    return output
Example #28
    def __init__(self, mode, image, caption, is_training_inception,
                 is_first_time):
        self.config = ModelConfig()
        self.is_first_time = is_first_time
        self.initializer = tf.random_uniform_initializer(
            -self.config.initializer_scale, self.config.initializer_scale)
        self.mode = mode
        self.image = image
        self.caption = caption
        self.caption_len = None
        self.target = None
        self.global_step = tf.Variable(initial_value=0,
                                       name="global_step",
                                       trainable=False,
                                       collections=[
                                           tf.GraphKeys.GLOBAL_STEP,
                                           tf.GraphKeys.GLOBAL_VARIABLES
                                       ])

        self.build(is_training_inception)

        assert mode in ["train", "eval", "inference"]
        if mode == "train":
            self.regularizer = l2_regularizer(0.00004)
        else:
            self.regularizer = None
Example #29
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        # NOTE 'is_training' here does not work because inside resnet it gets reset:
        # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': cfg.RESNET.BN_TRAIN,
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
        'fused': True
    }

    with arg_scope(
        [slim.conv2d, slim.separable_conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=nn_ops.relu,
            #       normalizer_fn=None,
            #       normalizer_params=None):
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
Example #30
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  batch_norm_params = {
      'is_training': False,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'trainable': cfg.RESNET.BN_TRAIN,
      'updates_collections': ops.GraphKeys.UPDATE_OPS
  }

  with arg_scope(
      [slim.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      trainable=is_training,
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
Example #31
def slim_net_original(image, keep_prob):
    with arg_scope([layers.conv2d, layers.fully_connected], biases_initializer=tf.random_normal_initializer(stddev=0.1)):

        # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME',
        # activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None,
        # weights_initializer=initializers.xavier_initializer(), weights_regularizer=None,
        # biases_initializer=init_ops.zeros_initializer, biases_regularizer=None, scope=None):
        net = layers.conv2d(image, 32, [5, 5], scope='conv1', weights_regularizer=regularizers.l1_regularizer(0.5))

        # max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None)
        net = layers.max_pool2d(net, 2, scope='pool1')

        net = layers.conv2d(net, 64, [5, 5], scope='conv2', weights_regularizer=regularizers.l2_regularizer(0.5))
        summaries.summarize_tensor(net, tag='conv2')

        net = layers.max_pool2d(net, 2, scope='pool2')

        net = layers.flatten(net, scope='flatten1')

        # fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None,
        # normalizer_params=None, weights_initializer=initializers.xavier_initializer(),
        # weights_regularizer=None, biases_initializer=init_ops.zeros_initializer,
        # biases_regularizer=None, scope=None):
        net = layers.fully_connected(net, 1024, scope='fc1')

        # dropout(inputs, keep_prob=0.5, is_training=True, scope=None)
        net = layers.dropout(net, keep_prob=keep_prob, scope='dropout1')

        net = layers.fully_connected(net, 10, scope='fc2')
    return net
Example #32
    def initialise_network(self):
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create(self.net_param.name)(
            w_regularizer=w_regularizer, b_regularizer=b_regularizer)
Example #33
def resnet_arg_scope(bn_is_training,
                     bn_trainable,
                     trainable=True,
                     weight_decay=config.TRAIN.weight_decay_factor,
                     batch_norm_decay=0.99,
                     batch_norm_epsilon=1e-9,
                     batch_norm_scale=True,
                     data_format='NHWC'):
    batch_norm_params = {
        'is_training': bn_is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': bn_trainable,
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
        'fused': True
    }

    with arg_scope(
        [slim.conv2d, slim.separable_conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=trainable,
            activation_fn=nn_ops.relu,
            normalizer_fn=slim.batch_norm
            if 'BN' in config.TRAIN.norm else GroupNorm,
            normalizer_params=batch_norm_params
            if 'BN' in config.TRAIN.norm else None,
            data_format=data_format):
        with arg_scope([layers.batch_norm, layers.max_pool2d],
                       data_format=data_format):
            with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:

                return arg_sc
Example #34
    def prepare_inference(self):
        
        self.prepare_data()

        siamese = self.make_subnetwork(is_siamese=True)
        pseudo = self.make_subnetwork(is_siamese=False)
            
        fused_fts = tf.concat((siamese, pseudo), axis=1, name='fused_features')
        
        with tf.variable_scope("fuse_layer") as scope:  # @UnusedVariable
            self.logits = tf.layers.dense(fused_fts, 2, None, use_bias=True,  # @UndefinedVariable
                            kernel_initializer=tf.truncated_normal_initializer(stddev=self.stddev, dtype=tf.float32),
                            bias_initializer=tf.constant_initializer(0.01, dtype=tf.float32),
                            kernel_regularizer=l2_regularizer(self.wd),
                            bias_regularizer=l2_regularizer(self.wd), 
                            name='fused_layer')
Example #35
def resnet_arg_scope(is_training=True,
                     weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    data_format = 'NCHW' if cfg.RESNET.USE_NCHW else 'NHWC'
    batch_norm_params = {
        # NOTE 'is_training' is set appropriately inside of the resnet if we pass it to it:
        # https://github.com/tensorflow/models/blob/master/slim/nets/resnet_v1.py#L187
        # 'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': True,
        'data_format': data_format,
        'fused': True
    }

    with arg_scope(
        [slim.conv2d, slim.conv2d_transpose],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            data_format=data_format,
            normalizer_params=batch_norm_params):
        with arg_scope([slim.max_pool2d, resnet_utils.conv2d_same],
                       data_format=data_format):
            with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
                return arg_sc
Example #36
    def test_3d_reg_shape(self):
        input_shape = (2, 32, 32, 32, 1)
        x = tf.ones(input_shape)

        # vnet_instance = VNet(num_classes=160)
        vnet_instance = VNet(num_classes=160,
                             w_regularizer=regularizers.l2_regularizer(0.4),
                             b_regularizer=regularizers.l2_regularizer(0.4))
        out = vnet_instance(x, is_training=True)
        print(vnet_instance.num_trainable_params())
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 32, 160), out.shape)
Example #37
def resnet_arg_scope(bn_is_training,
                     bn_trainable,
                     trainable=True,
                     weight_decay=cfg.weight_decay,
                     batch_norm_decay=0.99,
                     batch_norm_epsilon=1e-9,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': bn_is_training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': bn_trainable,
        'updates_collections': ops.GraphKeys.UPDATE_OPS
    }

    with arg_scope(
        [slim.conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            trainable=trainable,
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
Example #38
    def deconv_layer(self, deconv_input, layers_to_skip, reuse):
        feature_maps = self.feature_maps[:-1][::-1] + [
            image_channels,
        ]
        kernel_size = self.kernel_size[:-1][::-1] + [
            3,
        ]
        stride_size = self.stride_size[1:][::-1] + [
            1,
        ]
        assert len(kernel_size) == len(feature_maps) == len(
            stride_size), "lens must be equal"
        layers_to_skip_d = layers_to_skip[:-1][::-1]
        net = deconv_input
        with tf.variable_scope('deconv_autoencoder', reuse=reuse):
            for i, (each_feat_map, each_kernel_size, each_stride) in enumerate(
                    zip(feature_maps, kernel_size, stride_size)):
                activation = tf.nn.relu
                if i == (len(stride_size) - 1):
                    # last layer !
                    activation = tf.nn.tanh
                if i > 0:
                    # not first layer !
                    net = tf.concat([net, layers_to_skip_d[i - 1]], axis=3)
                net = slim.conv2d_transpose(
                    net,
                    each_feat_map, [each_kernel_size, each_kernel_size],
                    stride=each_stride,
                    activation_fn=activation,
                    scope='deconv_' + str(i),
                    weights_initializer=trunc_normal(0.01),
                    weights_regularizer=regularizers.l2_regularizer(l2_val))

            return net
Example #39
def generator1(z, training=True, weight_decay=0.0001, batch_norm_decay=0.997,
        batch_norm_epsilon=1e-5, batch_norm_scale=True):
    batch_norm_params = {
        'is_training': training,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
    }

    c0 = tf.reshape(z, [batch_size, 1, 1, z_dim])

    with arg_scope(
        [slim.conv2d],
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers.batch_norm,
        normalizer_params=batch_norm_params):

        gen = tf.image.resize_nearest_neighbor(c0, [2,2])
        gen = slim.conv2d(gen, 1024, [3,3], [1,1], scope="convolution1")

        gen = tf.image.resize_nearest_neighbor(gen, [4,4])
        gen = slim.conv2d(gen, 512, [3,3], [1,1], scope="convolution2")

        gen = tf.image.resize_nearest_neighbor(gen, [8,8])
        gen = slim.conv2d(gen, 256, [3,3], [1,1], scope="convolution3")

        gen = tf.image.resize_nearest_neighbor(gen, [16,16])
        gen = slim.conv2d(gen, 128, [3,3], [1,1], scope="convolution4")

        gen = tf.image.resize_nearest_neighbor(gen, [32,32])
        gen = slim.conv2d(gen, 64, [3,3], [1,1], scope="convolution5")

        gen = tf.image.resize_nearest_neighbor(gen, [64,64])

        # l = [(4096, [3,3], [2,2]), (2048, [3,3], [2,2]), (2048, [3,3], [2,2]),
        #     (2048, [3,3], [2,2]), (1024, [3,3], [2,2]), (512, [3,3], [2,2]), (256, [3,3], [2,2]), (128, [3,3], [1,1]), (3, [3,3], [1,1])]


    gen = slim.conv2d(gen, 3, [3,3], [1,1], weights_regularizer=regularizers.l2_regularizer(weight_decay),
        weights_initializer=initializers.variance_scaling_initializer(), activation_fn=tf.tanh, scope="convolutionend")


    return gen
Example #40
 def test_apply_zero_regularization(self):
   regularizer = regularizers.l2_regularizer(0.0)
   array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
   tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
   with self.cached_session():
     result = regularizers.apply_regularization(regularizer,
                                                tensor_weights_list)
     self.assertAllClose(0.0, result.eval())
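
A companion sketch (hypothetical, not part of the original test file) with a nonzero scale; apply_regularization sums the l2 penalty over every tensor in the list.

 def test_apply_nonzero_regularization(self):
   regularizer = regularizers.l2_regularizer(0.1)
   array_weights_list = [[1.0, 2.0], [3.0]]
   tensor_weights_list = [constant_op.constant(x) for x in array_weights_list]
   with self.cached_session():
     result = regularizers.apply_regularization(regularizer,
                                                tensor_weights_list)
     # expected: 0.1 * 0.5 * (1 + 4) + 0.1 * 0.5 * 9 = 0.7
     self.assertAllClose(0.7, result.eval())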
Example #41
def inception_2d_fields(img,
                        fields,
                        num_classes=30,
                        is_training=True,
                        dropout_keep_prob=0.6,
                        prediction_fn=layers_lib.softmax,
                        spatial_squeeze=True,
                        reuse=None,
                        scope='InceptionV1_Fields'
                        ):
    with arg_scope([layers.conv2d, layers_lib.fully_connected],
                   weights_initializer=tf.contrib.layers.xavier_initializer(),
                   biases_initializer=tf.constant_initializer(0.2),
                   weights_regularizer=regularizers.l2_regularizer(0.0002),
                   biases_regularizer=regularizers.l2_regularizer(0.0002)):
        net, end_points = inception_2d.inception_v1_base(img, scope=scope, final_endpoint='Mixed_4b')
        with variable_scope.variable_scope('Logits'):
            net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope='AvgPool_0a_5x5')
            net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
            net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
            net = array_ops.squeeze(net,[1,2],name='Squeeze4Fields')
            net = tf.concat([net,fields],axis=1)
            net = layers.fully_connected(inputs=net, num_outputs=1024)
            net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
            logits = layers.fully_connected(inputs=net,
                                            num_outputs=num_classes,
                                            activation_fn=None,
                                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                                            biases_initializer=tf.constant_initializer(0.0),
                                            weights_regularizer=regularizers.l2_regularizer(0.0002),
                                            biases_regularizer=regularizers.l2_regularizer(0.0002),
                                            scope='InnerProduct')
            # logits = layers.conv2d(
            #     net,
            #     num_classes, [1, 1],
            #     activation_fn=None,
            #     normalizer_fn=None,
            #     scope='Conv2d_0c_1x1')
            if spatial_squeeze:
                logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')

            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits, scope='Predictions')


    return logits, end_points
Example #42
def stpnet_v4(inputs,
              num_outputs=30,
              dropout_keep_prob=0.8,
              pooling_kernel_size=[[1, 2, 2], [1, 4, 4], [1, 8, 8]],
              pooling_stride=[[1, 2, 2], [1, 4, 4], [1, 8, 8]],
              scope='Inception3D'):
    # Inception v1 based
    with tf.variable_scope(scope, 'InceptionV1', [inputs]) as scope:
        net, end_points = stpnet_base(inputs=inputs,
                                      final_endpoint='Inception_5b')
        with tf.variable_scope('Logits'):
            pyramid_0 = tf.contrib.layers.avg_pool3d(
                inputs=net,
                kernel_size=pooling_kernel_size[0],
                stride=pooling_stride[0],
                scope='AvgPool3D_Pyramid_0')
            pyramid_0_flattened = tf.contrib.layers.flatten(pyramid_0)
            pyramid_1 = tf.contrib.layers.avg_pool3d(
                inputs=net,
                kernel_size=pooling_kernel_size[1],
                stride=pooling_stride[1],
                scope='AvgPool3D_Pyramid_1')
            pyramid_1_flattened = tf.contrib.layers.flatten(pyramid_1)
            pyramid_2 = tf.contrib.layers.avg_pool3d(
                inputs=net,
                kernel_size=pooling_kernel_size[2],
                stride=pooling_stride[2],
                scope='AvgPool3D_Pyramid_2')
            pyramid_2_flattened = tf.contrib.layers.flatten(pyramid_2)
            net = tf.concat([
                pyramid_0_flattened, pyramid_1_flattened, pyramid_2_flattened
            ],
                            axis=1)
            end_points['AvgPool3D'] = net
            # tf.layers.dropout expects a drop rate, not a keep probability
            net = tf.layers.dropout(inputs=net, rate=1.0 - dropout_keep_prob)
            logits = tf.contrib.layers.fully_connected(
                inputs=net,
                num_outputs=num_outputs,
                activation_fn=None,
                weights_initializer=tf.contrib.layers.xavier_initializer(),
                biases_initializer=tf.constant_initializer(0.0),
                weights_regularizer=regularizers.l2_regularizer(0.0002),
                biases_regularizer=regularizers.l2_regularizer(0.0002),
                scope='InnerProduct')
            end_points['Logits'] = logits
            return logits, end_points
Example #43
    def initialise_network(self):
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create(self.net_param.name)(
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer)
Example #44
    def test_3d_reg_shape(self):
        input_shape = (2, 32, 32, 32, 1)
        x = tf.ones(input_shape)

        # vnet_instance = VNet(num_classes=160)
        vnet_instance = VNet(
            num_classes=160,
            w_regularizer=regularizers.l2_regularizer(0.4),
            b_regularizer=regularizers.l2_regularizer(0.4))
        out = vnet_instance(x, is_training=True)
        print(vnet_instance.num_trainable_params())
        # print(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 32, 160), out.shape)
Example #45
def alexnet_v2_arg_scope(weight_decay=0.0005):
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
            activation_fn=nn_ops.relu,
            biases_initializer=init_ops.constant_initializer(0.1),
            weights_regularizer=regularizers.l2_regularizer(weight_decay)):
        with arg_scope([layers.conv2d], padding='SAME'):
            with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
                return arg_sc
Example #46
def alexnet_v2_arg_scope(weight_decay=0.0005):
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      biases_initializer=init_ops.constant_initializer(0.1),
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
Example #47
def train(config_file):
    # 1, load configuration parameters
    config = parse_config(config_file)
    config_data = config['data']
    config_net = config['network']
    config_train = config['training']

    random.seed(config_train.get('random_seed', 1))
    assert (config_data['with_ground_truth'])

    net_type = config_net['net_type']
    net_name = config_net['net_name']
    class_num = config_net['class_num']
    batch_size = config_data.get('batch_size', 5)

    # 2, construct graph
    full_data_shape = [batch_size] + config_data['data_shape']
    full_label_shape = [batch_size] + config_data['label_shape']
    x = tf.placeholder(tf.float32, shape=full_data_shape)
    w = tf.placeholder(tf.float32, shape=full_label_shape)
    y = tf.placeholder(tf.int64, shape=full_label_shape)

    w_regularizer = regularizers.l2_regularizer(config_train.get(
        'decay', 1e-7))
    b_regularizer = regularizers.l2_regularizer(config_train.get(
        'decay', 1e-7))
    net_class = NetFactory.create(net_type)
    net = net_class(num_classes=class_num,
                    w_regularizer=w_regularizer,
                    b_regularizer=b_regularizer,
                    name=net_name)
    net.set_params(config_net)
    predicty = net(x, is_training=True)
    proby = tf.nn.softmax(predicty)

    loss_func = LossFunction(n_class=class_num)
    loss = loss_func(predicty, y, weight_map=w)
    print('size of predicty:', predicty)

    # 3, initialize session and saver
    lr = config_train.get('learning_rate', 1e-3)
    opt_step = tf.train.AdamOptimizer(lr).minimize(loss)
    tf.summary.FileWriter("./graphs/" + config_net['net_name'],
                          tf.get_default_graph()).close()
Example #48
 def test_3d_prelu_reg_shape(self):
     x = self.get_3d_input()
     prelu_layer = ActiLayer(func='prelu',
                             regularizer=regularizers.l2_regularizer(0.5),
                             name='regularized')
     out_prelu = prelu_layer(x)
     print(prelu_layer)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         out = sess.run(out_prelu)
         self.assertAllClose((2, 16, 16, 16, 8), out.shape)
Example #49
 def test_fclayer_3d_bn_reg_dropout_valid_shape(self):
     input_param = {'n_output_chns': 10,
                    'with_bias': False,
                    'with_bn': True,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'acti_func': 'prelu', }
     self._test_fc_layer_output_shape(rank=3,
                                      param_dict=input_param,
                                      output_shape=(2, 10),
                                      is_training=True,
                                      dropout_prob=0.4)
Example #50
 def test_convlayer_2d_bn_reg_prelu_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': 3,
                    'stride': 1,
                    'with_bias': False,
                    'with_bn': True,
                    'acti_func': 'prelu',
                    'w_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_conv_layer_output_shape(rank=2,
                                        param_dict=input_param,
                                        output_shape=(2, 16, 16, 10),
                                        is_training=True)
Example #51
    def shape_test_reg(self, input_shape, expected_shape):
        x = tf.ones(input_shape)
        layer_param = {
            'num_classes': 5,
            'w_regularizer': regularizers.l2_regularizer(0.5),
            'b_regularizer': regularizers.l2_regularizer(0.5)}

        highres_layer = HighRes3DNet(**layer_param)
        highres_layer_small = HighRes3DNetSmall(**layer_param)
        highres_layer_large = HighRes3DNetLarge(**layer_param)

        out = highres_layer(x, is_training=True)
        out_small = highres_layer_small(x, is_training=True)
        out_large = highres_layer_large(x, is_training=True)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out, out_large, out_small = sess.run([out, out_large, out_small])
            self.assertAllClose(expected_shape, out.shape)
            self.assertAllClose(expected_shape, out_large.shape)
            self.assertAllClose(expected_shape, out_small.shape)
Example #52
    def test_2d_reg_shape(self):
        input_shape = (2, 96, 96, 1)
        x = tf.ones(input_shape)

        unet_instance = UNet3D(num_classes=160,
                               w_regularizer=regularizers.l2_regularizer(0.4))
        out = unet_instance(x, is_training=True)
        print(unet_instance.num_trainable_params())

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 8, 8, 160), out.shape)
Example #53
def resnet_arg_scope(is_training=True,
                     weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    is_training: Whether or not we are training the parameters in the batch
      normalization layers of the model.
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the resnet models.
  """
  batch_norm_params = {
      'is_training': is_training,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
  }

  with arg_scope(
      [layers_lib.conv2d],
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      weights_initializer=initializers.variance_scaling_initializer(),
      activation_fn=nn_ops.relu,
      normalizer_fn=layers.batch_norm,
      normalizer_params=batch_norm_params):
    with arg_scope([layers.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # tf.contrib.framework.arg_scope([tf.contrib.layers.max_pool2d], padding='VALID').
      with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
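
An illustrative way to consume such a scope (assuming tf.contrib.slim and its bundled ResNet definitions; the input placeholder and class count here are examples, not from the source): every conv layer created inside the scope picks up the l2 weight regularizer, the variance-scaling initializer and the batch-norm settings.

import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim.nets import resnet_v1

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(resnet_arg_scope(is_training=True, weight_decay=1e-4)):
    net, end_points = resnet_v1.resnet_v1_50(images, num_classes=1000)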
Example #54
 def test_convlayer_3d_bn_reg_dropout_valid_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': [5, 3, 2],
                    'stride': [2, 2, 3],
                    'with_bias': False,
                    'with_bn': True,
                    'w_regularizer': regularizers.l2_regularizer(0.5),
                    'acti_func': 'prelu',
                    'padding': 'VALID'}
     self._test_conv_layer_output_shape(rank=3,
                                        param_dict=input_param,
                                        output_shape=(2, 6, 7, 5, 10),
                                        is_training=True,
                                        dropout_prob=0.4)
Example #55
    def initialise_network(self):
        w_regularizer = None
        b_regularizer = None
        reg_type = self.net_param.reg_type.lower()
        decay = self.net_param.decay
        if reg_type == 'l2' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l2_regularizer(decay)
            b_regularizer = regularizers.l2_regularizer(decay)
        elif reg_type == 'l1' and decay > 0:
            from tensorflow.contrib.layers.python.layers import regularizers
            w_regularizer = regularizers.l1_regularizer(decay)
            b_regularizer = regularizers.l1_regularizer(decay)

        self.net = ApplicationNetFactory.create(self.net_param.name)(
            num_classes=self.classification_param.num_classes,
            w_initializer=InitializerFactory.get_initializer(
                name=self.net_param.weight_initializer),
            b_initializer=InitializerFactory.get_initializer(
                name=self.net_param.bias_initializer),
            w_regularizer=w_regularizer,
            b_regularizer=b_regularizer,
            acti_func=self.net_param.activation_function)
Example #56
    def test_2d_reg_shape(self):
        input_shape = (2, 32, 32, 4)
        x = tf.ones(input_shape)

        scalenet_layer = ScaleNet(num_classes=5,
                                  w_regularizer=regularizers.l2_regularizer(
                                      0.3))
        out = scalenet_layer(x, is_training=True)
        print(scalenet_layer.num_trainable_params())

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            self.assertAllClose((2, 32, 32, 5), out.shape)
Example #57
def inception_v3_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV3 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
Example #58
 def test_deconvlayer_2d_bn_reg_shape(self):
     input_param = {'n_output_chns': 10,
                    'kernel_size': [3, 1],
                    'stride': [1, 3],
                    'with_bias': False,
                    'with_bn': True,
                    'w_regularizer': regularizers.l2_regularizer(0.5)}
     self._test_deconv_layer_output_shape(rank=2,
                                          param_dict=input_param,
                                          output_shape=(2, 16, 48, 10),
                                          is_training=True)
     self._test_deconv_layer_output_shape(rank=2,
                                          param_dict=input_param,
                                          output_shape=(2, 16, 48, 10),
                                          is_training=False)
Example #59
    def test_2d_reg_shape(self):
        #input_shape = (2, 572, 572, 5)
        input_shape = (2, 180, 180, 5)
        x = tf.ones(input_shape)

        unet_instance = UNet2D(num_classes=2,
                               w_regularizer=regularizers.l2_regularizer(0.4))
        out = unet_instance(x, is_training=True)
        print(unet_instance.num_trainable_params())

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            out = sess.run(out)
            #self.assertAllClose((2, 388, 388, 2), out.shape)
            self.assertAllClose((2, 4, 4, 2), out.shape)
Example #60
def inception_v1_arg_scope(weight_decay=0.00004,
                           use_batch_norm=True,
                           batch_norm_var_collection='moving_vars'):
  """Defines the default InceptionV1 arg scope.

  Note: Although the original paper didn't use batch_norm, we found it useful.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: If `True`, batch_norm is applied after each convolution.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.

  Returns:
    An `arg_scope` to use for the inception v1 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }
  if use_batch_norm:
    normalizer_fn = layers_lib.batch_norm
    normalizer_params = batch_norm_params
  else:
    normalizer_fn = None
    normalizer_params = {}
  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc