Example #1
 def __init__(self):
     super(HasMapping, self).__init__()
     self.layer_dict = data_structures.Mapping(output=core.Dense(7))
     self.layer_dict["norm"] = data_structures.List()
     self.layer_dict["dense"] = data_structures.List()
     self.layer_dict["dense"].extend([
         core.Dense(5),
         core.Dense(6, kernel_regularizer=math_ops.reduce_sum)
     ])
     self.layer_dict["norm"].append(normalization.BatchNormalization())
     self.layer_dict["norm"].append(normalization.BatchNormalization())
Example #2
 def my_func():
     layer = normalization.BatchNormalization()
     x = array_ops.ones((10, 1))
     y = layer(x, training=True)
     # Updates should be tracked in a `wrap_function`.
     self.assertLen(layer.updates, 2)
     return y
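
This fragment appears to be the body of a function nested inside a test case (so self is captured from the enclosing test). Compare Example #9 below, where an equivalent function is wrapped and executed; a sketch of that usage:

    # Based on Example #9: wrapping captures the layer's update ops.
    wrapped_fn = wrap_function.wrap_function(my_func, [])
    wrapped_fn()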
Example #3
    def test_v1_fused_attribute(self):
        norm = normalization.BatchNormalization()
        inp = keras.layers.Input((4, 4, 4))
        norm(inp)
        self.assertEqual(norm.fused, True)

        norm = normalization.BatchNormalization(fused=False)
        self.assertEqual(norm.fused, False)
        inp = keras.layers.Input(shape=(4, 4, 4))
        norm(inp)
        self.assertEqual(norm.fused, False)

        norm = normalization.BatchNormalization(virtual_batch_size=2)
        self.assertEqual(norm.fused, True)
        inp = keras.layers.Input(shape=(2, 2, 2))
        norm(inp)
        self.assertEqual(norm.fused, False)
Example #4
    def benchmark_layers_batch_norm_nonfused_train(self):

        layer = normalization.BatchNormalization(fused=False)
        x = array_ops.ones((1, 1, 1, 1))

        def fn():
            layer(x, training=True)

        self._run(fn, 10000)
Example #5
    def benchmark_layers_normalization_batch_normalization_overhead(self):

        layer = normalization.BatchNormalization()
        x = array_ops.ones((1, 1))

        def fn():
            layer(x, training=True)

        self._run(fn, 10000)
Example #6
    def benchmark_layers_batch_norm_fused_inf(self):

        layer = normalization.BatchNormalization(fused=True)
        x = array_ops.ones((1, 1, 1, 1))

        def fn():
            layer(x)

        self._run(fn, 10000)
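
The three benchmark methods above call a _run helper that is not shown here. A minimal hypothetical version (assumed, not taken from the original benchmark class) simply times repeated calls of the given function:

    import time

    # Hypothetical helper: one warm-up call, then time `iters` invocations.
    def _run(self, func, iters):
        func()  # warm-up, so tracing/variable creation is excluded
        start = time.time()
        for _ in range(iters):
            func()
        elapsed = time.time() - start
        print('%d iterations, %.3f us per call' % (iters, elapsed / iters * 1e6))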
Example #7
    def DenseBlock(self, inputs, outdim):
        inputshape = K.int_shape(inputs)
        bn = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(inputs)
        act = Activation('relu')(bn)
        # act = Dropout(rate=0.2)(act)
        conv1 = Conv2D(outdim, (3, 3), activation=None, padding='same', kernel_regularizer=regularizers.l2(0.0001))(act)
        if inputshape[3] != outdim:
            shortcut = Conv2D(outdim, (1, 1), padding='same', kernel_regularizer=regularizers.l2(0.0001))(inputs)
        else:
            shortcut = inputs
        result1 = add([conv1, shortcut])

        bn = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(result1)
        act = Activation('relu')(bn)
        # act = Dropout(rate=0.2)(act)
        conv2 = Conv2D(outdim, (3, 3), activation=None, padding='same', kernel_regularizer=regularizers.l2(0.0001))(act)
        result = add([result1, conv2, shortcut])
        result = Activation('relu')(result)
        return result
Example #8
    def build_model(self):
        inputs = Input((self.patch_height, self.patch_width, 1))
        conv1 = Conv2D(32, (1, 1), activation=None, padding='same')(inputs)
        conv1 = normalization.BatchNormalization(epsilon=2e-05, axis=3, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv1)
        conv1 = Activation('relu')(conv1)

        conv1 = self.DenseBlock(conv1, 32)  # 48
        conv1 = self.se_block(ratio=2)(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = self.DenseBlock(pool1, 64)  # 24
        conv2 = self.se_block(ratio=2)(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = self.DenseBlock(pool2, 64)  # 12
        conv3 = self.se_block(ratio=2)(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = self.DenseBlock(pool3, 64)  # 12
        conv4 = self.se_block(ratio=2)(conv4)

        up1 = Conv2DTranspose(64, (3, 3), strides=2, activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.0001))(conv4)
        up1 = concatenate([up1, conv3], axis=3)

        conv5 = self.DenseBlock(up1, 64)
        conv5 = self.se_block(ratio=2)(conv5)

        up2 = Conv2DTranspose(64, (3, 3), strides=2, activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.0001))(conv5)
        up2 = concatenate([up2, conv2], axis=3)

        conv6 = self.DenseBlock(up2, 64)
        conv6 = self.se_block(ratio=2)(conv6)

        up3 = Conv2DTranspose(32, (3, 3), strides=2, activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.0001))(conv6)
        up3 = concatenate([up3, conv1], axis=3)

        conv7 = self.DenseBlock(up3, 32)
        conv7 = self.se_block(ratio=2)(conv7)

        conv8 = Conv2D(self.num_seg_class + 1, (1, 1), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.0001))(conv7)
        # conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

        # for tensorflow
        conv8 = core.Reshape((self.patch_height * self.patch_width, self.num_seg_class + 1))(conv8)
        # for theano
        # conv8 = core.Reshape(((self.num_seg_class + 1), self.patch_height * self.patch_width))(conv8)
        # conv8 = core.Permute((2, 1))(conv8)
        ############
        act = Activation('softmax')(conv8)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])
        plot_model(model, to_file=os.path.join(self.config.checkpoint, "model.png"), show_shapes=True)
        self.model = model
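
se_block is not defined in this example. A minimal hypothetical squeeze-and-excitation block matching the self.se_block(ratio=...)(tensor) call pattern used above, assuming the usual keras.layers imports (GlobalAveragePooling2D, Dense, Reshape, multiply), could look like this:

    # Hypothetical implementation, not from the original project: returns a
    # callable that recalibrates the channels of its input tensor.
    def se_block(self, ratio=2):
        def layer(x):
            channels = K.int_shape(x)[-1]
            squeeze = GlobalAveragePooling2D()(x)
            excite = Dense(channels // ratio, activation='relu')(squeeze)
            excite = Dense(channels, activation='sigmoid')(excite)
            excite = Reshape((1, 1, channels))(excite)
            return multiply([x, excite])
        return layer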
Example #9
    def test_updates_in_wrap_function(self):
        layer = normalization.BatchNormalization()

        def my_func():
            x = array_ops.ones((10, 1))
            return layer(x, training=True)

        wrapped_fn = wrap_function.wrap_function(my_func, [])
        wrapped_fn()

        # Updates should be tracked in a `wrap_function`.
        self.assertLen(layer.updates, 2)
Example #10
 def __init__(self):
     super(HasList, self).__init__()
     self.layer_list = data_structures.List([core.Dense(3)])
     self.layer_list.append(core.Dense(4))
     self.layer_list.extend([
         core.Dense(5),
         core.Dense(6, kernel_regularizer=math_ops.reduce_sum)
     ])
     self.layer_list += [
         core.Dense(7, bias_regularizer=math_ops.reduce_sum),
         core.Dense(8)
     ]
     self.layer_list += (data_structures.List([core.Dense(9)]) +
                         data_structures.List([core.Dense(10)]))
     self.layer_list.extend(
         data_structures.List(list([core.Dense(11)]) + [core.Dense(12)]))
     self.layers_with_updates = data_structures.List(
         (normalization.BatchNormalization(), ))
Example #11
 def f(inputs):
     conv = Conv2D(filters, strides, padding=padding)(inputs)
     conv = normalization.BatchNormalization(
         epsilon=2e-05,
         axis=3,
         momentum=0.9,
         weights=None,
         beta_initializer='RandomNormal',
         gamma_initializer='one')(conv)
     conv = LeakyReLU(alpha=0.3)(conv)
     conv = Dropout(0.2)(conv)
     conv = Conv2D(filters, strides, dilation_rate=2,
                   padding=padding)(conv)
     conv = LeakyReLU(alpha=0.3)(conv)
     conv = Conv2D(filters, strides, dilation_rate=4,
                   padding=padding)(conv)
     conv = LeakyReLU(alpha=0.3)(conv)
     return conv
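
The function f closes over filters, strides, and padding, so in the original project it is presumably returned by an enclosing builder. A hypothetical sketch of such a factory (note that strides is passed to Conv2D in the kernel_size position):

    # Hypothetical enclosing factory; names and defaults are assumptions.
    def conv_block(filters, strides, padding='same'):
        def f(inputs):
            ...  # body as in the example above
        return f

    block = conv_block(64, (3, 3))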
Example #12
 def __init__(self):
     super(HasTuple, self).__init__()
     self.layer_list = (core.Dense(3), core.Dense(4),
                        core.Dense(5,
                                   kernel_regularizer=math_ops.reduce_sum))
     self.layers_with_updates = (normalization.BatchNormalization(), )
Example #13
  def __init__(
      self,
      # DepthwiseConv2D params
      kernel_size,
      strides=(1, 1),
      padding='valid',
      depth_multiplier=1,
      data_format=None,
      depthwise_initializer='glorot_uniform',
      depthwise_regularizer=None,
      bias_regularizer=None,
      activity_regularizer=None,
      depthwise_constraint=None,
      bias_constraint=None,
      name=None,
      # BatchNormalization params
      axis=-1,
      momentum=0.99,
      epsilon=1e-3,
      center=True,
      scale=True,
      beta_initializer='zeros',
      gamma_initializer='ones',
      moving_mean_initializer='zeros',
      moving_variance_initializer='ones',
      beta_regularizer=None,
      gamma_regularizer=None,
      beta_constraint=None,
      gamma_constraint=None,
      renorm=False,
      renorm_clipping=None,
      renorm_momentum=0.99,
      fused=None,
      trainable=True,
      virtual_batch_size=None,
      adjustment=None,
      # Post-batchnorm activation instance.
      post_activation=None,
      # quantization params
      is_quantized=True,
      **kwargs):
    super(_DepthwiseConvBatchNorm2D, self).__init__(
        kernel_size,
        strides=strides,
        padding=padding,
        depth_multiplier=depth_multiplier,
        data_format=data_format,
        use_bias=False,
        depthwise_initializer=depthwise_initializer,
        depthwise_regularizer=depthwise_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        depthwise_constraint=depthwise_constraint,
        bias_constraint=bias_constraint,
        name=name,
        **kwargs)

    self.batchnorm = normalization.BatchNormalization(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        beta_constraint=beta_constraint,
        gamma_constraint=gamma_constraint,
        renorm=renorm,
        renorm_clipping=renorm_clipping,
        renorm_momentum=renorm_momentum,
        fused=fused,
        trainable=trainable,
        virtual_batch_size=virtual_batch_size,
        adjustment=adjustment,
    )
    self.post_activation = activations.get(post_activation)

    self.is_quantized = is_quantized
    if self.is_quantized:
      self.weight_quantizer = default_8bit_quantizers.Default8BitConvWeightsQuantizer()

      self.activation_quantizer = quantizers.MovingAverageQuantizer(
          num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
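
A hypothetical instantiation of the depthwise wrapper configured above; every argument value here is illustrative only:

    # Illustrative only; argument values are assumptions.
    dw_conv_bn = _DepthwiseConvBatchNorm2D(
        kernel_size=(3, 3),
        strides=(1, 1),
        padding='same',
        post_activation='relu',
        is_quantized=False)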
  def __init__(
      self,
      # Conv2D params
      filters,
      kernel_size,
      strides=(1, 1),
      padding='valid',
      data_format=None,
      dilation_rate=(1, 1),
      kernel_initializer='glorot_uniform',
      kernel_regularizer=None,
      bias_regularizer=None,
      activity_regularizer=None,
      kernel_constraint=None,
      bias_constraint=None,
      name=None,
      # BatchNormalization params
      axis=-1,
      momentum=0.99,
      epsilon=1e-3,
      center=True,
      scale=True,
      beta_initializer='zeros',
      gamma_initializer='ones',
      moving_mean_initializer='zeros',
      moving_variance_initializer='ones',
      beta_regularizer=None,
      gamma_regularizer=None,
      beta_constraint=None,
      gamma_constraint=None,
      renorm=False,
      renorm_clipping=None,
      renorm_momentum=0.99,
      fused=None,
      trainable=True,
      virtual_batch_size=None,
      adjustment=None,
      # Post-batchnorm activation.
      post_activation=None,
      # quantization params
      is_quantized=True,
      **kwargs):
    super(_ConvBatchNorm2D, self).__init__(
        filters,
        kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        use_bias=False,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        name=name,
        **kwargs)

    self.batchnorm = normalization.BatchNormalization(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        beta_constraint=beta_constraint,
        gamma_constraint=gamma_constraint,
        renorm=renorm,
        renorm_clipping=renorm_clipping,
        renorm_momentum=renorm_momentum,
        fused=fused,
        trainable=trainable,
        virtual_batch_size=virtual_batch_size,
        adjustment=adjustment,
    )

    # Named as post_activation to not conflict with Layer self.activation.
    self.post_activation = activations.get(post_activation)

    self.is_quantized = is_quantized
    if self.is_quantized:
      self.weight_quantizer = tflite_quantizers.ConvWeightsQuantizer()

      self.activation_quantizer = quantizers.MovingAverageQuantizer(
          num_bits=8, per_axis=False, symmetric=False, narrow_range=False)
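
The constructors above only wire up the pieces; the library's real forward pass and quantization logic are more involved, but conceptually the wrapper composes convolution, batch normalization, and the post-activation. A rough, hypothetical sketch of that composition (not the actual tfmot implementation):

  # Rough sketch only; the real wrapper also inserts weight and activation
  # fake-quant ops when is_quantized is True.
  def call(self, inputs, training=None):
    conv_out = super(_ConvBatchNorm2D, self).call(inputs)  # conv, use_bias=False
    bn_out = self.batchnorm(conv_out, training=training)
    return self.post_activation(bn_out)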