Example #1
import math

# DenseNet-style transition layer: BN + ReLU, a 1x1 convolution that
# compresses the channel count, then 2x2 average pooling.
def transition_layers(out_channels, compression):
    return lib.Sequential(lib.layers.BatchNorm(activation='relu'),
                          lib.layers.Conv2D(out_channels=math.floor(out_channels * compression),
                                            kernel_size=1),
                          lib.layers.AvgPool2D(kernel_size=2,
                                               padding='SAME'),
                          name='transition')
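Note that out_channels here is the incoming channel count; the layer emits floor(out_channels * compression) channels. A minimal usage sketch (the channel figures are illustrative, not from the source):

# With compression=0.5, a 256-channel input is compressed to
# floor(256 * 0.5) = 128 channels, then spatially halved by the pooling.
transition = transition_layers(out_channels=256, compression=0.5)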
Example #2
 # Stacks ResNet blocks, converting strides into atrous (dilated)
 # convolutions once the accumulated stride reaches `output_stride` --
 # the dense-prediction trick from the slim/deeplab ResNet utilities.
 def stack_blocks_dense(self, blocks, in_channels, output_stride=None):
     # The stem (stride-2 conv + stride-2 pool) has already subsampled by 4.
     current_stride = 4
     rate = 1
     for block in blocks:
         block_layer = lib.Sequential(name=block.name)
         block_stride = 1
         for i, unit in enumerate(block.args):
             if i == len(block.args) - 1:
                 # Defer the last unit's stride to the end of the block, as
                 # in the upstream implementation this mirrors; the original
                 # snippet never updated `block_stride`, leaving the
                 # subsampling below a no-op.
                 block_stride = unit.get('stride', 1)
                 unit = dict(unit, stride=1)
             unit['name'] = 'unit_%d' % (i + 1)
             unit['depth_in'] = in_channels
             # `rate` is handed to the unit below; the original snippet
             # computed it but never passed it on.
             if output_stride is not None and current_stride == output_stride:
                 # Target stride reached: run the unit unstrided and fold the
                 # skipped stride into the atrous rate for later layers.
                 block_layer.append(block.unit(**dict(unit, stride=1, rate=rate)))
                 rate *= unit.get('stride', 1)
             else:
                 block_layer.append(block.unit(**dict(unit, rate=1)))
                 current_stride *= unit.get('stride', 1)
                 if output_stride is not None and current_stride > output_stride:
                     raise ValueError(
                         "The target output_stride cannot be reached.")
             in_channels = unit.get('depth')
         if output_stride is not None and current_stride == output_stride:
             rate *= block_stride
         else:
             block_layer.append(resnet_v1_utils.sub_sample(block_stride))
             current_stride *= block_stride
             if output_stride is not None and current_stride > output_stride:
                 raise ValueError(
                     "The target output_stride cannot be reached.")
         self.bottlenecks.append(block_layer)
     if output_stride is not None and current_stride != output_stride:
         raise ValueError("The target output_stride cannot be reached.")
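This is the dense-prediction (atrous) trick: once the accumulated stride reaches output_stride, further striding is rewritten as dilation so the feature map stops shrinking. A hedged sketch of how it might be driven; Block and Bottleneck are placeholders inferred from how block.name, block.unit and block.args are used above, not definitions from the source:

import collections

# Hypothetical container matching the attribute access in the code above.
Block = collections.namedtuple('Block', ['name', 'unit', 'args'])

blocks = [
    Block('block1', Bottleneck,
          [{'depth': 256, 'stride': 1}] * 2 + [{'depth': 256, 'stride': 2}]),
    Block('block2', Bottleneck,
          [{'depth': 512, 'stride': 1}] * 3 + [{'depth': 512, 'stride': 2}]),
]
# current_stride starts at 4 (the stem); block1's deferred stride raises it
# to 8. With output_stride=8, block2 then runs every unit unstrided and
# folds its stride into the atrous rate instead.
model.stack_blocks_dense(blocks, in_channels=64, output_stride=8)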
Example #3
 def _make_layer(self, block, out_channels, num_block, strides=1):
     downsample = None
     if self.channels != out_channels * block.expansion:
         # Channel count changes: project the shortcut with a 1x1 conv.
         downsample = lib.contrib.Conv2D(out_channels * block.expansion,
                                         kernel_size=1,
                                         strides=strides)
     elif strides != 1:
         # Same channel count, spatial subsampling only: a strided 1x1
         # max-pool subsamples the shortcut without adding parameters.
         downsample = lib.layers.MaxPool2D(kernel_size=1, strides=strides)
     # Only the first block of the stage strides and downsamples.
     blocks = [block(out_channels, strides, downsample=downsample)]
     for _ in range(1, num_block):
         blocks.append(block(out_channels))
     return lib.Sequential(*blocks)
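A hedged sketch of how a ResNet-50-style network might call this; the stage sizes are the standard ResNet-50 ones, and Bottleneck (with expansion = 4) is a placeholder implied by block.expansion:

# Inside the network's __init__; only the first block of each stage strides.
self.layer1 = self._make_layer(Bottleneck, 64, num_block=3)
self.layer2 = self._make_layer(Bottleneck, 128, num_block=4, strides=2)
self.layer3 = self._make_layer(Bottleneck, 256, num_block=6, strides=2)
self.layer4 = self._make_layer(Bottleneck, 512, num_block=3, strides=2)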
Example #4
 def __init__(self,
              blocks,
              num_classes=None,
              is_training=None,
              base_only=False,
              global_pool=True,
              output_stride=None,
              root_block=None,
              **kwargs):
     super(ResNetV1Beta, self).__init__(**kwargs)
     if not base_only and output_stride not in [32, None]:
         raise ValueError(
             "When `base_only` is False, `output_stride` must be 32 or None, "
             "but got %d." % output_stride)
     if output_stride is not None and output_stride not in [8, 16, 32]:
         raise ValueError(
             "The only allowed output_stride values are 8, 16 and 32.")
     self.base_only = base_only
     self.global_pool = global_pool
     self.bottlenecks = lib.Sequential(name='')
     self.num_classes = num_classes
     if is_training is not None:
         arg_scope = lib.engine.arg_scope([lib.layers.BatchNorm],
                                          trainable=is_training)
     else:
         arg_scope = lib.engine.arg_scope([])
     with arg_scope:
         if root_block is None:
             # Default stem: a single stride-2 7x7 convolution.
             self.conv1 = resnet_v1_utils.conv2d_same(out_channels=64,
                                                      kernel_size=7,
                                                      stride=2,
                                                      name='conv1')
         else:
             self.conv1 = root_block
         self.pool1 = lib.layers.MaxPool2D(kernel_size=3,
                                           strides=2,
                                           padding='SAME',
                                           name='pool1')
         self.stack_blocks_dense(blocks, 64, output_stride)
         if not base_only:
             if global_pool:
                 self.gpool = lib.layers.GlobalAvgPool(name='pool5')
             if num_classes is not None:
                 self.logits = lib.contrib.WSConv2D(
                     out_channels=num_classes,
                     kernel_size=1,
                     activation=None,
                     normalizer=None,
                     use_weight_standardization=False,
                     name='logits')
                 self.sp_squeeze = lib.layers.Lambda(
                     lambda x: tf.squeeze(x, axis=[1, 2]),
                     name='spatial_squeeze')
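A hedged construction sketch, reusing a blocks list shaped like the one under Example #2:

# Backbone only: no pooling head or logits, features at 1/16 resolution
# (e.g. for dense prediction).
backbone = ResNetV1Beta(blocks=blocks,
                        base_only=True,
                        output_stride=16)

# Full classifier: global average pooling plus a 1x1 logits convolution.
classifier = ResNetV1Beta(blocks=blocks, num_classes=1000)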
Example #5
# Root block for the ResNet v1 "beta" variant: three stacked 3x3
# convolutions replacing the original stride-2 7x7 stem.
def root_block(depth_multiplier=1.0):
    return lib.Sequential(
        conv2d_same(depth=int(64 * depth_multiplier),
                    kernel_size=3,
                    stride=2,
                    name='conv1_1'),
        conv2d_same(depth=int(64 * depth_multiplier),
                    kernel_size=3,
                    stride=1,
                    name='conv1_2'),
        conv2d_same(depth=int(128 * depth_multiplier),
                    kernel_size=3,
                    stride=1,
                    name='conv1_3'))
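A sketch wiring this stem into Example #4's constructor in place of the default 7x7 convolution:

model = ResNetV1Beta(blocks=blocks,
                     num_classes=1000,
                     root_block=root_block(depth_multiplier=1.0))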
Example #6
def Convolution(out_channels,
                kernel_size,
                rank,
                strides=1,
                rate=1,
                padding='SAME',
                data_format='channels_last',
                kernel_initializer='truncated_normal',
                kernel_regularizer=None,
                bias_initializer='zeros',
                bias_regularizer=None,
                activation=None,
                normalizer=None,
                normalizer_params=None,
                trainable=False,
                name=None):
    # Select the convolution class matching the spatial rank.
    if rank == 1:
        layer_class = lib.layers.Conv1D
    elif rank == 2:
        layer_class = lib.layers.Conv2D
    elif rank == 3:
        layer_class = lib.layers.Conv3D
    else:
        raise ValueError("`rank` must be 1, 2 or 3, but received: %r" % rank)
    conv = layer_class(
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        dilation_rate=rate,
        data_format=data_format,
        padding=padding,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_initializer=bias_initializer,
        bias_regularizer=bias_regularizer,
        use_bias=not normalizer and bias_initializer,
        activation=None if normalizer else activation,
        trainable=trainable,
        name=name)
    if normalizer is not None:
        assert issubclass(normalizer, lib.Layer)
        normalizer_params = normalizer_params or {}
        bn = normalizer(name=conv.name + '/batch_norm',
                        activation=activation,
                        **normalizer_params)
        return lib.Sequential(conv, bn, name='')
    return conv
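When a normalizer is supplied, the convolution is built without a bias and without an activation; the activation is applied by the normalizer, and the pair comes back as a Sequential. A hedged usage sketch:

# Conv2D -> BatchNorm -> ReLU, returned as a two-layer Sequential.
conv_bn = Convolution(out_channels=64,
                      kernel_size=3,
                      rank=2,
                      activation='relu',
                      normalizer=lib.layers.BatchNorm)

# Plain biased 1-D convolution with the activation applied directly.
conv = Convolution(out_channels=32, kernel_size=5, rank=1, activation='relu')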
Example #7
 def model_fn(features, labels):
     # A small MLP classifier: flatten -> 128-unit ReLU layer -> 10 logits.
     network = lib.Sequential(
         lib.layers.Flatten(),
         lib.layers.Dense(units=128, activation='relu'),
         lib.layers.Dense(units=10))
     network.train()  # put the network into training mode
     outputs = network(features)
     loss = lib.training.SparseCategoricalCrossEntropy(
         from_logits=True)(labels, outputs)
     metrics = [
         lib.training.SparseCategoricalAccuracy()(labels, outputs)
     ]
     params = list(network.trainable_weights)
     return lib.training.ExecutorSpec(outputs=outputs,
                                      loss=loss,
                                      metrics=metrics,
                                      params=params)
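A hedged sketch of exercising the model_fn directly on a toy batch (whether the layers accept NumPy arrays is an assumption; shapes are illustrative):

import numpy as np

features = np.random.rand(4, 28, 28).astype('float32')  # four 28x28 images
labels = np.array([3, 1, 4, 1])                          # integer class ids
spec = model_fn(features, labels)
# spec.outputs has shape (4, 10); spec.loss is a scalar.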
Example #8
# 2-D convolution with optional Weight Standardization applied to the kernel.
def WSConv2D(out_channels,
             kernel_size,
             strides=(1, 1),
             padding='SAME',
             rate=1,
             data_format='channels_last',
             epsilon=1e-5,
             use_weight_standardization=False,
             kernel_initializer='truncated_normal',
             kernel_regularizer=None,
             bias_initializer='zeros',
             bias_regularizer=None,
             activation=None,
             normalizer=None,
             normalizer_params=None,
             trainable=False,
             name=None):
    conv = _WSConv2D(
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        dilation_rate=rate,
        data_format=data_format,
        padding=padding,
        epsilon=epsilon,
        use_weight_standardization=use_weight_standardization,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_initializer=bias_initializer,
        bias_regularizer=bias_regularizer,
        use_bias=not normalizer and bias_initializer,
        activation=None if normalizer else activation,
        trainable=trainable,
        name=name)
    if normalizer is not None:
        assert issubclass(normalizer, lib.Layer)
        normalizer_params = normalizer_params or {}
        bn = normalizer(name=conv.name + '/batch_norm',
                        activation=activation,
                        **normalizer_params)
        return lib.Sequential(conv, bn, name='')
    return conv
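Weight Standardization is usually paired with GroupNorm rather than BatchNorm (Qiao et al., 2019). A hedged sketch; lib.layers.GroupNorm is an assumption, substitute whatever normalizer the library actually provides:

conv = WSConv2D(out_channels=256,
                kernel_size=3,
                use_weight_standardization=True,
                activation='relu',
                normalizer=lib.layers.GroupNorm)  # GroupNorm is assumed here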
Example #9
# 2-D convolution that behaves like 'SAME' padding independently of the
# input size: for stride > 1 it pads explicitly and convolves with 'VALID'.
def conv2d_same(depth,
                kernel_size,
                stride,
                rate=1,
                activation=None,
                name='conv2d_same'):
    if stride == 1:
        # Built-in 'SAME' padding is already input-size-independent here.
        return lib.contrib.Conv2D(depth,
                                  kernel_size=kernel_size,
                                  strides=stride,
                                  dilations=rate,
                                  activation=activation,
                                  name=name)
    else:
        return lib.Sequential(
            lib.layers.Pad2D(kernel_size=kernel_size, rate=rate),
            lib.contrib.Conv2D(depth,
                               kernel_size=kernel_size,
                               strides=stride,
                               dilations=rate,
                               padding='VALID',
                               activation=activation,
                               name=name))
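With stride > 1, built-in 'SAME' padding varies with the input size, so padding explicitly and convolving with 'VALID' keeps the behavior input-size-independent. This mirrors the stem call in Example #4:

# Stride-2 7x7 stem convolution with input-size-independent padding.
conv1 = conv2d_same(depth=64, kernel_size=7, stride=2, name='conv1')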
Example #10
def SeparableConv2D(out_channels,
                    kernel_size,
                    strides=(1, 1),
                    rate=(1, 1),
                    padding='SAME',
                    data_format='channels_last',
                    depth_multiplier=1,
                    kernel_initializer='truncated_normal',
                    kernel_regularizer=None,
                    pointwise_initializer='truncated_normal',
                    pointwise_regularizer=None,
                    bias_initializer='zeros',
                    bias_regularizer=None,
                    activation=None,
                    normalizer=None,
                    normalizer_params=None,
                    trainable=False,
                    name=None):
    if pointwise_initializer is None:
        pointwise_initializer = kernel_initializer
    # out_channels=None selects a depthwise-only convolution (no pointwise
    # projection); otherwise build the full separable convolution.
    if out_channels is not None:
        conv = lib.layers.SeparableConv2D(
            out_channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            dilation_rate=rate,
            data_format=data_format,
            padding=padding,
            depth_multiplier=depth_multiplier,
            depthwise_initializer=kernel_initializer,
            depthwise_regularizer=kernel_regularizer,
            pointwise_initializer=pointwise_initializer,
            pointwise_regularizer=pointwise_regularizer,
            bias_initializer=bias_initializer,
            bias_regularizer=bias_regularizer,
            use_bias=not normalizer and bias_initializer,
            activation=None if normalizer else activation,
            trainable=trainable,
            name=name)
    else:
        conv = lib.layers.DepthWiseConv2D(
            kernel_size=kernel_size,
            strides=strides,
            dilation_rate=rate,
            data_format=data_format,
            padding=padding,
            depth_multiplier=depth_multiplier,
            depthwise_initializer=kernel_initializer,
            depthwise_regularizer=kernel_regularizer,
            bias_initializer=bias_initializer,
            bias_regularizer=bias_regularizer,
            use_bias=not normalizer and bias_initializer,
            activation=None if normalizer else activation,
            trainable=trainable,
            name=name)
    if normalizer is not None:
        assert issubclass(normalizer, lib.Layer)
        normalizer_params = normalizer_params or {}
        bn = normalizer(name=conv.name + '/batch_norm',
                        activation=activation,
                        **normalizer_params)
        return lib.Sequential(conv, bn, name='')
    return conv
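Setting out_channels=None selects the depthwise-only path. A hedged usage sketch:

# Full separable convolution: depthwise, then a 1x1 pointwise projection
# to 256 channels.
sep = SeparableConv2D(out_channels=256, kernel_size=3)

# Depthwise only: each input channel is filtered independently.
dw = SeparableConv2D(out_channels=None, kernel_size=3, depth_multiplier=1)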