Example #1
    def __init__(self,
                 dilation_rate,
                 nb_filters,
                 kernel_size,
                 padding,
                 dropout_rate=0.0,
                 conv_regularization=0.08):
        """
        Defines the residual block for TCN
        :param x: The previous layer in the model
        :param dilation_rate: The dilation rate for this residual block
        :param nb_filters: The number of convolutional filters to use in this block
        :param kernel_size: The size of the convolutional kernel
        :param padding: The padding used in the convolutional layers, 'same' or 'causal'.
        :param dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
        :param conv_regularization: L2 regularization coefficient.
        :return: A tuple where the first element is the residual model layer, and the second is the skip connection.
        """
        super(ResidualBlock, self).__init__()

        self.dilation_rate = dilation_rate
        self.nb_filters = nb_filters
        self.kernel_size = kernel_size
        self.padding = padding
        self.dropout_rate = dropout_rate
        self.conv_regularization = conv_regularization

        self.con1 = layers.SeparableConv1D(
            filters=self.nb_filters,
            kernel_size=self.kernel_size,
            dilation_rate=self.dilation_rate,
            padding=self.padding,
            kernel_regularizer=regularizers.l2(conv_regularization),
            kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                         stddev=0.01))
        self.dropout1 = layers.SpatialDropout1D(self.dropout_rate)
        self.con2 = layers.SeparableConv1D(
            filters=self.nb_filters,
            kernel_size=self.kernel_size,
            dilation_rate=self.dilation_rate,
            padding=self.padding,
            kernel_regularizer=regularizers.l2(conv_regularization),
            kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                         stddev=0.01))
        self.dropout2 = layers.SpatialDropout1D(self.dropout_rate)
        self.conv_matching = layers.Conv1D(
            filters=nb_filters,
            kernel_size=1,
            padding='same',
            kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                         stddev=0.01))
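
The excerpt above shows only the constructor; the block's call method is not included. A minimal sketch of how these layers are typically wired in a TCN residual block, consistent with the docstring (the call signature and wiring are assumptions, not the original author's code):

    # Hypothetical call() for the ResidualBlock above -- standard TCN wiring.
    def call(self, inputs, training=None):
        x = self.con1(inputs)
        x = self.dropout1(x, training=training)
        x = self.con2(x)
        x = self.dropout2(x, training=training)
        # The 1x1 convolution matches channel counts so the tensors can be added.
        shortcut = self.conv_matching(inputs)
        residual_out = shortcut + x
        # Per the docstring: (residual output, skip connection).
        return residual_out, x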
Example #2
    def create_channel(self, x, kernel_size, feature_map):
        """
        Creates a layer, working channel wise

        Arguments:
            x           : Input for convolutional channel
            kernel_size : Kernel size for creating Conv1D
            feature_map : Feature map

        Returns:
            x           : Channel including (Conv1D + {GlobalMaxPooling & GlobalAveragePooling} + Dense [+ Dropout])
        """
        x = layers.SeparableConv1D(feature_map,
                                   kernel_size=kernel_size,
                                   activation='relu',
                                   strides=1,
                                   padding='valid',
                                   depth_multiplier=4)(x)

        x1 = layers.GlobalMaxPooling1D()(x)
        x2 = layers.GlobalAveragePooling1D()(x)
        x = layers.concatenate([x1, x2])

        x = layers.Dense(self.hidden_units)(x)
        if self.dropout_rate:
            x = layers.Dropout(self.dropout_rate)(x)
        return x
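
create_channel composes naturally into the multi-kernel-size CNN pattern: run several channels with different kernel sizes over the same input and concatenate the results. An illustrative sketch (embedded_input and num_classes are hypothetical; self.hidden_units and self.dropout_rate come from the snippet above):

    # Illustrative only: three parallel channels, merged Kim-CNN style.
    channels = [self.create_channel(embedded_input, kernel_size=k, feature_map=100)
                for k in (3, 4, 5)]
    merged = layers.concatenate(channels)
    outputs = layers.Dense(num_classes, activation='softmax')(merged)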
Example #3
    def __init__(self, out_width, bottleneck=4, stride=2):
        """
        Constructs a downsample block with the final number of output
        feature maps given by `out_width`. Stride of the spatial convolution
        layer is given by `stride`. Take care to increase width appropriately
        for a given spatial downsample.

        The first two convolutions are bottlenecked according to `bottleneck`.

        Arguments:
            out_width:  Positive integer, number of output feature maps.

            bottleneck: Positive integer, factor by which to bottleneck
                        relative to `out_width`. Default 4.

            stride:     Positive integer or tuple of positive integers giving
                        the stride of the depthwise separable convolution layer.
                        If a single value, row and col stride will be
                        set to the given value. If a tuple, assign row and
                        col stride from the tuple as (row, col).  Default 2.

        """
        super().__init__()

        # 1x1 convolution, enter the bottleneck
        self.channel_conv_1 = layers.Conv1D(
            filters=out_width // bottleneck,
            name='Downsample_enter',
            kernel_size=1,
            strides=1,
            use_bias=False,
            activation=None,
        )
        self.bn1 = layers.BatchNormalization()
        self.relu1 = layers.ReLU()

        # kernel-size-3 depthwise separable spatial convolution
        self.spatial_conv = layers.SeparableConv1D(filters=out_width,
                                                   name='Downsample_conv',
                                                   kernel_size=3,
                                                   strides=stride,
                                                   use_bias=False,
                                                   activation=None,
                                                   padding='same')
        self.bn2 = layers.BatchNormalization()
        self.relu2 = layers.ReLU()

        # kernel-size-3 strided convolution along the main path
        self.main = layers.Conv1D(filters=out_width,
                                  name='Downsample_main',
                                  kernel_size=3,
                                  strides=stride,
                                  use_bias=False,
                                  activation=None,
                                  padding='same')
        self.bn_main = layers.BatchNormalization()
        self.relu_main = layers.ReLU()

        # Merge operation to join residual + main paths
        self.merge = layers.Add()
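
As excerpted, no forward pass is shown for this downsample block. One plausible wiring, assuming the bottlenecked path and the main path run in parallel and are merged at the end (the call signature is an assumption):

    # Sketch of the forward pass; not the original author's code.
    def call(self, inputs, training=None):
        # Bottleneck path: 1x1 entry conv, then strided separable conv.
        r = self.relu1(self.bn1(self.channel_conv_1(inputs), training=training))
        r = self.relu2(self.bn2(self.spatial_conv(r), training=training))
        # Main path: strided full convolution.
        m = self.relu_main(self.bn_main(self.main(inputs), training=training))
        # Both paths now have out_width channels and matching sequence length.
        return self.merge([r, m])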
Example #4
 def testHugeEnclave(self):
     model = Sequential([
         layers.SeparableConv1D(64,
                                10,
                                input_shape=(500, 64),
                                padding='same')
     ])
     common_test_basis(model, True)
Example #5
def sep_conv(x, num_filters, kernel_size=3, activation='relu'):
    if activation == 'selu':
        x = layers.SeparableConv1D(num_filters,
                                   kernel_size,
                                   activation='selu',
                                   padding='same',
                                   kernel_initializer='lecun_normal')(x)
    elif activation == 'relu':
        x = layers.SeparableConv1D(num_filters,
                                   kernel_size,
                                   padding='same',
                                   use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
    else:
        raise ValueError('Unknown activation function: %s' % (activation, ))
    return x
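
Illustrative usage, assuming the usual `from tensorflow import keras` alongside the `layers` alias used throughout:

    # Stack a few sep_conv blocks in a functional model.
    inputs = layers.Input(shape=(128, 16))
    x = sep_conv(inputs, 32)                    # relu branch: conv + BN + ReLU
    x = sep_conv(x, 64, kernel_size=5)
    x = sep_conv(x, 64, activation='selu')      # selu branch: lecun_normal init
    model = keras.Model(inputs, x)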
Example #6
 def __init__(self, kernel_size, filters, residual=False):
     super(Small_block, self).__init__(name='small_block')
     self.conv = layers.SeparableConv1D(filters,
                                        kernel_size,
                                        padding='same',
                                        use_bias=False)
     self.bn = layers.BatchNormalization(momentum=0.9)
     self.residual = residual
     self.relu = layers.ReLU()
     self.kernel_size = kernel_size
     self.filters = filters
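
The call method is not part of this excerpt. A hedged sketch of the usual QuartzNet-style small-block forward pass, where a residual input is added before the final activation (the residual_value argument is an assumption):

    # Hypothetical call(): sepconv -> BN, optional residual add, then ReLU.
    def call(self, inputs, residual_value=None, training=None):
        x = self.bn(self.conv(inputs), training=training)
        if self.residual:
            x = x + residual_value
        return self.relu(x)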
Example #7
    def __call__(self, x):
        shortcut = x
        prefix = 'expanded_conv/'
        infilters = x.shape[-1]

        if self.block_id:
            # Expand
            prefix = 'expanded_conv_{}/'.format(self.block_id)
            x = layers.Conv1D(_depth(infilters * self.expansion),
                              kernel_size=1,
                              padding='same',
                              use_bias=False,
                              name=prefix + 'expand')(x)
            x = layers.BatchNormalization(epsilon=1e-3,
                                          momentum=0.999,
                                          name=prefix + 'expand/BatchNorm')(x)
            x = self.activation(x)

        if self.stride == 2:
            x = layers.ZeroPadding1D(padding=1,
                                     name=prefix + 'depthwise/pad')(x)

        x = layers.SeparableConv1D(
            int(x.shape[-1]),
            self.kernel_size,
            strides=self.stride,
            padding='same' if self.stride == 1 else 'valid',
            use_bias=False,
            name=prefix + 'depthwise')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + "depthwise/BatchNorm")(x)
        x = self.activation(x)

        if self.se_ratio:
            x = SEBlock(_depth(infilters * self.expansion), self.se_ratio,
                        prefix)(x)

        x = layers.Conv1D(self.filters,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          name=prefix + 'project')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + "project/BatchNorm")(x)

        if self.stride == 1 and infilters == self.filters:
            x = layers.Add(name=prefix + 'Add')([shortcut, x])

        return x
Example #8
    def __call__(self, x):

        x = layers.SeparableConv1D(self.pointwise_conv_filters,
                                   kernel_size=3,
                                   padding='same',
                                   depth_multiplier=self.depth_multiplier,
                                   strides=self.strides,
                                   use_bias=False,
                                   name="Conv_sep")(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name='Conv_sep_bn')(x)
        x = layers.ReLU(6., name='Conv_sep_relu')(x)
        return x
Example #9
    def __call__(self, ip):
        with backend.name_scope('separable_conv_block_%s' % self.block_id):
            x = layers.Activation('relu')(ip)
            if self.strides == 2:
                x = layers.ZeroPadding1D(padding=self.kernel_size,
                                         name='separable_conv_1_pad_%s' %
                                         self.block_id)(x)
                conv_pad = 'valid'
            else:
                conv_pad = 'same'

            x = layers.SeparableConv1D(self.filters,
                                       self.kernel_size,
                                       strides=self.strides,
                                       name='separable_conv_1_%s' %
                                       self.block_id,
                                       padding=conv_pad,
                                       use_bias=False,
                                       kernel_initializer='he_normal')(x)
            x = layers.BatchNormalization(momentum=0.9997,
                                          epsilon=1e-3,
                                          name='separable_conv_1_bn_%s' %
                                          self.block_id)(x)
            x = layers.Activation('relu')(x)
            x = layers.SeparableConv1D(self.filters,
                                       self.kernel_size,
                                       name='separable_conv_2_%s' %
                                       self.block_id,
                                       padding='same',
                                       use_bias=False,
                                       kernel_initializer='he_normal')(x)
            x = layers.BatchNormalization(momentum=0.9997,
                                          epsilon=1e-3,
                                          name='separable_conv_2_bn_%s' %
                                          self.block_id)(x)
        return x
Example #10
    def __call__(self, x):
        in_channels = x.shape[-1]
        pointwise_conv_filters = int(self.filters * self.alpha)
        pointwise_filters = _make_divisible(pointwise_conv_filters, 8)

        inputs = x
        prefix = 'block_{}_'.format(self.block_id)

        if self.block_id:
            x = layers.Conv1D(self.expansion * in_channels,
                              kernel_size=1,
                              padding='same',
                              use_bias=False,
                              activation=None,
                              name=prefix + "expand")(x)
            x = layers.BatchNormalization(epsilon=1e-3,
                                          momentum=0.999,
                                          name=prefix + "expand_bn")(x)
            x = layers.ReLU(6., name=prefix + "expand_relu")(x)
        else:
            prefix = 'expanded_conv_'

        x = layers.SeparableConv1D(int(x.shape[-1]),
                                   kernel_size=self.kernel,
                                   strides=self.stride,
                                   activation=None,
                                   use_bias=False,
                                   padding='same',
                                   name=prefix + 'depthwise')(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'depthwise_bn')(x)
        x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)

        x = layers.Conv1D(pointwise_filters,
                          kernel_size=1,
                          padding='same',
                          use_bias=False,
                          activation=None,
                          name=prefix + "project")(x)
        x = layers.BatchNormalization(epsilon=1e-3,
                                      momentum=0.999,
                                      name=prefix + 'project_bn')(x)

        if in_channels == pointwise_filters and self.stride == 1:
            x = layers.Add(name=prefix + 'add')([inputs, x])
        return x
Example #11
    def __init__(self, out_width, bottleneck=4):
        """
        Constructs a bottleneck block with the final number of output
        feature maps given by `out_width`. Bottlenecked layers will have
        output feature map count given by `out_width // bottleneck`.

        Arguments:
            out_width: Positive integer, number of output feature maps.

            bottleneck: Positive integer, factor by which to bottleneck
                        relative to `out_width`. Default 4.
        """
        super().__init__()

        # 1x1 pointwise convolution, enter the bottleneck
        self.channel_conv_1 = layers.Conv1D(
            filters=out_width // bottleneck,
            name='Bottleneck_enter',
            kernel_size=1,
            strides=1,
            use_bias=False,
            activation=None,
        )
        self.bn1 = layers.BatchNormalization()
        self.relu1 = layers.ReLU()

        # kernel-size-3 depthwise separable convolution
        self.spatial_conv = layers.SeparableConv1D(filters=out_width,
                                                   name='Bottleneck_conv',
                                                   kernel_size=3,
                                                   strides=1,
                                                   use_bias=False,
                                                   activation=None,
                                                   padding='same')

        self.bn2 = layers.BatchNormalization()
        self.relu2 = layers.ReLU()

        # Merge operation to join residual + main paths
        self.merge = layers.Add()
Example #12
 def __init__(self,
              kernel_size,
              filters,
              layer_name,
              residual=False,
              use_biases=False,
              use_batchnorms=True):
     super(Small_block, self).__init__(name=layer_name)
     self.conv = layers.SeparableConv1D(filters,
                                        kernel_size,
                                        padding='same',
                                        use_bias=use_biases,
                                        bias_initializer='zeros',
                                        name='sepconv')
     if use_batchnorms: self.bn = layers.BatchNormalization(momentum=0.9)
     else: self.bn = None
     self.residual = residual
     self.relu = layers.ReLU()
     self.kernel_size = kernel_size
     self.filters = filters
     self.layer_name = layer_name
     self.use_biases = use_biases
     self.use_batchnorms = use_batchnorms
Example #13
 def testMediumNative(self):
     model = Sequential([
         layers.SeparableConv1D(3, 3, input_shape=(10, 5), padding='same')
     ])
     common_test_basis(model, False)
Example #14
 def testLargeEnclave(self):
     model = Sequential([
         layers.SeparableConv1D(10, 5, input_shape=(100, 5), padding='same')
     ])
     common_test_basis(model, True)
Example #15
            sig_freq_30, sig_freq_31, sig_freq_32, sig_freq_33, sig_freq_34,
            sig_freq_35, sig_freq_36, sig_freq_37, sig_freq_38, sig_freq_39,
            sig_freq_40, sig_freq_41, sig_freq_42, sig_freq_43, sig_freq_44,
            sig_freq_45, sig_freq_46, sig_freq_47, sig_freq_48, sig_freq_49,
            sig_freq_50, sig_freq_51, sig_freq_52, sig_freq_53, sig_freq_54,
            sig_freq_55, sig_freq_56, sig_freq_57, sig_freq_58, sig_freq_59,
            sig_freq_60, sig_freq_61, sig_freq_62
        ]))

    sig_freq_abs_transpose = tf.transpose(sig_freq_abs, perm=(1, 0, 2))
    print("DFT Stack Complete")

    dropout0 = layers.Dropout(0.5)(sig_freq_abs_transpose)
    norm0 = layers.BatchNormalization(axis=1)(dropout0)

    conv1 = layers.SeparableConv1D(512, kernel_size=4,
                                   activation='relu')(norm0)
    maxpool1 = layers.MaxPooling1D(4)(conv1)
    dropout1 = layers.Dropout(0.3)(maxpool1)
    norm1 = layers.BatchNormalization(axis=1)(dropout1)

    conv2 = layers.SeparableConv1D(512, kernel_size=4,
                                   activation='relu')(norm1)
    maxpool2 = layers.MaxPooling1D(2)(conv2)
    dropout2 = layers.Dropout(0.3)(maxpool2)
    norm2 = layers.BatchNormalization(axis=1)(dropout2)

    conv3 = layers.SeparableConv1D(256, kernel_size=4,
                                   activation='relu')(norm2)
    maxpool3 = layers.MaxPooling1D(2)(conv3)
    dropout3 = layers.Dropout(0.3)(maxpool3)
    norm3 = layers.BatchNormalization(axis=1)(dropout3)
Example #16
def generate_sep_conv1(test_name="small",
                       steps=3,
                       channels=3,
                       filters=3,
                       kernel_size=3,
                       mode='full'):
    declarations = [
        parameter_template.render(name='steps', value=steps),
        parameter_template.render(name='channels', value=channels),
        parameter_template.render(name='filters', value=filters),
        parameter_template.render(name='kernel_size', value=kernel_size)
    ]

    inputs = rng.uniform(-1, 1, (1, steps, channels))

    if mode == 'zeros':
        layer = tf_layers.SeparableConv1D(filters,
                                          kernel_size,
                                          strides=1,
                                          input_shape=inputs.shape,
                                          padding='same',
                                          use_bias=True,
                                          bias_initializer='zeros',
                                          depthwise_initializer='zeros',
                                          pointwise_initializer='zeros')
    elif mode == 'ones':
        inputs = np.ones((1, steps, channels))
        layer = tf_layers.SeparableConv1D(filters,
                                          kernel_size,
                                          strides=1,
                                          input_shape=inputs.shape,
                                          padding='same',
                                          use_bias=True,
                                          bias_initializer='zeros',
                                          depthwise_initializer='ones',
                                          pointwise_initializer='ones')
    elif mode == 'sequential':
        inputs = np.arange(steps * channels, dtype=float).reshape(
            (1, steps, channels))
        layer = tf_layers.SeparableConv1D(filters,
                                          kernel_size,
                                          strides=1,
                                          input_shape=inputs.shape,
                                          padding='same',
                                          use_bias=True,
                                          bias_initializer='zeros',
                                          depthwise_initializer='ones',
                                          pointwise_initializer='ones')
    elif mode == 'no_bias':
        layer = tf_layers.SeparableConv1D(
            filters,
            kernel_size,
            strides=1,
            input_shape=inputs.shape,
            padding='same',
            use_bias=True,
            bias_initializer='zeros',
            depthwise_initializer='glorot_uniform',
            pointwise_initializer='glorot_uniform')
    elif mode == 'full':
        layer = tf_layers.SeparableConv1D(
            filters,
            kernel_size,
            strides=1,
            input_shape=inputs.shape,
            padding='same',
            use_bias=True,
            bias_initializer='glorot_uniform',
            depthwise_initializer='glorot_uniform',
            pointwise_initializer='glorot_uniform')
    else:
        raise NotImplementedError("Unknown test mode")
    declarations.append(generate_array('inputs', inputs))

    results = layer(inputs).numpy()
    params = layer.get_weights()
    depth_kernels = params[0]
    declarations.append(generate_array('depth_kernels', depth_kernels))
    point_kernels = params[1]
    declarations.append(generate_array('point_kernels', point_kernels))
    biases = params[2]
    declarations.append(generate_array('biases', biases))
    declarations.append(generate_array('expected', results))
    declarations.append(ret_template.render(size=steps * filters))

    operator = "sep_conv1(inputs, steps, channels, filters, depth_kernels, point_kernels, kernel_size, biases, ret);"

    return {
        'suite': 'sep_conv1',
        'name': test_name,
        'declarations': declarations,
        'operator': operator
    }
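
An illustrative driver, generating one fixture per initialization mode (the mode list mirrors the branches above):

    tests = [generate_sep_conv1(test_name=m, mode=m)
             for m in ('zeros', 'ones', 'sequential', 'no_bias', 'full')]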
Example #17
def get_quartznet(input_dim,
                  output_dim,
                  is_mixed_precision=False,
                  fixed_sequence_size=None,
                  num_b_block_repeats=3,
                  b_block_kernel_sizes=(33, 39, 51, 63, 75),
                  b_block_num_channels=(256, 256, 512, 512, 512),
                  num_small_blocks=5,
                  use_biases=False,
                  use_batchnorms=True,
                  use_mask=False,
                  fixed_batch_size=None,
                  random_state=1) -> keras.Model:
    """
    Parameters
    ----------
    input_dim: input feature length
    output_dim: output feature length
    is_mixed_precision: whether a mixed-precision model is needed
    fixed_sequence_size: int, default None. Set if the sequence length must be fixed.
    num_b_block_repeats: 1 gives a 5x5 QuartzNet, 2 a 10x5, 3 a 15x5
    b_block_kernel_sizes: iterable, kernel size of each B block
    b_block_num_channels: iterable, number of channels of each B block
    num_small_blocks: int, number of conv blocks inside one residual block
    use_biases: whether biases are used with convolutions
    use_batchnorms: whether batch norms are inserted after each residual block
    use_mask: whether a masking layer is used
    fixed_batch_size: int, default None. Set if the model must have a fixed batch size.
    random_state: int, seed used for weight initialization
    """
    assert len(b_block_kernel_sizes) == len(b_block_num_channels), \
        "Number of kernel sizes does not equal the number of channel sizes"

    if is_mixed_precision:
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)

    np.random.seed(random_state)
    tf.random.set_seed(random_state)

    with tf.device('/cpu:0'):
        input_tensor = layers.Input([fixed_sequence_size, input_dim],
                                    name='X',
                                    batch_size=fixed_batch_size)
        x = tf.identity(input_tensor)
        if use_mask: x = layers.Masking()(x)
        # First encoder layer
        x = layers.SeparableConv1D(256,
                                   33,
                                   padding='same',
                                   strides=2,
                                   name='conv_1',
                                   use_bias=use_biases,
                                   bias_initializer='zeros')(x)
        if use_batchnorms:
            x = layers.BatchNormalization(name='BN-1', momentum=0.9)(x)
        x = layers.ReLU(name='RELU-1')(x)

        block_idx = 1
        for kernel_size, n_channels in zip(b_block_kernel_sizes,
                                           b_block_num_channels):
            for bk in range(num_b_block_repeats):
                x = B_block(kernel_size,
                            n_channels,
                            num_small_blocks,
                            f'B-{block_idx}',
                            use_biases=use_biases,
                            use_batchnorms=use_batchnorms)(x)
                block_idx += 1

        # First final layer
        x = layers.SeparableConv1D(512,
                                   87,
                                   padding='same',
                                   name='conv_2',
                                   dilation_rate=2,
                                   use_bias=use_biases,
                                   bias_initializer='zeros')(x)
        if use_batchnorms:
            x = layers.BatchNormalization(name='BN-2', momentum=0.9)(x)
        x = layers.ReLU(name='RELU-2')(x)

        # Second final layer
        x = layers.Conv1D(1024,
                          1,
                          padding='same',
                          name='conv_3',
                          use_bias=use_biases,
                          bias_initializer='zeros')(x)
        if use_batchnorms:
            x = layers.BatchNormalization(name='BN-3', momentum=0.9)(x)
        x = layers.ReLU(name='RELU-3')(x)

        # Third final layer
        x = layers.Conv1D(output_dim,
                          1,
                          padding='same',
                          dilation_rate=1,
                          name='conv_4')(x)
        model = keras.Model([input_tensor], x, name='QuartzNet')

    if is_mixed_precision:
        policy = mixed_precision.Policy('float32')
        mixed_precision.set_policy(policy)

    return model
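
Illustrative usage, assuming B_block and the imports referenced above are available (the 80-dim input and 29 output symbols are hypothetical):

    # A 5x5 QuartzNet over 80-dim features with a 29-symbol output alphabet.
    model = get_quartznet(input_dim=80, output_dim=29, num_b_block_repeats=1)
    model.summary()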
Example #18
 def testSmallEnclave(self):
     model = Sequential(
         [layers.SeparableConv1D(3, 3, input_shape=(5, 3), padding='same')])
     common_test_basis(model, True)
Example #19
def Xception(include_top=True, weights='hasc', input_shape=None, pooling=None, classes=6, classifier_activation='softmax'):
    if input_shape is None:
        input_shape = (256*3, 1)

    if weights in ['hasc', 'HASC'] and include_top and classes != 6:
        raise ValueError('If using `weights` as `"hasc"` with `include_top`'
                         ' as true, `classes` should be 6')

    inputs = layers.Input(shape=input_shape)

    x = layers.Conv1D(32, 3, strides=2, use_bias=False, name='block1_conv1')(inputs)
    x = layers.BatchNormalization(name='block1_conv1_bn')(x)
    x = layers.Activation('relu', name='block1_conv1_act')(x)
    x = layers.Conv1D(64, 3, use_bias=False, name='block1_conv2')(x)
    x = layers.BatchNormalization(name='block1_conv2_bn')(x)
    x = layers.Activation('relu', name='block1_conv2_act')(x)

    residual = layers.Conv1D(
        128, 1, strides=2, padding='same', use_bias=False
    )(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.SeparableConv1D(128, 3, padding='same', use_bias=False, name='block2_sepconv1')(x)
    x = layers.BatchNormalization(name='block2_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block2_sepconv2_act')(x)
    x = layers.SeparableConv1D(128, 3, padding='same', use_bias=False, name='block2_sepconv2')(x)
    x = layers.BatchNormalization(name='block2_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv1D(
        256, 1, strides=2, padding='same', use_bias=False
    )(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block3_sepconv1_act')(x)
    x = layers.SeparableConv1D(256, 3, padding='same', use_bias=False, name='block3_sepconv1')(x)
    x = layers.BatchNormalization(name='block3_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block3_sepconv2_act')(x)
    x = layers.SeparableConv1D(256, 3, padding='same', use_bias=False, name='block3_sepconv2')(x)
    x = layers.BatchNormalization(name='block3_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv1D(728, 1, strides=2, padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block4_sepconv1_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block4_sepconv1')(x)
    x = layers.BatchNormalization(name='block4_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block4_sepconv2_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block4_sepconv2')(x)
    x = layers.BatchNormalization(name='block4_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same', name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = layers.Activation('relu', name=prefix + "_sepconv1_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv1")(x)
        x = layers.BatchNormalization(name=prefix + "_sepconv1_bn")(x)
        x = layers.Activation('relu', name=prefix + "_sepconv2_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv2")(x)
        x = layers.BatchNormalization(name=prefix + "_sepconv2_bn")(x)
        x = layers.Activation('relu', name=prefix + "_sepconv3_act")(x)
        x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name=prefix + "_sepconv3")(x)

        x = layers.add([x, residual])

    residual = layers.Conv1D(1024, 1, strides=2, padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization()(residual)

    x = layers.Activation('relu', name='block13_sepconv1_act')(x)
    x = layers.SeparableConv1D(728, 3, padding='same', use_bias=False, name='block13_sepconv1')(x)
    x = layers.BatchNormalization(name='block13_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block13_sepconv2_act')(x)
    x = layers.SeparableConv1D(1024, 3, padding='same', use_bias=False, name='block13_sepconv2')(x)
    x = layers.BatchNormalization(name='block13_sepconv2_bn')(x)

    x = layers.MaxPooling1D(3, strides=2, padding='same')(x)
    x = layers.add([x, residual])

    x = layers.SeparableConv1D(1536, 3, padding='same', use_bias=False, name='block14_sepconv1')(x)
    x = layers.BatchNormalization(name='block14_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv1_act')(x)

    x = layers.SeparableConv1D(2048, 3, padding='same', use_bias=False, name='block14_sepconv2')(x)
    x = layers.BatchNormalization(name='block14_sepconv2_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv2_act')(x)

    x = layers.GlobalAveragePooling1D(name='avg_pool')(x)
    y = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)

    model = Model(inputs, y)

    if weights is not None:
        if weights in ['hasc', "HASC"]:
            weights = 'weights/xception/xception_hasc_weights_{}_{}.hdf5'.format(int(input_shape[0]),
                                                                                 int(input_shape[1]))

        # Initialize from 'hasc' or a weights file
        if os.path.exists(weights):
            print("Load weights from {}".format(weights))
            model.load_weights(weights)
        else:
            print("Not exist weights: {}".format(weights))

    # When the top is not included
    if not include_top:
        if pooling is None:
            # Remove the top
            model = Model(inputs=model.input, outputs=model.layers[-3].output)
        elif pooling == 'avg':
            y = layers.GlobalAveragePooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        elif pooling == 'max':
            y = layers.GlobalMaxPooling1D()(model.layers[-3].output)
            model = Model(inputs=model.input, outputs=y)
        else:
            print("Not exist pooling option: {}".format(pooling))
            model = Model(inputs=model.input, outputs=model.layers[-3].output)

    return model
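
Illustrative usage as a headless feature extractor (assuming import numpy as np; weights=None skips the HASC checkpoint):

    backbone = Xception(include_top=False, weights=None, pooling='avg')
    features = backbone.predict(np.zeros((1, 256 * 3, 1)))  # -> shape (1, 2048)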
Example #20
def get_quartznet(input_dim,
                  output_dim,
                  is_mixed_precision=False,
                  tflite_version=False,
                  num_b_block_repeats=3,
                  b_block_kernel_sizes=(33, 39, 51, 63, 75),
                  b_block_num_channels=(256, 256, 512, 512, 512),
                  num_small_blocks=5,
                  random_state=1) -> keras.Model:
    """
    Parameters
    ----------
    input_dim: input feature length
    output_dim: output feature length
    is_mixed_precision: whether a mixed-precision model is needed
    tflite_version: whether export to TFLite is needed
    num_b_block_repeats: 1 gives a 5x5 QuartzNet, 2 a 10x5, 3 a 15x5
    b_block_kernel_sizes: iterable, kernel size of each B block
    b_block_num_channels: iterable, number of channels of each B block
    num_small_blocks: int, number of conv blocks inside one residual block
    random_state: int, seed used for weight initialization
    """
    assert len(b_block_kernel_sizes) == len(b_block_num_channels), \
        "Number of kernel sizes does not equal the number of channel sizes"

    max_seq_length = None
    if tflite_version:
        max_seq_length = 5

    if is_mixed_precision:
        policy = mixed_precision.Policy('mixed_float16')
        mixed_precision.set_policy(policy)

    np.random.seed(random_state)
    tf.random.set_seed(random_state)

    with tf.device('/cpu:0'):
        input_tensor = layers.Input([max_seq_length, input_dim], name='X')

        x = layers.Masking()(input_tensor)
        # First encoder layer
        x = layers.SeparableConv1D(256,
                                   33,
                                   padding='same',
                                   strides=2,
                                   name='conv_1',
                                   use_bias=False)(x)
        x = layers.BatchNormalization(name='BN-1', momentum=0.9)(x)
        x = layers.ReLU(name='RELU-1')(x)

        block_idx = 1
        for kernel_size, n_channels in zip(b_block_kernel_sizes,
                                           b_block_num_channels):
            for bk in range(num_b_block_repeats):
                x = B_block(kernel_size, n_channels, num_small_blocks,
                            f'B-{block_idx}')(x)
                block_idx += 1

        # First final layer
        x = layers.SeparableConv1D(512,
                                   87,
                                   padding='same',
                                   name='conv_2',
                                   dilation_rate=2,
                                   use_bias=False)(x)
        x = layers.BatchNormalization(name='BN-2', momentum=0.9)(x)
        x = layers.ReLU(name='RELU-2')(x)

        # Second final layer
        x = layers.Conv1D(1024,
                          1,
                          padding='same',
                          name='conv_3',
                          use_bias=False)(x)
        x = layers.BatchNormalization(name='BN-3', momentum=0.9)(x)
        x = layers.ReLU(name='RELU-3')(x)

        # Third final layer
        x = layers.Conv1D(output_dim,
                          1,
                          padding='same',
                          dilation_rate=1,
                          name='conv_4')(x)
        model = keras.Model([input_tensor], x, name='QuartzNet')

    if is_mixed_precision:
        policy = mixed_precision.Policy('float32')
        mixed_precision.set_policy(policy)

    return model