Example No. 1
def apply_blstm(input_tensor, output_name='output', params={}):
    """ Apply BLSTM to the given input_tensor.

    :param input_tensor: Input of the model.
    :param output_name: (Optional) name of the output, defaults to 'output'.
    :param params: (Optional) dict of BLSTM parameters.
    :returns: Output tensor.
    """
    units = params.get('lstm_units', 250)
    kernel_initializer = he_uniform(seed=50)
    flatten_input = TimeDistributed(Flatten())((input_tensor))

    def create_bidirectional():
        return Bidirectional(
            CuDNNLSTM(
                units,
                kernel_initializer=kernel_initializer,
                return_sequences=True))

    l1 = create_bidirectional()((flatten_input))
    l2 = create_bidirectional()((l1))
    l3 = create_bidirectional()((l2))
    dense = TimeDistributed(
        Dense(
            int(flatten_input.shape[2]),
            activation='relu',
            kernel_initializer=kernel_initializer))((l3))
    output = TimeDistributed(
        Reshape(input_tensor.shape[2:]),
        name=output_name)(dense)
    return output
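
For context, a minimal usage sketch (not part of the original source) showing how apply_blstm could be wired into a Keras model. The spectrogram shape and output name are illustrative assumptions, and CuDNNLSTM additionally requires a CUDA-enabled GPU at run time.

import tensorflow as tf

# Hypothetical (batch, time, frequency, channels) spectrogram input; the shape
# (512, 128, 2) is an arbitrary example.
spectrogram = tf.keras.Input(shape=(512, 128, 2), name='input_spectrogram')

# apply_blstm flattens each time frame, stacks three bidirectional LSTMs,
# projects back to the per-frame size and reshapes to the original frame shape.
vocals = apply_blstm(spectrogram, output_name='vocals', params={'lstm_units': 250})

model = tf.keras.Model(inputs=spectrogram, outputs=vocals)
model.summary()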
Example No. 2
def apply_blstm(input_tensor: tf.Tensor,
                output_name: str = "output",
                params: Optional[Dict] = None) -> tf.Tensor:
    """
    Apply BLSTM to the given input_tensor.

    Parameters:
        input_tensor (tensorflow.Tensor):
            Input of the model.
        output_name (str):
            (Optional) name of the output, defaults to 'output'.
        params (Optional[Dict]):
            (Optional) dict of BLSTM parameters.

    Returns:
        tensorflow.Tensor:
            Output tensor.
    """
    if params is None:
        params = {}
    units: int = params.get("lstm_units", 250)
    kernel_initializer = he_uniform(seed=50)
    flatten_input = TimeDistributed(Flatten())((input_tensor))

    def create_bidirectional():
        return Bidirectional(
            CuDNNLSTM(units,
                      kernel_initializer=kernel_initializer,
                      return_sequences=True))

    l1 = create_bidirectional()((flatten_input))
    l2 = create_bidirectional()((l1))
    l3 = create_bidirectional()((l2))
    dense = TimeDistributed(
        Dense(
            int(flatten_input.shape[2]),
            activation="relu",
            kernel_initializer=kernel_initializer,
        ))((l3))
    output: tf.Tensor = TimeDistributed(Reshape(input_tensor.shape[2:]),
                                        name=output_name)(dense)
    return output
Example No. 3
def apply_blstm(input_tensor, output_name='output', params={}):
    """ Apply BLSTM to the given input_tensor. """

    units = params.get('lstm_units', 250)
    kernel_initializer = he_uniform(seed=50)
    flatten_input = TimeDistributed(Flatten())((input_tensor))

    def create_bidirectional():
        return Bidirectional(
            CuDNNLSTM(units,
                      kernel_initializer=kernel_initializer,
                      return_sequences=True))

    l1 = create_bidirectional()((flatten_input))
    l2 = create_bidirectional()((l1))
    l3 = create_bidirectional()((l2))
    dense = TimeDistributed(
        Dense(int(flatten_input.shape[2]),
              activation='relu',
              kernel_initializer=kernel_initializer))((l3))
    output = TimeDistributed(Reshape(input_tensor.shape[2:]),
                             name=output_name)(dense)
    return output
Example No. 4
 def __init__(
     self,
     input_tensor,
     cout = 1,
     output_name = 'output',
     params = {},
     output_mask_logit = False,
     dropout = 0.5,
     training = True,
 ):
     conv_n_filters = params.get(
         'conv_n_filters', [66, 132, 264, 528, 1056, 2112]
     )
     conv_activation_layer = _get_conv_activation_layer(params)
     deconv_activation_layer = _get_deconv_activation_layer(params)
     kernel_initializer = he_uniform(seed = 50)
     conv1d_factory = partial(
         Conv1D,
         strides = (2),
         padding = 'same',
         kernel_initializer = kernel_initializer,
     )
     conv1 = conv1d_factory(conv_n_filters[0], (5))(input_tensor)
     batch1 = BatchNormalization(axis = -1)(conv1, training = training)
     rel1 = conv_activation_layer(batch1)
     conv2 = conv1d_factory(conv_n_filters[1], (5))(rel1)
     batch2 = BatchNormalization(axis = -1)(conv2, training = training)
     rel2 = conv_activation_layer(batch2)
     conv3 = conv1d_factory(conv_n_filters[2], (5))(rel2)
     batch3 = BatchNormalization(axis = -1)(conv3, training = training)
     rel3 = conv_activation_layer(batch3)
     conv4 = conv1d_factory(conv_n_filters[3], (5))(rel3)
     batch4 = BatchNormalization(axis = -1)(conv4, training = training)
     rel4 = conv_activation_layer(batch4)
     conv5 = conv1d_factory(conv_n_filters[4], (5))(rel4)
     batch5 = BatchNormalization(axis = -1)(conv5, training = training)
     rel5 = conv_activation_layer(batch5)
     conv6 = conv1d_factory(conv_n_filters[5], (5))(rel5)
     batch6 = BatchNormalization(axis = -1)(conv6, training = training)
     _ = conv_activation_layer(batch6)
     conv2d_transpose_factory = partial(
         Conv2DTranspose,
         strides = (2, 1),
         padding = 'same',
         kernel_initializer = kernel_initializer,
     )
     up1 = conv2d_transpose_factory(conv_n_filters[4], (5, 1))(
         (tf.expand_dims(conv6, axis = 2))
     )[:, :, 0]
     up1 = deconv_activation_layer(up1)
     batch7 = BatchNormalization(axis = -1)(up1, training = training)
     drop1 = Dropout(dropout)(batch7, training = training)
     merge1 = Concatenate(axis = -1)([conv5, drop1])
     up2 = conv2d_transpose_factory(conv_n_filters[3], (5, 1))(
         (tf.expand_dims(merge1, axis = 2))
     )[:, :, 0]
     up2 = deconv_activation_layer(up2)
     batch8 = BatchNormalization(axis = -1)(up2, training = training)
     drop2 = Dropout(dropout)(batch8, training = training)
     merge2 = Concatenate(axis = -1)([conv4, drop2])
     up3 = conv2d_transpose_factory(conv_n_filters[2], (5, 1))(
         (tf.expand_dims(merge2, axis = 2))
     )[:, :, 0]
     up3 = deconv_activation_layer(up3)
     batch9 = BatchNormalization(axis = -1)(up3, training = training)
     drop3 = Dropout(dropout)(batch9, training = training)
     merge3 = Concatenate(axis = -1)([conv3, drop3])
     up4 = conv2d_transpose_factory(conv_n_filters[1], (5, 1))(
         (tf.expand_dims(merge3, axis = 2))
     )[:, :, 0]
     up4 = deconv_activation_layer(up4)
     batch10 = BatchNormalization(axis = -1)(up4, training = training)
     merge4 = Concatenate(axis = -1)([conv2, batch10])
     up5 = conv2d_transpose_factory(conv_n_filters[0], (5, 1))(
         (tf.expand_dims(merge4, axis = 2))
     )[:, :, 0]
     up5 = deconv_activation_layer(up5)
     batch11 = BatchNormalization(axis = -1)(up5, training = training)
     merge5 = Concatenate(axis = -1)([conv1, batch11])
     up6 = conv2d_transpose_factory(1, (5, 5), strides = (2, 1))(
         (tf.expand_dims(merge5, axis = 2))
     )[:, :, 0]
     up6 = deconv_activation_layer(up6)
     batch12 = BatchNormalization(axis = -1)(up6, training = training)
     if not output_mask_logit:
         up7 = Conv1D(
             cout,
             (4),
             dilation_rate = (2),
             padding = 'same',
             kernel_initializer = kernel_initializer,
         )((batch12))
         output = Multiply(name = output_name)([up7, input_tensor])
         self.logits = output
     else:
         self.logits = Conv1D(
             cout,
             (4),
             dilation_rate = (2),
             padding = 'same',
             kernel_initializer = kernel_initializer,
         )((batch12))
Example No. 5
    def __init__(
        self,
        input_tensor,
        cout=1,
        ksize=3,
        num_layers=6,
        num_initial_filters=16,
        output_mask_logit=False,
        logging=False,
        dropout=0.5,
        training=False,
    ):
        self.customlayers = lay.Layers()
        conv_activation_layer = _get_conv_activation_layer({})
        deconv_activation_layer = _get_deconv_activation_layer({})
        kernel_initializer = he_uniform(seed=50)

        conv2d_factory = partial(
            Conv2D,
            strides=(2, 2),
            padding='same',
            kernel_initializer=kernel_initializer,
        )

        conv2d_transpose_factory = partial(
            Conv2DTranspose,
            strides=(2, 2),
            padding='same',
            kernel_initializer=kernel_initializer,
        )

        enc_outputs = []
        current_layer = input_tensor
        for i in range(num_layers):
            if i < num_layers - 1:
                current_layer = self.residual_X(
                    current_layer,
                    ksize,
                    inchannel=int(current_layer.shape[-1]),
                    outchannel=num_initial_filters * (2**i),
                    name=f'residual_x_{i}',
                    logging=logging,
                    training=training,
                )
                enc_outputs.append(current_layer)
            else:
                current_layer = conv2d_factory(num_initial_filters * (2**i),
                                               (5, 5))(current_layer)

            if logging:
                print(current_layer)

        for i in range(num_layers - 1):

            current_layer = conv2d_transpose_factory(
                num_initial_filters * (2**(num_layers - i - 2)),
                (5, 5))((current_layer))
            current_layer = deconv_activation_layer(current_layer)
            current_layer = BatchNormalization(axis=-1)(current_layer,
                                                        training=training)
            if i < 3:
                current_layer = Dropout(dropout)(current_layer,
                                                 training=training)
            current_layer = Concatenate(axis=-1)(
                [enc_outputs[-i - 1], current_layer])
            if logging:
                print(current_layer)

        current_layer = conv2d_transpose_factory(1, (5, 5), strides=(2, 2))(
            (current_layer))
        current_layer = deconv_activation_layer(current_layer)
        current_layer = BatchNormalization(axis=-1)(current_layer,
                                                    training=training)

        if not output_mask_logit:
            last = Conv2D(
                cout,
                (4, 4),
                dilation_rate=(2, 2),
                activation='sigmoid',
                padding='same',
                kernel_initializer=kernel_initializer,
            )((current_layer))
            output = Multiply()([last, input_tensor])
            self.logits = output
        else:
            self.logits = Conv2D(
                cout,
                (4, 4),
                dilation_rate=(2, 2),
                padding='same',
                kernel_initializer=kernel_initializer,
            )((current_layer))
Example No. 6
    def __init__(
        self,
        input_tensor,
        cout=1,
        num_layers=6,
        num_initial_filters=66,
        output_mask_logit=False,
        logging=False,
        dropout=0.5,
        training=True,
    ):
        conv_activation_layer = _get_conv_activation_layer({})
        deconv_activation_layer = _get_deconv_activation_layer({})
        kernel_initializer = he_uniform(seed=50)

        conv1d_factory = partial(
            Conv1D,
            strides=(2),
            padding='same',
            kernel_initializer=kernel_initializer,
        )

        conv2d_transpose_factory = partial(
            Conv2DTranspose,
            strides=(2, 1),
            padding='same',
            kernel_initializer=kernel_initializer,
        )

        def resnet_block(input_tensor, filter_size):

            res = conv1d_factory(filter_size, (1), strides=(1),
                                 use_bias=False)(input_tensor)
            conv1 = conv1d_factory(filter_size, (5), strides=(1))(input_tensor)
            batch1 = BatchNormalization(axis=-1)(conv1, training=training)
            rel1 = conv_activation_layer(batch1)
            conv2 = conv1d_factory(filter_size, (5), strides=(1))(rel1)
            batch2 = BatchNormalization(axis=-1)(conv2, training=training)
            resconnection = Add()([res, batch2])
            rel2 = conv_activation_layer(resconnection)
            return MaxPooling1D(padding='same')(rel2)

        enc_outputs = []
        current_layer = input_tensor
        for i in range(num_layers):

            if i < num_layers - 1:
                current_layer = resnet_block(current_layer,
                                             num_initial_filters * (2**i))
                enc_outputs.append(current_layer)
            else:
                current_layer = conv1d_factory(num_initial_filters * (2**i),
                                               (5))(current_layer)

            if logging:
                print(current_layer)

        for i in range(num_layers - 1):

            current_layer = conv2d_transpose_factory(
                num_initial_filters * (2**(num_layers - i - 2)),
                (5, 1))((tf.expand_dims(current_layer, 2)))[:, :, 0]
            current_layer = deconv_activation_layer(current_layer)
            current_layer = BatchNormalization(axis=-1)(current_layer,
                                                        training=training)
            if i < 3:
                current_layer = Dropout(dropout)(current_layer,
                                                 training=training)
            current_layer = Concatenate(axis=-1)(
                [enc_outputs[-i - 1], current_layer])
            if logging:
                print(current_layer)

        current_layer = conv2d_transpose_factory(1, (5, 1), strides=(2, 1))(
            (tf.expand_dims(current_layer, 2)))[:, :, 0]
        current_layer = deconv_activation_layer(current_layer)
        current_layer = BatchNormalization(axis=-1)(current_layer,
                                                    training=training)

        if not output_mask_logit:
            last = Conv1D(
                cout,
                (4),
                dilation_rate=(2),
                activation=None,
                padding='same',
                kernel_initializer=kernel_initializer,
            )((current_layer))
            output = Multiply()([last, input_tensor])
            self.logits = output
        else:

            self.logits = Conv1D(
                cout,
                (4),
                dilation_rate=(2),
                padding='same',
                kernel_initializer=kernel_initializer,
            )((current_layer))
Example No. 7
def apply_unet(
        input_tensor,
        output_name='output',
        params={},
        output_mask_logit=False):
    """ Apply a convolutionnal U-net to model a single instrument (one U-net
    is used for each instrument).

    :param input_tensor:
    :param output_name: (Optional) , default to 'output'
    :param params: (Optional) , default to empty dict.
    :param output_mask_logit: (Optional) , default to False.
    """
    logging.info(f'Apply unet for {output_name}')
    conv_n_filters = params.get('conv_n_filters', [16, 32, 64, 128, 256, 512])
    conv_activation_layer = _get_conv_activation_layer(params)
    deconv_activation_layer = _get_deconv_activation_layer(params)
    kernel_initializer = he_uniform(seed=50)
    conv2d_factory = partial(
        Conv2D,
        strides=(2, 2),
        padding='same',
        kernel_initializer=kernel_initializer)
    # First layer.
    conv1 = conv2d_factory(conv_n_filters[0], (5, 5))(input_tensor)
    batch1 = BatchNormalization(axis=-1)(conv1)
    rel1 = conv_activation_layer(batch1)
    # Second layer.
    conv2 = conv2d_factory(conv_n_filters[1], (5, 5))(rel1)
    batch2 = BatchNormalization(axis=-1)(conv2)
    rel2 = conv_activation_layer(batch2)
    # Third layer.
    conv3 = conv2d_factory(conv_n_filters[2], (5, 5))(rel2)
    batch3 = BatchNormalization(axis=-1)(conv3)
    rel3 = conv_activation_layer(batch3)
    # Fourth layer.
    conv4 = conv2d_factory(conv_n_filters[3], (5, 5))(rel3)
    batch4 = BatchNormalization(axis=-1)(conv4)
    rel4 = conv_activation_layer(batch4)
    # Fifth layer.
    conv5 = conv2d_factory(conv_n_filters[4], (5, 5))(rel4)
    batch5 = BatchNormalization(axis=-1)(conv5)
    rel5 = conv_activation_layer(batch5)
    # Sixth layer
    conv6 = conv2d_factory(conv_n_filters[5], (5, 5))(rel5)
    batch6 = BatchNormalization(axis=-1)(conv6)
    _ = conv_activation_layer(batch6)
    #
    #
    conv2d_transpose_factory = partial(
        Conv2DTranspose,
        strides=(2, 2),
        padding='same',
        kernel_initializer=kernel_initializer)
    #
    up1 = conv2d_transpose_factory(conv_n_filters[4], (5, 5))((conv6))
    up1 = deconv_activation_layer(up1)
    batch7 = BatchNormalization(axis=-1)(up1)
    drop1 = Dropout(0.5)(batch7)
    merge1 = Concatenate(axis=-1)([conv5, drop1])
    #
    up2 = conv2d_transpose_factory(conv_n_filters[3], (5, 5))((merge1))
    up2 = deconv_activation_layer(up2)
    batch8 = BatchNormalization(axis=-1)(up2)
    drop2 = Dropout(0.5)(batch8)
    merge2 = Concatenate(axis=-1)([conv4, drop2])
    #
    up3 = conv2d_transpose_factory(conv_n_filters[2], (5, 5))((merge2))
    up3 = deconv_activation_layer(up3)
    batch9 = BatchNormalization(axis=-1)(up3)
    drop3 = Dropout(0.5)(batch9)
    merge3 = Concatenate(axis=-1)([conv3, drop3])
    #
    up4 = conv2d_transpose_factory(conv_n_filters[1], (5, 5))((merge3))
    up4 = deconv_activation_layer(up4)
    batch10 = BatchNormalization(axis=-1)(up4)
    merge4 = Concatenate(axis=-1)([conv2, batch10])
    #
    up5 = conv2d_transpose_factory(conv_n_filters[0], (5, 5))((merge4))
    up5 = deconv_activation_layer(up5)
    batch11 = BatchNormalization(axis=-1)(up5)
    merge5 = Concatenate(axis=-1)([conv1, batch11])
    #
    up6 = conv2d_transpose_factory(1, (5, 5), strides=(2, 2))((merge5))
    up6 = deconv_activation_layer(up6)
    batch12 = BatchNormalization(axis=-1)(up6)
    # Last layer to ensure initial shape reconstruction.
    if not output_mask_logit:
        up7 = Conv2D(
            2,
            (4, 4),
            dilation_rate=(2, 2),
            activation='sigmoid',
            padding='same',
            kernel_initializer=kernel_initializer)((batch12))
        output = Multiply(name=output_name)([up7, input_tensor])
        return output
    return Conv2D(
        2,
        (4, 4),
        dilation_rate=(2, 2),
        padding='same',
        kernel_initializer=kernel_initializer)((batch12))
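
As a sketch of the per-instrument usage the docstring describes (not part of the original source), the snippet below builds one U-net output per instrument from a shared spectrogram input. The shape and instrument names are illustrative assumptions; the time and frequency dimensions must be divisible by 2**6 = 64, since the encoder applies six stride-2 convolutions.

import tensorflow as tf

# Hypothetical (batch, time, frequency, channels) input; 512 and 1024 are both
# divisible by 64, so the six downsampling steps divide them evenly.
mix = tf.keras.Input(shape=(512, 1024, 2), name='mix_spectrogram')

# One U-net per instrument, as the docstring describes.
vocals = apply_unet(mix, output_name='vocals')
accompaniment = apply_unet(mix, output_name='accompaniment')

model = tf.keras.Model(inputs=mix, outputs=[vocals, accompaniment])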
Example No. 8
    def __init__(
        self,
        inputs,
        training = True,
        ksize = 5,
        n_layers = 12,
        channels_interval = 24,
        logging = True,
    ):
        conv_activation_layer = _get_conv_activation_layer({})
        kernel_initializer = he_uniform(seed = 50)

        conv1d_factory = partial(
            Conv1D,
            strides = (2),
            padding = 'same',
            kernel_initializer = kernel_initializer,
        )

        def resnet_block(input_tensor, filter_size):

            res = conv1d_factory(
                filter_size, (1), strides = (1), use_bias = False
            )(input_tensor)
            conv1 = conv1d_factory(filter_size, (5), strides = (1))(
                input_tensor
            )
            batch1 = BatchNormalization(axis = -1)(conv1, training = training)
            rel1 = conv_activation_layer(batch1)
            conv2 = conv1d_factory(filter_size, (5), strides = (1))(rel1)
            batch2 = BatchNormalization(axis = -1)(conv2, training = training)
            resconnection = Add()([res, batch2])
            rel2 = conv_activation_layer(resconnection)
            return rel2

        self.n_layers = n_layers
        self.channels_interval = channels_interval
        out_channels = [
            i * self.channels_interval for i in range(1, self.n_layers + 1)
        ]
        self.middle = tf.keras.Sequential()
        self.middle.add(
            tf.keras.layers.Conv1D(
                self.n_layers * self.channels_interval,
                kernel_size = 15,
                strides = 1,
                padding = 'SAME',
                dilation_rate = 1,
            )
        )
        self.middle.add(BatchNormalization(axis = -1))
        self.middle.add(LeakyReLU(0.2))

        decoder_out_channels_list = out_channels[::-1]

        self.decoder = []
        for i in range(self.n_layers):
            self.decoder.append(
                UpSamplingLayer(channel_out = decoder_out_channels_list[i])
            )
        self.out = tf.keras.Sequential()
        self.out.add(
            tf.keras.layers.Conv1D(
                1,
                kernel_size = 1,
                strides = 1,
                padding = 'SAME',
                dilation_rate = 1,
            )
        )
        self.out.add(Activation('tanh'))

        tmp = []
        o = inputs

        for i in range(self.n_layers):
            o = resnet_block(o, out_channels[i])
            tmp.append(o)
            o = o[:, ::2]
            if logging:
                print(o)

        o = self.middle(o, training = training)
        if logging:
            print(o)

        for i in range(self.n_layers):
            o = tf.image.resize(
                o, [tf.shape(o)[0], tf.shape(o)[1] * 2], method = 'nearest'
            )
            o = tf.concat([o, tmp[self.n_layers - i - 1]], axis = 2)
            o = self.decoder[i](o, training = training)
            if logging:
                print(o)

        if logging:
            print(o, inputs)
        o = tf.concat([o, inputs], axis = 2)
        o = self.out(o, training = training)
        self.logits = o
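
A minimal instantiation sketch (not part of the original source), assuming this constructor belongs to a model-building class named WaveUNet (a hypothetical name) and that the helpers not shown here (_get_conv_activation_layer, UpSamplingLayer, lay) are available. The waveform length must be divisible by 2**n_layers = 4096, since the encoder halves the time axis (o[:, ::2]) once per layer.

import tensorflow as tf

# Dummy batch of mono waveforms; 16384 samples = 4 * 2**12, so the twelve
# downsampling steps divide the length evenly.
waveform = tf.zeros([2, 16384, 1])

net = WaveUNet(inputs=waveform, training=False, logging=False)
print(net.logits.shape)  # (2, 16384, 1): tanh-activated separation output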