Code example #1
def u_net_32_64(path_weight, iot=True):
    '''
    Relevant IoT weights: 'Models/Progressive/U_NET_32-64-IoT.hdf5' - total PRD 13.3 = 5.92 + 7.41, or 0.5 + 0.7 (x 10^-3) for MSE.
    :param path_weight: path to a pretrained weights file; pass "" to skip loading
    :param iot: forwarded to decoder_skip to select the decoder variant
    :return: the assembled Keras Model
    '''
    input_shape = (2000, 1)
    inputs = Input(shape=input_shape, name='encoder_input_combined')
    # 1
    encoder_32_output, encoder_64_output = Encoder_32_64_iot(
        True, name='adaptive_encoder')(inputs)
    Decoder_32_64_skip = decoder_skip(iot)
    recons_32CG = Decoder_32_64_skip(
        ZeroPadding1D(padding=(0, 32))(encoder_32_output))
    recons_64CG = Decoder_32_64_skip(
        ZeroPadding1D(padding=(63, 0))(encoder_64_output))

    # recons_16CG = Decoder_16_32(Concatenate(axis=1)([np.zeros([1,62,1]), encoder_16_output  ]))

    model_u_unet = Model(inputs, [recons_32CG, recons_64CG],
                         name='U-NET_32_64_iot')

    if path_weight != "":
        print("loading compression net weights")
        model_u_unet.load_weights(path_weight)
    return model_u_unet
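
A minimal usage sketch for the function above (assuming the Encoder_32_64_iot and decoder_skip definitions it references are importable; the 2000-sample window comes from input_shape):

import numpy as np

# build without pretrained weights; pass e.g. 'Models/Progressive/U_NET_32-64-IoT.hdf5' to restore them
model = u_net_32_64(path_weight="")
dummy = np.zeros((1, 2000, 1), dtype=np.float32)
recons_32, recons_64 = model.predict(dummy)  # one reconstruction per embedding size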
Code example #2
    def call(self, inputs, training=False):
        app_model, model_ecg_classification = self.app_net
        self.embeddings_flatten_first = []
        self.embeddings_flatten_second = []
        self.reconstructed_signal_full_first = []
        self.reconstructed_signal_full_second = []
        for i in range(4):
            # bind i as a default arg so each slice keeps its own offset
            offset_input = Lambda(
                lambda y, i=i: y[:, i * 2000:(i + 1) * 2000, :])(inputs)
            offset_embeddings_32, offset_embeddings_64 = self.compression_net.layers[
                1](offset_input)

            offset_recons_32 = self.compression_net.layers[4](
                ZeroPadding1D(padding=(0, 32))(offset_embeddings_32))
            self.reconstructed_signal_full_first.append(offset_recons_32)
            offset_recons_64 = self.compression_net.layers[4](
                ZeroPadding1D(padding=(63, 0))(offset_embeddings_64))
            self.reconstructed_signal_full_second.append(offset_recons_64)

            offset_embeddings_32 = self.policy_predictor_first(
                offset_embeddings_32)
            self.embeddings_flatten_first.append(offset_embeddings_32)
            offset_embeddings_64 = self.policy_predictor_second(
                offset_embeddings_64)
            self.embeddings_flatten_second.append(offset_embeddings_64)

        x_reconstructed_first = Concatenate(axis=1)(
            self.reconstructed_signal_full_first)
        x_reconstructed_second = Concatenate(axis=1)(
            self.reconstructed_signal_full_second)
        x_skip = Lambda(lambda y: y[:, 8000:, :])(inputs)
        out_reconstructed_first = Concatenate(
            axis=1, name='out_recons_first')([x_reconstructed_first, x_skip])
        out_reconstructed_second = Concatenate(
            axis=1, name='out_recons_second')([x_reconstructed_second, x_skip])

        x_downstream_loss_first = Concatenate(axis=1)(
            self.embeddings_flatten_first)
        x_downstream_loss_second = Concatenate(axis=1)(
            self.embeddings_flatten_second)
        x_downstream_loss_pred_first = self.dense_critic_1(
            x_downstream_loss_first)
        x_downstream_loss_pred_second = self.dense_critic_1(
            x_downstream_loss_second)
        # x_reconstructed_loss_pred_first = self.dense_critic_2(x_downstream_loss_first)
        # x_reconstructed_loss_pred_second = self.dense_critic_2(x_downstream_loss_second)

        classification_first = model_ecg_classification(
            out_reconstructed_first)
        classification_second = model_ecg_classification(
            out_reconstructed_second)

        RR_first = slide_window(app_model, out_reconstructed_first)
        RR_second = slide_window(app_model, out_reconstructed_second)

        return [
            out_reconstructed_first, out_reconstructed_second,
            classification_first, classification_second, RR_first, RR_second,
            x_downstream_loss_pred_first, x_downstream_loss_pred_second
        ]
Code example #3
 def build(self, input_shape):
     self.layers = [
         Conv1D(filters=self.units, kernel_size=3, activation='relu'), 
         ZeroPadding1D(padding=(1, 1)), 
         Dropout(0.2), 
         Conv1D(filters=self.units, kernel_size=3), 
         ZeroPadding1D(padding=(1, 1)), 
     ]
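
Each kernel_size=3 'valid' convolution above shortens the sequence by two steps and the ZeroPadding1D((1, 1)) that follows it restores them, so the stack preserves length. Note that assigning to self.layers only works on a plain Layer subclass (Model exposes layers as a read-only property). A hypothetical call method, not shown in the original, could simply chain the list:

 def call(self, inputs, training=False):
     x = inputs
     for layer in self.layers:
         x = layer(x, training=training)  # Dropout honors the training flag; the others ignore it
     return x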
Code example #4
File: simple_models.py  Project: xusky69/FlowKet
def real_values_1d_model():
    input_layer = Input((7, ))
    first_conv_layer = Conv1D(16, kernel_size=3, strides=1)
    second_conv_layer = Conv1D(8, kernel_size=3, strides=1)
    x = ExpandInputDim()(input_layer)
    x = first_conv_layer(ZeroPadding1D(1)(x))
    x = Activation('relu')(x)
    x = second_conv_layer(ZeroPadding1D(1)(x))
    x = Activation('relu')(x)
    x = Conv1D(20, kernel_size=1)(x)
    x = Flatten()(x)
    first_dense_layer = Dense(7)
    second_dense_layer = Dense(1)
    x = first_dense_layer(x)
    x = Activation('relu')(x)
    x = second_dense_layer(x)
    return Model(input_layer, x)
Code example #5
File: discriminators.py  Project: nielsrolf/ddsp
 def __init__(self, padding, conv_channels, kernel_size, dilation_rate, nonlinear_activation, nonlinear_activation_params):
   super().__init__([
     ZeroPadding1D(padding),
     tfa.layers.WeightNormalization(
       Conv1D(conv_channels, kernel_size=kernel_size, padding='valid', dilation_rate=dilation_rate,
         kernel_initializer=tf.keras.initializers.HeNormal())),
     getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)
   ])
Code example #6
 def __init__(self,
              output_channels: int,
              input_channels: Optional[int] = None,
              kernel_size: int = 3,
              pooling_size: int = 1,
              batch_normalization: bool = True,
              dropout_rate: float = 0.0,
              l2_regularization: float = 0.0):
     super().__init__()
     leaky_relu = LeakyReLU(alpha=0.01)
     dimension_decrease_factor = 4
     if batch_normalization:
         self.batch_normalization = BatchNormalization(scale=False)
         self.batch_normalization1 = BatchNormalization(scale=False)
         self.batch_normalization2 = BatchNormalization(scale=False)
     else:
         self.batch_normalization = None
     if l2_regularization > 0:
         l2_regularizer = L2(l2_regularization)
     else:
         l2_regularizer = None
     self.dimension_decrease_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=1,
         activation=leaky_relu,
         kernel_regularizer=l2_regularizer)
     self.convolutional_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=kernel_size,
         activation=leaky_relu,
         padding='same',
         kernel_regularizer=l2_regularizer)
     self.dimension_increase_layer = Convolution1D(
         output_channels,
         kernel_size=1,
         activation=leaky_relu,
         kernel_regularizer=l2_regularizer)
     if pooling_size > 1:
         self.pooling_layer = MaxPooling1D(pool_size=pooling_size,
                                           padding='same')
     else:
         self.pooling_layer = None
     if input_channels is not None and output_channels != input_channels:
         if output_channels < input_channels:
             raise NotImplementedError(
                 f'Residual blocks with less output channels than input channels is not'
                 f'implemented. Output channels was {output_channels} and input was'
                 f'{input_channels}')
         self.dimension_change_permute0 = Permute((2, 1))
         self.dimension_change_layer = ZeroPadding1D(
             padding=(0, output_channels - input_channels))
         self.dimension_change_permute1 = Permute((2, 1))
     else:
         self.dimension_change_layer = None
     if dropout_rate > 0:
         self.dropout_layer = SpatialDropout1D(rate=dropout_rate)
     else:
         self.dropout_layer = None
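
The dimension_change_layer above uses a trick worth spelling out: ZeroPadding1D pads the temporal axis, so sandwiching it between two Permute((2, 1)) layers makes it zero-fill the channel axis instead, widening a residual shortcut to the block's output channels. A standalone sketch with illustrative sizes:

from tensorflow.keras.layers import Input, Permute, ZeroPadding1D
from tensorflow.keras.models import Model

input_channels, output_channels = 8, 32
inp = Input(shape=(100, input_channels))
x = Permute((2, 1))(inp)  # (batch, channels, time)
x = ZeroPadding1D((0, output_channels - input_channels))(x)  # the "time" axis is now channels
x = Permute((2, 1))(x)  # back to (batch, time, channels)
print(Model(inp, x).output_shape)  # (None, 100, 32)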
Code example #7
def conv_unit(inp, n_gram, no_word=200, window=2):
    out = Conv1D(no_word,
                 window,
                 strides=1,
                 padding="valid",
                 activation='relu')(inp)
    out = TimeDistributed(Dense(5, input_shape=(n_gram, no_word)))(out)
    out = ZeroPadding1D(padding=(0, window - 1))(out)
    return out
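
Since a 'valid' convolution with window w maps a length-n sequence to length n - w + 1, the trailing ZeroPadding1D(padding=(0, window - 1)) restores the original n_gram steps. A quick shape check with illustrative values:

inp = Input(shape=(21, 100))  # n_gram=21 timesteps, 100 features each
out = conv_unit(inp, n_gram=21)
print(out.shape)  # (None, 21, 5): length preserved, 5 outputs per step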
Code example #8
    def __call__(self, x):
        inputs = x
        in_channels = x.shape[-1]
        pointwise_conv_filters = int(self.filters * self.alpha)
        pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
        prefix = 'block_{}_'.format(self.block_id)

        if self.block_id:
            # Expand
            x = Conv1D(self.expansion * in_channels,
                       kernel_size=1,
                       padding='same',
                       use_bias=False,
                       activation=None,
                       name=prefix + 'expand')(x)
            x = BatchNormalization(epsilon=1e-3,
                                   momentum=0.999,
                                   name=prefix + 'expand_BN')(x)
            x = ReLU(6., name=prefix + 'expand_relu')(x)
        else:
            prefix = 'expanded_conv_'

        # Depthwise
        if self.stride == 2:
            x = ZeroPadding1D(padding=1, name=prefix + 'pad')(x)

        x = SeparableConv1D(int(x.shape[-1]),
                            kernel_size=3,
                            strides=self.stride,
                            activation=None,
                            use_bias=False,
                            padding='same' if self.stride == 1 else 'valid',
                            name=prefix + 'depthwise')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'depthwise_BN')(x)

        x = ReLU(6., name=prefix + 'depthwise_relu')(x)

        # Project
        x = Conv1D(pointwise_filters,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'project')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'project_BN')(x)

        if in_channels == pointwise_filters and self.stride == 1:
            return Add(name=prefix + 'add')([inputs, x])

        return x
Code example #9
def music_sincnet(
    filter_size=2501,
    sr=22050,
    filter_num=256,
    amplifying_ratio=16,
    drop_rate=0.5,
    weight_decay=1e-4,
    num_classes=50,
):

    # Input&Reshape
    inp = Input(shape=(59049, 1))
    # x = Reshape([-1, 1])(x)
    # MusicSinc
    x = ZeroPadding1D(padding=(filter_size - 1) // 2)(inp)
    x = MusicSinc1D(filter_num, filter_size, sr)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Strided Conv
    num_features = int(filter_num // 2)
    x = Conv1D(
        num_features,
        kernel_size=3,
        strides=3,
        padding="valid",
        use_bias=True,
        kernel_regularizer=l2(weight_decay),
        kernel_initializer="he_uniform",
    )(x)  # feed the sinc features forward; applying this conv to inp would bypass the MusicSinc front-end
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    # rese-block
    layer_outputs = []
    for i in range(9):
        num_features *= 2 if (i == 2 or i == 8) else 1
        x = rese_block(x, num_features, weight_decay, amplifying_ratio)
        layer_outputs.append(x)

    x = Concatenate()(
        [GlobalMaxPool1D()(output) for output in layer_outputs[-3:]])
    x = Dense(x.shape[-1], kernel_initializer="glorot_uniform")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    if drop_rate > 0.0:
        x = Dropout(drop_rate)(x)
    out = Dense(num_classes,
                activation="sigmoid",
                kernel_initializer="glorot_uniform")(x)

    model = Model(inputs=inp, outputs=out)
    model.summary()
    return model
Code example #10
def DenseNet(nb_dense_block, nb_layers, growth_rate, nb_filter, reduction, dropout_rate, classes=12):

    eps = 1.1e-5
    # compute compression factor
    compression = 1.0 - reduction
    global concat_axis
    concat_axis = 2
    img_input = Input(shape=(250, 6), name='data')


    # Initial convolution
    x = ZeroPadding1D(3)(img_input)
    x = Conv1D(nb_filter, 7, 2, use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = ZeroPadding1D(1)(x)
    x = MaxPooling1D(3, strides=2)(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx+2
        x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate)

        # Add transition_block
        x = transition_block(x, nb_filter, compression=compression, dropout_rate=dropout_rate)
        nb_filter = int(nb_filter * compression)

    x, nb_filter = dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=dropout_rate)

    x = BatchNormalization(epsilon=eps, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling1D()(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(classes)(x)
    x = Activation('softmax')(x)

    model = Model(img_input, x)

    return model
Code example #11
 def __init__(self,
              output_channels: int,
              input_channels: Optional[int] = None,
              kernel_size: int = 3,
              pooling_size: int = 1,
              dropout_rate: float = 0.0):
     super().__init__()
     dimension_decrease_factor = 4
     kernel_initializer = LecunNormal()
     self.dimension_decrease_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     self.convolutional_layer = Convolution1D(
         output_channels // dimension_decrease_factor,
         kernel_size=kernel_size,
         activation=selu,
         padding='same',
         kernel_initializer=kernel_initializer)
     self.dimension_increase_layer = Convolution1D(
         output_channels,
         kernel_size=1,
         activation=selu,
         kernel_initializer=kernel_initializer)
     if pooling_size > 1:
         self.pooling_layer = AveragePooling1D(pool_size=pooling_size,
                                               padding='same')
     else:
         self.pooling_layer = None
     if input_channels is not None and output_channels != input_channels:
         if output_channels < input_channels:
             raise NotImplementedError(
                 f'Residual blocks with less output channels than input channels is not'
                 f'implemented. Output channels was {output_channels} and input was'
                 f'{input_channels}')
         self.dimension_change_permute0 = Permute((2, 1))
         self.dimension_change_layer = ZeroPadding1D(
             padding=(0, output_channels - input_channels))
         self.dimension_change_permute1 = Permute((2, 1))
     else:
         self.dimension_change_layer = None
     if dropout_rate > 0:
         self.dropout_layer = AlphaDropout(rate=dropout_rate,
                                           noise_shape=(50, 1,
                                                        output_channels))
     else:
         self.dropout_layer = None
Code example #12
def causal_conv_1d(x,
                   filters,
                   kernel_size,
                   dilation_rate=1,
                   activation=None,
                   dtype=tensorflow.complex64):
    padding = kernel_size + (kernel_size - 1) * (dilation_rate - 1) - 1
    if padding > 0:
        x = ZeroPadding1D(padding=(padding, 0))(x)
    x = ComplexConv1D(filters=filters,
                      kernel_size=kernel_size,
                      strides=1,
                      dilation_rate=dilation_rate,
                      dtype=dtype)(x)
    if activation is not None:
        x = Activation(activation)(x)
    return x
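
The padding arithmetic makes the layer causal and length-preserving: a 'valid' dilated convolution consumes (kernel_size - 1) * dilation_rate steps, and kernel_size + (kernel_size - 1)(dilation_rate - 1) - 1 simplifies to exactly that, supplied entirely from the past. The same check with a plain Conv1D (ComplexConv1D comes from a complex-valued-network library not shown here):

from tensorflow.keras.layers import Input, Conv1D, ZeroPadding1D

kernel_size, dilation_rate = 3, 4
padding = kernel_size + (kernel_size - 1) * (dilation_rate - 1) - 1  # (3 - 1) * 4 = 8
inp = Input(shape=(100, 1))
x = ZeroPadding1D(padding=(padding, 0))(inp)  # pad only on the left (the past)
x = Conv1D(4, kernel_size, dilation_rate=dilation_rate)(x)
print(x.shape)  # (None, 100, 4): length preserved, no future leakage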
Code example #13
    def __init__(self, norm_output, **kwargs):
        super(Decoder_shared_iot, self).__init__(**kwargs)
        self.norm_output = norm_output

        self.upsam1 = UpSampling1D(size=2)
        self.zeroPadding = ZeroPadding1D(padding=(1, 1))
        self.conv3 = Conv1D(filters=64,
                            kernel_size=7,
                            activation='swish',
                            padding='same')
        self.conv4 = Conv1D(filters=128,
                            kernel_size=7,
                            activation='swish',
                            padding='same')
        # 20
        self.upsam2 = UpSampling1D(size=2)
        self.conv5 = Conv1D(filters=16,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        self.conv6 = Conv1D(filters=32,
                            kernel_size=5,
                            activation='swish',
                            padding='same')
        self.upsam3 = UpSampling1D(size=2)
        self.conv7 = Conv1D(filters=32,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        # 25
        #         self.upsam4 = UpSampling1D(size=2)
        self.conv8 = Conv1D(filters=8,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        self.conv9 = Conv1D(filters=2,
                            kernel_size=3,
                            activation='swish',
                            padding='same')
        self.flatten = Flatten()

        self.Dense = Dense(2000, name='outputs_decoder')
Code example #14
def conv_block(x, nb_filter, dropout_rate=None):

    eps = 1.1e-5
    # kernel-size-1 convolution (bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = Conv1D(inter_channel, 1, use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # kernel-size-3 convolution
    x = BatchNormalization(epsilon=eps, axis=concat_axis)(x)
    x = Activation('relu')(x)
    x = ZeroPadding1D(1)(x)
    x = Conv1D(nb_filter, 3, use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
Code example #15
def causal_conv_1d(x,
                   filters,
                   kernel_size,
                   weights_normalization,
                   dilation_rate=1,
                   activation=None,
                   skip_connection=None):
    padding = kernel_size + (kernel_size - 1) * (dilation_rate - 1) - 1
    if padding > 0:
        x = ZeroPadding1D(padding=(padding, 0))(x)
    conv_layer = Conv1D(filters=filters,
                        kernel_size=kernel_size,
                        strides=1,
                        dilation_rate=dilation_rate)
    if weights_normalization:
        conv_layer = WeightNormalization(conv_layer)
    x = conv_layer(x)
    if skip_connection is not None:
        x = Add()([x, skip_connection])
    if activation is not None:
        x = Activation(activation)(x)
    return x
Code example #16
    def backend(self, x):
        #     x = UpSampling1D(size=16)(x)
        x = self.unpool(x)

        if self.supervised:
            x = self.backend_supervised(x, self.X1)

            # DECONVOLUTION
        x = ZeroPadding1D((32, 31), name='zero_padding_deconv')(x) # zero pad the time series before doing 1d convolution
        x = Permute((2, 1), input_shape=(1024, 128))(x)
        x = Lambda(lambda x: K.expand_dims(x, axis=3))(x) # add dimension to input for 1d conv

        kernel = Lambda(lambda x: K.transpose(x))(self.w1.kernel) # transpose kernel
        kernel = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(kernel) # switch kernel dimensions up
        kernel = Lambda(lambda x: K.expand_dims(x, axis=3))(kernel) # add dimension to the kernel
       
        x = Deconvolution1D(kernel)(x)
#         x = Lambda(lambda x: K.conv2d(x[0], x[1], padding='valid'))([x, kernel])
        
        x = Lambda(lambda x: K.squeeze(x, axis=3))(x) # remove extra dimension
        x = Permute((2, 1), input_shape=(1, 1024))(x) # permute output dims
     
        return x
Code example #17
File: mobilenet.py  Project: Shakshi3104/tfgarden
    def __call__(self, x):
        if self.strides != 1:
            x = ZeroPadding1D((0, 1), name='conv_pad_%d' % self.block_id)(x)

        x = SeparableConv1D(int(x.shape[-1]),
                            3,
                            padding='same' if self.strides == 1 else 'valid',
                            depth_multiplier=self.depth_multipliter,
                            strides=self.strides,
                            use_bias=False,
                            name='conv_dw_%d' % self.block_id)(x)
        x = BatchNormalization(name='conv_dw_%d_bn' % self.block_id)(x)
        x = ReLU(6., name='conv_dw_%d_relu' % self.block_id)(x)

        x = Conv1D(self.pointwise_conv_filter,
                   1,
                   padding='same',
                   use_bias=False,
                   strides=1,
                   name='conv_pw_%d' % self.block_id)(x)
        x = BatchNormalization(name='conv_pw_%d_bn' % self.block_id)(x)
        x = ReLU(6., name='conv_pw_%d_relu' % self.block_id)(x)
        return x
Code example #18
x_test = x_test.reshape(x_test.shape[0], 10, 1)


# -----------------------------------------------------------------------------------------------------
from tensorflow.keras.models import Sequential, load_model, Model
from tensorflow.keras.layers import Conv1D, Conv2D, Dense, Dropout, MaxPooling2D, MaxPooling1D, BatchNormalization, Flatten, Input
from tensorflow.keras.layers import Activation, ZeroPadding2D, Concatenate, AveragePooling2D, ZeroPadding1D, AveragePooling1D
from tensorflow.keras.regularizers import l2

input = Input(shape=(10, 1))

input_pad = ZeroPadding1D(padding=(3, 3))(input)
conv1_7x7_s2 = Conv1D(64, 2, strides=1, padding='valid', activation='relu', name='conv1/7x7_s2', kernel_regularizer=l2(0.0002))(input_pad)
conv1_zero_pad = ZeroPadding1D(padding=1)(conv1_7x7_s2)
pool1_helper = BatchNormalization()(conv1_zero_pad)
pool1_3x3_s2 = MaxPooling1D(pool_size=2, strides=1, padding='valid', name='pool1/3x3_s2')(pool1_helper)

conv2_3x3_reduce = Conv1D(64, 1, padding='same', activation='relu', name='conv2/3x3_reduce', kernel_regularizer=l2(0.0002))(pool1_3x3_s2)
conv2_3x3 = Conv1D(192, 2, padding='same', activation='relu', name='conv2/3x3', kernel_regularizer=l2(0.0002))(conv2_3x3_reduce)
conv2_zero_pad = ZeroPadding1D(padding=1)(conv2_3x3)
pool2_helper = BatchNormalization()(conv2_zero_pad)
pool2_3x3_s2 = MaxPooling1D(pool_size=2, strides=1, padding='valid', name='pool2/3x3_s2')(pool2_helper)
Code example #19
def test_delete_channels_zeropadding1d(channel_index):
    layer = ZeroPadding1D(3)
    layer_test_helper_flatten_1d(layer, channel_index)
Code example #20
    def build_model(data_handler, m_params):

        K.set_floatx('float64')

        levels = m_params.levels
        blocks = m_params.blocks
        dil_chan = m_params.dil_chan
        res_chan = m_params.res_chan
        skip_chan = m_params.skip_chan
        initial_kernel = m_params.init_kernel
        kernel_size = m_params.kernel
        l2_decay = m_params.l2_decay
        kernel_init = m_params.kernel_init

        input_chan = data_handler.get_data_channels()
        cond_chan = data_handler.get_label_channels()

        start_pad = m_params.start_pad

        # Inputs
        input_layer = Input(shape=(
            None,
            input_chan,
        ))
        label_input = Input(shape=(
            None,
            cond_chan,
        ))

        # Padding the data
        x = ZeroPadding1D((start_pad, 0), name='start_pad')(input_layer)
        x = Conv1D(res_chan,
                   initial_kernel,
                   use_bias=True,
                   kernel_initializer=kernel_init,
                   name='start-conv',
                   kernel_regularizer=l2(l2_decay))(x)

        skip_connection = 0

        for i in range(blocks):
            neural_levels = levels
            dilation_rate = 1
            if i == (blocks - 1):
                neural_levels = levels - 1
            for j in range(neural_levels):

                layer_name = "_Block_" + str(i) + "_Level_" + str(j)

                residual_input = x

                label_conv = Conv1D(
                    dil_chan * 2,
                    1,
                    use_bias=True,
                    kernel_initializer=kernel_init,
                    name=('label_conv' + layer_name),
                    kernel_regularizer=l2(l2_decay))(label_input)

                x = Conv1D(dil_chan * 2,
                           kernel_size,
                           padding="causal",
                           dilation_rate=dilation_rate,
                           use_bias=True,
                           kernel_initializer=kernel_init,
                           name=('dilated_conv' + layer_name),
                           kernel_regularizer=l2(l2_decay))(x)

                x = Add(name=('add_label_dil' + layer_name))([x, label_conv])

                filter_layer, gate_layer = Lambda(split_layer,
                                                  arguments={
                                                      'parts': 2,
                                                      'axis': 2
                                                  },
                                                  name=('split' +
                                                        layer_name))(x)

                filter_layer = Activation(
                    'tanh', name=('filter' + layer_name))(filter_layer)
                gate_layer = Activation('sigmoid',
                                        name=('gate' + layer_name))(gate_layer)

                x = Multiply(name=('multiply' +
                                   layer_name))([filter_layer, gate_layer])

                skip_layer = x

                skip_layer = Conv1D(
                    skip_chan,
                    1,
                    use_bias=True,
                    kernel_initializer=kernel_init,
                    name=('skip' + layer_name),
                    kernel_regularizer=l2(l2_decay))(skip_layer)

                try:
                    skip_connection = Add(name=('add_skip' + layer_name))(
                        [skip_layer, skip_connection])
                except Exception:
                    # first level: skip_connection is still the integer 0
                    skip_connection = skip_layer

                x = Conv1D(res_chan,
                           1,
                           use_bias=True,
                           kernel_initializer=kernel_init,
                           name=('residual' + layer_name),
                           kernel_regularizer=l2(l2_decay))(x)

                x = Add(name=('add_res' + layer_name))([x, residual_input])

                dilation_rate = dilation_rate * 2

        label_output = Conv1D(skip_chan,
                              1,
                              use_bias=True,
                              kernel_initializer=kernel_init,
                              name='label_out',
                              kernel_regularizer=l2(l2_decay))(label_input)

        x = Add(name='add_skip_out')([skip_connection, label_output])

        x = Activation("tanh", name='output')(x)

        x = Conv1D(skip_chan,
                   1,
                   use_bias=True,
                   kernel_initializer=kernel_init,
                   name='final-output',
                   kernel_regularizer=l2(l2_decay))(x)

        network = Model([input_layer, label_input], x)

        return network
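
The filter/gate pair above is the WaveNet-style gated activation z = tanh(W_f * x) ⊙ σ(W_g * x): one Conv1D emits 2 * dil_chan channels and split_layer halves them along the channel axis before the elementwise multiply. The original split_layer helper is not shown; a minimal version compatible with the Lambda call would be:

import tensorflow as tf

def split_layer(x, parts=2, axis=2):
    # split a tensor into `parts` equal chunks along `axis`
    return tf.split(x, num_or_size_splits=parts, axis=axis)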
Code example #21
def get_test_model_exhaustive():
    """Returns a exhaustive test model."""
    input_shapes = [(2, 3, 4, 5, 6), (2, 3, 4, 5, 6), (7, 8, 9, 10),
                    (7, 8, 9, 10), (11, 12, 13), (11, 12, 13), (14, 15),
                    (14, 15), (16, ),
                    (16, ), (2, ), (1, ), (2, ), (1, ), (1, 3), (1, 4),
                    (1, 1, 3), (1, 1, 4), (1, 1, 1, 3), (1, 1, 1, 4),
                    (1, 1, 1, 1, 3), (1, 1, 1, 1, 4), (26, 28, 3), (4, 4, 3),
                    (4, 4, 3), (4, ), (2, 3), (1, ), (1, ), (1, ), (2, 3),
                    (9, 16, 1), (1, 9, 16)]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Conv1D(1, 3, padding='valid')(inputs[6]))
    outputs.append(Conv1D(2, 1, padding='same')(inputs[6]))
    outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6]))
    outputs.append(ZeroPadding1D(2)(inputs[6]))
    outputs.append(Cropping1D((2, 3))(inputs[6]))
    outputs.append(MaxPooling1D(2)(inputs[6]))
    outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(MaxPooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(AveragePooling1D(2)(inputs[6]))
    outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(
        AveragePooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(GlobalMaxPooling1D()(inputs[6]))
    outputs.append(GlobalMaxPooling1D(data_format="channels_first")(inputs[6]))
    outputs.append(GlobalAveragePooling1D()(inputs[6]))
    outputs.append(
        GlobalAveragePooling1D(data_format="channels_first")(inputs[6]))

    outputs.append(Conv2D(4, (3, 3))(inputs[4]))
    outputs.append(Conv2D(4, (3, 3), use_bias=False)(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4]))

    outputs.append(SeparableConv2D(3, (3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((1, 2))(inputs[4]))

    outputs.append(MaxPooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(MaxPooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default MaxPoolingOp only supports NHWC on device type CPU
    outputs.append(
        MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(AveragePooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(AveragePooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default AvgPoolingOp only supports NHWC on device type CPU
    outputs.append(
        AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))

    outputs.append(GlobalAveragePooling2D()(inputs[4]))
    outputs.append(
        GlobalAveragePooling2D(data_format="channels_first")(inputs[4]))
    outputs.append(GlobalMaxPooling2D()(inputs[4]))
    outputs.append(GlobalMaxPooling2D(data_format="channels_first")(inputs[4]))

    outputs.append(Permute((3, 4, 1, 5, 2))(inputs[0]))
    outputs.append(Permute((1, 5, 3, 2, 4))(inputs[0]))
    outputs.append(Permute((3, 4, 1, 2))(inputs[2]))
    outputs.append(Permute((2, 1, 3))(inputs[4]))
    outputs.append(Permute((2, 1))(inputs[6]))
    outputs.append(Permute((1, ))(inputs[8]))

    outputs.append(Permute((3, 1, 2))(inputs[31]))
    outputs.append(Permute((3, 1, 2))(inputs[32]))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[31])))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[32])))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(axis=1)(inputs[0]))
    outputs.append(BatchNormalization(axis=2)(inputs[0]))
    outputs.append(BatchNormalization(axis=3)(inputs[0]))
    outputs.append(BatchNormalization(axis=4)(inputs[0]))
    outputs.append(BatchNormalization(axis=5)(inputs[0]))
    outputs.append(BatchNormalization()(inputs[2]))
    outputs.append(BatchNormalization(axis=1)(inputs[2]))
    outputs.append(BatchNormalization(axis=2)(inputs[2]))
    outputs.append(BatchNormalization(axis=3)(inputs[2]))
    outputs.append(BatchNormalization(axis=4)(inputs[2]))
    outputs.append(BatchNormalization()(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(BatchNormalization(axis=1)(inputs[4])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[4]))
    outputs.append(BatchNormalization(axis=3)(inputs[4]))
    outputs.append(BatchNormalization()(inputs[6]))
    outputs.append(BatchNormalization(axis=1)(inputs[6]))
    outputs.append(BatchNormalization(axis=2)(inputs[6]))
    outputs.append(BatchNormalization()(inputs[8]))
    outputs.append(BatchNormalization(axis=1)(inputs[8]))
    outputs.append(BatchNormalization()(inputs[27]))
    outputs.append(BatchNormalization(axis=1)(inputs[27]))
    outputs.append(BatchNormalization()(inputs[14]))
    outputs.append(BatchNormalization(axis=1)(inputs[14]))
    outputs.append(BatchNormalization(axis=2)(inputs[14]))
    outputs.append(BatchNormalization()(inputs[16]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(BatchNormalization(axis=1)(inputs[16])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[16]))
    outputs.append(BatchNormalization(axis=3)(inputs[16]))
    outputs.append(BatchNormalization()(inputs[18]))
    outputs.append(BatchNormalization(axis=1)(inputs[18]))
    outputs.append(BatchNormalization(axis=2)(inputs[18]))
    outputs.append(BatchNormalization(axis=3)(inputs[18]))
    outputs.append(BatchNormalization(axis=4)(inputs[18]))
    outputs.append(BatchNormalization()(inputs[20]))
    outputs.append(BatchNormalization(axis=1)(inputs[20]))
    outputs.append(BatchNormalization(axis=2)(inputs[20]))
    outputs.append(BatchNormalization(axis=3)(inputs[20]))
    outputs.append(BatchNormalization(axis=4)(inputs[20]))
    outputs.append(BatchNormalization(axis=5)(inputs[20]))

    outputs.append(Dropout(0.5)(inputs[4]))

    outputs.append(ZeroPadding2D(2)(inputs[4]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[4]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4]))
    outputs.append(Cropping2D(2)(inputs[4]))
    outputs.append(Cropping2D((2, 3))(inputs[4]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4]))

    outputs.append(Dense(3, use_bias=True)(inputs[13]))
    outputs.append(Dense(3, use_bias=True)(inputs[14]))
    outputs.append(Dense(4, use_bias=False)(inputs[16]))
    outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18]))
    outputs.append(Dense(4, use_bias=False)(inputs[20]))

    outputs.append(Reshape(((2 * 3 * 4 * 5 * 6), ))(inputs[0]))
    outputs.append(Reshape((2, 3 * 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5, 6))(inputs[0]))

    outputs.append(Reshape((16, ))(inputs[8]))
    outputs.append(Reshape((2, 8))(inputs[8]))
    outputs.append(Reshape((2, 2, 4))(inputs[8]))
    outputs.append(Reshape((2, 2, 2, 2))(inputs[8]))
    outputs.append(Reshape((2, 2, 1, 2, 2))(inputs[8]))

    outputs.append(RepeatVector(3)(inputs[8]))

    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4]))

    outputs.append(ReLU()(inputs[0]))

    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]]))
    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]]))
    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]]))
    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]]))
    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]]))
    for axis in [-1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))
    for axis in [-1, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))
    for axis in [-1, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))
    for axis in [-1, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]]))

    outputs.append(UpSampling1D(size=2)(inputs[6]))
    # outputs.append(UpSampling1D(size=2)(inputs[8])) # ValueError: Input 0 of layer up_sampling1d_1 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 16]

    outputs.append(Multiply()([inputs[10], inputs[11]]))
    outputs.append(Multiply()([inputs[11], inputs[10]]))
    outputs.append(Multiply()([inputs[11], inputs[13]]))
    outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]]))
    outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[23]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[24]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1),
                padding='valid')(up_scale_2(inputs[24]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(Add()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Subtract()([inputs[26], inputs[30]]))
    outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Average()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]]))

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5, name='duplicate_layer_name')(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5, name='duplicate_layer_name'))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    intermediate_model_3_nested = Sequential()
    intermediate_model_3_nested.add(Dense(7, input_shape=(6, )))
    intermediate_model_3_nested.compile(optimizer='rmsprop',
                                        loss='categorical_crossentropy')

    intermediate_model_3 = Sequential()
    intermediate_model_3.add(Dense(6, input_shape=(5, )))
    intermediate_model_3.add(intermediate_model_3_nested)
    intermediate_model_3.add(Dense(8))
    intermediate_model_3.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_3(x)  # (1, 1, 8)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[25]),
        Activation('hard_sigmoid')(inputs[25]),
        Activation('selu')(inputs[25]),
        Activation('sigmoid')(inputs[25]),
        Activation('softplus')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('relu')(inputs[25]),
        Activation('relu6')(inputs[25]),
        Activation('swish')(inputs[25]),
        Activation('exponential')(inputs[25]),
        Activation('gelu')(inputs[25]),
        Activation('softsign')(inputs[25]),
        LeakyReLU()(inputs[25]),
        ReLU()(inputs[25]),
        ReLU(max_value=0.4, negative_slope=1.1, threshold=0.3)(inputs[25]),
        ELU()(inputs[25]),
        PReLU()(inputs[24]),
        PReLU()(inputs[25]),
        PReLU()(inputs[26]),
        shared_activation(inputs[25]),
        Activation('linear')(inputs[26]),
        Activation('linear')(inputs[23]),
        x,
        shared_activation(x),
    ]

    model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 2
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
Code example #22
plt.ylabel('Atmospheric Pressure')
plt.xlabel('Index')
plt.show()

"""**Inciando o processo de previsão via CNN**"""

from tensorflow.keras.layers import Flatten  # flatten layer to collapse the data to one dimension
from tensorflow.keras.layers import ZeroPadding1D  # pads the data after the convolution
from tensorflow.keras.layers import Conv1D  # convolution layer
from tensorflow.keras.layers import AveragePooling1D  # reduction layer (mean of the values found)

# define the input layer
camada_entrada = Input(shape=(10, 1), dtype='float32')

# add the padding layer
camada_padding = ZeroPadding1D(padding=1)(camada_entrada)  # keeps the sequence length unchanged

# add the convolution layer
camada_convolucao_1D = Conv1D(64, 3, strides=1, use_bias=True)(camada_padding)  # 64 filters with a convolution window of 3

# pooling layer
camada_pooling = AveragePooling1D(pool_size=3, strides=1)(camada_convolucao_1D)  # reduces via the mean over each window (the max could be used instead)

# flatten layer
camada_flatten = Flatten()(camada_pooling)  # reshapes the data into a one-dimensional vector

# add the dropout layer
camada_dropout_cnn = Dropout(0.2)(camada_flatten)

# output layer
camada_saida = Dense(1, activation='linear')(camada_dropout_cnn)
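
The snippet stops before the model is assembled; a minimal continuation (the model name is illustrative) would wire the input and output layers together and compile for regression:

from tensorflow.keras.models import Model

modelo_cnn = Model(camada_entrada, camada_saida)  # hypothetical name
modelo_cnn.compile(optimizer='adam', loss='mse')  # mean squared error suits the forecasting task
modelo_cnn.summary()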
Code example #23
         bias_initializer='ones'), Input((10, 10, 5)), 32),
 (Conv2D(8,
         kernel_size=3,
         strides=2,
         data_format='channels_last',
         bias_initializer='ones'), Input((10, 10, 5)), 32),
 (ZeroPadding2D(((1, 2), (5, 3))), Input((10, 10, 5)), 32),
 (ZeroPadding2D(((1, 0), (5, 0))), Input((10, 10, 5)), 32),
 (ZeroPadding2D(((0, 1), (0, 5))), Input((10, 10, 5)), 32),
 (ZeroPadding3D(((0, 1), (0, 5), (0, 3))), Input((10, 10, 8, 5)), 32),
 (ZeroPadding3D(((1, 0), (5, 0), (3, 0))), Input((10, 10, 8, 5)), 32),
 (ZeroPadding3D(((1, 2), (5, 4), (3, 6))), Input((10, 10, 8, 5)), 32),
 (Conv1D(
     8, kernel_size=3, data_format='channels_last',
     bias_initializer='ones'), Input((6, 5)), 32),
 (ZeroPadding1D(padding=(1, 0)), Input((6, 5)), 32),
 (ZeroPadding1D(padding=(1, 1)), Input((6, 5)), 32),
 (Conv3D(8,
         kernel_size=(3, 3, 3),
         data_format='channels_last',
         bias_initializer='ones'), Input((6, 6, 6, 5)), 32),
 (ComplexConv2D(8, kernel_size=3), Input(
     (6, 6, 5), dtype=tf.complex64), 32),
 (ComplexConv2D(8, kernel_size=3), Input(
     (7, 7, 5), dtype=tf.complex64), 32),
 (ComplexConv2D(8, kernel_size=3,
                dilation_rate=2), Input(
                    (10, 10, 5), dtype=tf.complex64), 32),
 (ComplexConv2D(8, kernel_size=3,
                strides=2), Input((10, 10, 5), dtype=tf.complex64), 32),
Code example #24
    def get_model(
        self,
        shape_inputs: Optional[List[Tuple]] = None,
    ):
        f"""keras model for {self.model_name}

        Args:
            n_output (int): number of neurons in the last layer
            output_layer (str): activation function of last layer
            shape_inputs (list of tuples): Eg: For two inputs [(4800,1),(1000,4)]
        """
        if shape_inputs is None:
            shape_inputs = [(value.get("len_input"), len(value.get("leads")))
                            for value in self.inputs.values()]

        # Inputs
        inp = Input(shape=shape_inputs[0])

        padd = ZeroPadding1D(3)(inp)

        conv1 = Conv1D(
            64,
            7,
            strides=2,
            padding='valid',
            name='conv1',
        )(padd)
        conv1 = BatchNormalization(name='batch2')(conv1)
        conv1 = Activation('relu')(conv1)
        conv1 = ZeroPadding1D(1)(conv1)
        conv1 = MaxPool1D(3, 2)(conv1)

        conv2 = self.convolutional_block(conv1, [64, 64, 256],
                                         3,
                                         '2',
                                         '1',
                                         strides=1)
        conv2 = self.identity_block(conv2, [64, 64, 256], 3, '2', '2')
        conv2 = self.identity_block(conv2, [64, 64, 256], 3, '2', '3')

        conv3 = self.convolutional_block(conv2, [128, 128, 512], 3, '3', '1')
        conv3 = self.identity_block(conv3, [128, 128, 512], 3, '3', '2')
        conv3 = self.identity_block(conv3, [128, 128, 512], 3, '3', '3')
        conv3 = self.identity_block(conv3, [128, 128, 512], 3, '3', '4')

        conv4 = self.convolutional_block(conv3, [256, 256, 1024], 3, '4', '1')
        conv4 = self.identity_block(conv4, [256, 256, 1024], 3, '4', '2')
        conv4 = self.identity_block(conv4, [256, 256, 1024], 3, '4', '3')
        conv4 = self.identity_block(conv4, [256, 256, 1024], 3, '4', '4')
        conv4 = self.identity_block(conv4, [256, 256, 1024], 3, '4', '5')
        conv4 = self.identity_block(conv4, [256, 256, 1024], 3, '4', '6')

        conv5 = self.convolutional_block(conv4, [512, 512, 2048], 3, '5', '1')
        conv5 = self.identity_block(conv5, [512, 512, 2048], 3, '5', '2')
        conv5 = self.identity_block(conv5, [512, 512, 2048], 3, '5', '3')

        avg_pool = GlobalAveragePooling1D()(conv5)
        output = Dense(self.n_output,
                       activation=self.output_layer,
                       dtype=tf.float32)(avg_pool)

        model = Model(inp, output)

        return model
Code example #25
def CNN1d(features_training, labels_training, features_testing, labels_testing,
          features_validation, labels_validation, modelname):
    """convert the 2D numpy arrary to 3D arrary"""
    features_training = features_training.reshape(features_training.shape +
                                                  (1, ))
    features_testing = features_testing.reshape(features_testing.shape + (1, ))
    features_validation = features_validation.reshape(
        features_validation.shape + (1, ))
    nfeature = features_testing.shape[1]

    # """Empty the results file whenever we run TF"""
    # with open('CNN_accuracy_results.csv', 'w', newline='') as writeFile:
    # writer = csv.writer(writeFile)
    # writer.writerow(['Epoch', 'Testing Accuracy'])
    """Check testing accuracy after each epoch, does this by looking at accuracy of first 20000 shuffled testing features and testing labels"""
    class MyCallBack(Callback):
        def on_epoch_end(self, epoch, logs=None):
            acc = accuracy_score(
                labels_testing[:20000],
                model.predict_classes(features_testing[:20000]))
            print("Testing accuracy:", acc)

            # with open('CNN_accuracy_results.csv', 'a', newline='') as writeFile:
            # writer = csv.writer(writeFile)
            # try:
            # writer.writerow([self.model.history.epoch[-1]+2,acc])
            # except:
            # writer.writerow([1, acc])
            # writeFile.close()

    cbk = MyCallBack()
    """CNN model code"""
    print("Training data...")

    model = tf.keras.models.Sequential()
    # model.add(ZeroPadding1D(1,input_shape=(nfeature,1)))
    # model.add(Conv1D(filters=64, kernel_size=3, activation='relu',data_format='channels_last'))
    # model.add(ZeroPadding1D(1))
    # model.add(Conv1D(64, 3, activation='relu',data_format='channels_last'))
    # model.add(MaxPooling1D(pool_size=2, strides=2))

    # model.add(ZeroPadding1D(1))
    # model.add(Conv1D(128, 3, activation='relu',data_format='channels_last'))
    # model.add(ZeroPadding1D(1))
    # model.add(Conv1D(128,3, activation='relu',data_format='channels_last'))
    # model.add(MaxPooling1D(pool_size=2, strides=2))

    model.add(ZeroPadding1D(1, input_shape=(nfeature, 1)))
    model.add(Conv1D(512, 3, activation='relu', data_format='channels_last'))
    model.add(ZeroPadding1D(1))
    model.add(Conv1D(512, 3, activation='relu', data_format='channels_last'))
    model.add(ZeroPadding1D(1))
    model.add(Conv1D(512, 3, activation='relu', data_format='channels_last'))
    # model.add(MaxPooling1D(pool_size=2, strides=2))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(8, activation='softmax'))

    start = time.time()

    model.compile(
        optimizer=Adam(lr=1.5e-4),  # good default optimizer to start with
        loss='sparse_categorical_crossentropy',  # the "error" the network aims to minimize
        metrics=['accuracy'])  # what to track
    # simple early stopping
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)

    model.fit(
        features_training,
        labels_training,
        epochs=20,
        batch_size=3200,
        # validation_split=0.2,
        validation_data=(features_validation, labels_validation),
        callbacks=[es],
        shuffle=True)  # train the model
    # # summarize history for accuracy
    # plt.plot(history.history['accuracy'])
    # plt.plot(history.history['val_accuracy'])
    # plt.title('model accuracy')
    # plt.ylabel('accuracy')
    # plt.xlabel('epoch')
    # plt.legend(['train', 'validation'], loc='upper left')
    # plt.show()
    # # summarize history for loss
    # plt.plot(history.history['loss'])
    # plt.plot(history.history['val_loss'])
    # plt.title('model loss')
    # plt.ylabel('loss')
    # plt.xlabel('epoch')
    # plt.legend(['train', 'validation'], loc='upper left')
    # plt.show()

    OA = accuracy_score(labels_testing,
                        model.predict_classes(features_testing))
    Kappa = cohen_kappa_score(labels_testing,
                              model.predict_classes(features_testing))
    array = confusion_matrix(labels_testing,
                             model.predict_classes(features_testing))
    print("Test Accuracy ", OA)
    print("Confusion matrix ", array)
    model.save('CNN1d_model_' + modelname + '.h5')
    end = time.time()
    print(end - start)
    t = end - start
    with open('CNN1d_accuracy_results_' + modelname + '.csv', 'a',
              newline='') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerow([OA, Kappa, t])
        writeFile.close()
    """Visualization of results ~ Confusion matrix ~ Labels_validation X Features_validation"""
    # array=normalize(confusion_matrix(model.history.validation_data[1], model.predict_classes(model.history.validation_data[0])))
    # df_cm = pd.DataFrame(array, range(1,8),
    # range(1,8))
    # sn.set(font_scale=1.4)  #for label size
    # sn.heatmap(df_cm, annot=True,annot_kws={"size": 10}, cmap='Blues')# font size
    # plt.title('Validation accuracy')
    # plt.xlabel('Predicted label', fontsize=16)
    # plt.ylabel('True label', fontsize=16)
    # plt.show()
    """Visualization of results ~ Confusion matrix ~ Labels_testing X Features_testing"""

    df_cm = pd.DataFrame(array, range(1, 8), range(1, 8))
    df_cm.to_csv('CNN1d_ConfusionMatrix_' + modelname + '.csv')

    with open('CNN1d_accuracy_results_' + modelname + '.csv', 'a',
              newline='') as targetcsv:
        writer = csv.writer(targetcsv)
        with open('CNN1d_ConfusionMatrix_' + modelname + '.csv',
                  'r') as sourcecsv:
            reader = csv.reader(sourcecsv)
            for row in reader:
                writer.writerow(row)
        targetcsv.close()
Code example #26
    def ResNet1D(self, save_img=False):
        """ Builds the residual network architecture by calling identity_block_1D and convolutional_block_1D. 
        Puts together structure and depth of model here by stacking identity and convolutional layers.
        More stages/blocks may be added at will.
    """

        # X_input = X_input
        X = ZeroPadding1D(3)(self.X_input)

        # stage 1
        X = Conv1D(filters=32,
                   kernel_size=7,
                   strides=2,
                   kernel_initializer='glorot_uniform',
                   name='Conv1D_Stage_1')(X)
        X = BatchNormalization(name='BN1D_Stage_1')(X)
        X = relu(X)
        X = MaxPooling1D(pool_size=3, strides=2)(X)

        # stage 2
        X = self.convolutional_block_1D(X=X,
                                        filters=[32, 32, 126],
                                        kernel_size=3,
                                        stride=1,
                                        stage=2,
                                        block='A')
        X = self.identity_block_1D(X=X,
                                   filters=[32, 32, 126],
                                   kernel_size=3,
                                   stage=2,
                                   block='B')
        X = self.identity_block_1D(X=X,
                                   filters=[32, 32, 126],
                                   kernel_size=3,
                                   stage=2,
                                   block='C')

        # stage 3
        X = self.convolutional_block_1D(X=X,
                                        filters=[64, 64, 256],
                                        kernel_size=3,
                                        stride=2,
                                        stage=3,
                                        block='A')
        X = self.identity_block_1D(X=X,
                                   filters=[64, 64, 256],
                                   kernel_size=3,
                                   stage=3,
                                   block='B')
        X = self.identity_block_1D(X=X,
                                   filters=[64, 64, 256],
                                   kernel_size=3,
                                   stage=3,
                                   block='C')
        X = self.identity_block_1D(X=X,
                                   filters=[64, 64, 256],
                                   kernel_size=3,
                                   stage=3,
                                   block='D')

        # stage 4
        X = self.convolutional_block_1D(X=X,
                                        filters=[128, 128, 512],
                                        kernel_size=3,
                                        stride=2,
                                        stage=4,
                                        block='A')
        X = self.identity_block_1D(X=X,
                                   filters=[128, 128, 512],
                                   kernel_size=3,
                                   stage=4,
                                   block='B')
        X = self.identity_block_1D(X=X,
                                   filters=[128, 128, 512],
                                   kernel_size=3,
                                   stage=4,
                                   block='C')
        X = self.identity_block_1D(X=X,
                                   filters=[128, 128, 512],
                                   kernel_size=3,
                                   stage=4,
                                   block='D')
        X = self.identity_block_1D(X=X,
                                   filters=[128, 128, 512],
                                   kernel_size=3,
                                   stage=4,
                                   block='E')
        X = self.identity_block_1D(X=X,
                                   filters=[128, 128, 512],
                                   kernel_size=3,
                                   stage=4,
                                   block='F')

        if self.trim_end:
            # return feature map and do NOT perform sigmoid activation.
            return X
        else:
            # get output layers and perform sigmoid activation.
            X = Flatten()(X)
            X = Dense(units=self.num_classes,
                      activation='sigmoid',
                      name='final_dense')(X)

            # create model
            model = Model(inputs=self.X_input, outputs=X, name='myResNet')
            if save_img:
                keras.utils.plot_model(model, 'ResNet1D.png')
            return model
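The identity_block_1D and convolutional_block_1D helpers called above are defined elsewhere in the class. As a hedged sketch only: an identity_block_1D compatible with these call sites might follow the standard bottleneck layout (1x1 reduce, kxk conv, 1x1 expand, shortcut add); the layer names and activation placement below are assumptions, not the original code.

from tensorflow.keras.layers import Conv1D, BatchNormalization, Activation, Add

def identity_block_1D(self, X, filters, kernel_size, stage, block):
    # Hypothetical bottleneck identity block matching the calls above.
    # Assumes the input channel count equals filters[2], so the shortcut
    # needs no projection.
    f1, f2, f3 = filters
    base = 'Stage_{}_Block_{}'.format(stage, block)
    shortcut = X

    X = Conv1D(f1, 1, strides=1, kernel_initializer='glorot_uniform',
               name=base + '_conv_a')(X)
    X = BatchNormalization(name=base + '_bn_a')(X)
    X = Activation('relu')(X)

    X = Conv1D(f2, kernel_size, strides=1, padding='same',
               kernel_initializer='glorot_uniform', name=base + '_conv_b')(X)
    X = BatchNormalization(name=base + '_bn_b')(X)
    X = Activation('relu')(X)

    X = Conv1D(f3, 1, strides=1, kernel_initializer='glorot_uniform',
               name=base + '_conv_c')(X)
    X = BatchNormalization(name=base + '_bn_c')(X)

    X = Add()([X, shortcut])
    X = Activation('relu')(X)
    return X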
Code example #27
def check_and_add_padding(x, y):
    # Prepend one zero time step to x when the reference tensor y has an
    # odd time dimension, so that downstream lengths line up after striding.
    if y.shape[1] % 2 != 0:
        return ZeroPadding1D(padding=(1, 0))(x)
    return x
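A brief usage sketch (the shapes are illustrative; assumes the time dimension of y is statically known when the graph is built):

from tensorflow.keras.layers import Input

x = Input(shape=(10, 1))   # tensor that may need padding
y = Input(shape=(21, 1))   # reference tensor with an odd time dimension
padded = check_and_add_padding(x, y)   # one zero step prepended -> (None, 11, 1)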
Code example #28
def get_test_model_exhaustive():
    """Returns a exhaustive test model."""
    input_shapes = [
        (2, 3, 4, 5, 6),
        (2, 3, 4, 5, 6),
        (7, 8, 9, 10),
        (7, 8, 9, 10),
        (11, 12, 13),
        (11, 12, 13),
        (14, 15),
        (14, 15),
        (16, ),
        (16, ),
        (2, ),
        (1, ),
        (2, ),
        (1, ),
        (1, 3),
        (1, 4),
        (1, 1, 3),
        (1, 1, 4),
        (1, 1, 1, 3),
        (1, 1, 1, 4),
        (1, 1, 1, 1, 3),
        (1, 1, 1, 1, 4),
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (1, ),
        (1, ),
        (1, ),
        (2, 3),
    ]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Conv1D(1, 3, padding='valid')(inputs[6]))
    outputs.append(Conv1D(2, 1, padding='same')(inputs[6]))
    outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6]))
    outputs.append(ZeroPadding1D(2)(inputs[6]))
    outputs.append(Cropping1D((2, 3))(inputs[6]))
    outputs.append(MaxPooling1D(2)(inputs[6]))
    outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(AveragePooling1D(2)(inputs[6]))
    outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(GlobalMaxPooling1D()(inputs[6]))
    outputs.append(GlobalMaxPooling1D(data_format="channels_first")(inputs[6]))
    outputs.append(GlobalAveragePooling1D()(inputs[6]))
    outputs.append(
        GlobalAveragePooling1D(data_format="channels_first")(inputs[6]))

    outputs.append(Conv2D(4, (3, 3))(inputs[4]))
    outputs.append(Conv2D(4, (3, 3), use_bias=False)(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4]))

    outputs.append(SeparableConv2D(3, (3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((1, 2))(inputs[4]))

    outputs.append(MaxPooling2D((2, 2))(inputs[4]))
    outputs.append(
        MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(AveragePooling2D((2, 2))(inputs[4]))
    outputs.append(
        AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))

    outputs.append(GlobalAveragePooling2D()(inputs[4]))
    outputs.append(
        GlobalAveragePooling2D(data_format="channels_first")(inputs[4]))
    outputs.append(GlobalMaxPooling2D()(inputs[4]))
    outputs.append(GlobalMaxPooling2D(data_format="channels_first")(inputs[4]))

    outputs.append(BatchNormalization()(inputs[4]))
    outputs.append(Dropout(0.5)(inputs[4]))

    outputs.append(ZeroPadding2D(2)(inputs[4]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[4]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4]))
    outputs.append(Cropping2D(2)(inputs[4]))
    outputs.append(Cropping2D((2, 3))(inputs[4]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4]))

    outputs.append(Dense(3, use_bias=True)(inputs[13]))
    outputs.append(Dense(3, use_bias=True)(inputs[14]))
    outputs.append(Dense(4, use_bias=False)(inputs[16]))
    outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18]))
    outputs.append(Dense(4, use_bias=False)(inputs[20]))

    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4]))

    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]]))
    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]]))
    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]]))
    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]]))
    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]]))
    for axis in [-1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))
    for axis in [-1, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))
    for axis in [-1, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))
    for axis in [-1, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]]))

    outputs.append(UpSampling1D(size=2)(inputs[6]))

    outputs.append(Multiply()([inputs[10], inputs[11]]))
    outputs.append(Multiply()([inputs[11], inputs[10]]))
    outputs.append(Multiply()([inputs[11], inputs[13]]))
    outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]]))
    outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[23]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[24]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1),
                padding='valid')(up_scale_2(inputs[24]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(Add()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Subtract()([inputs[26], inputs[30]]))
    outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Average()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]]))

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[25]),
        Activation('hard_sigmoid')(inputs[25]),
        Activation('selu')(inputs[25]),
        Activation('sigmoid')(inputs[25]),
        Activation('softplus')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('relu')(inputs[25]),
        LeakyReLU()(inputs[25]),
        ELU()(inputs[25]),
        PReLU()(inputs[24]),
        PReLU()(inputs[25]),
        PReLU()(inputs[26]),
        shared_activation(inputs[25]),
        Activation('linear')(inputs[26]),
        Activation('linear')(inputs[23]),
        x,
        shared_activation(x),
    ]

    model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
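The generate_input_data and generate_output_data helpers used for the dummy fit are not shown. A plausible sketch, assuming they simply draw random arrays of the right shapes (only the names come from the call sites above; the bodies are assumptions):

import numpy as np

def generate_input_data(num_samples, input_shapes):
    # One random array per model input; the batch dimension comes first.
    return [np.random.normal(size=(num_samples,) + tuple(s))
            for s in input_shapes]

def generate_output_data(num_samples, initial_out):
    # Random training targets shaped like the model's own predictions
    # (the predicted arrays already carry the batch dimension).
    return [np.random.normal(size=arr.shape) for arr in initial_out]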
Code example #29
def FCN(input_shape, max_seqlen, num_classes=2, norm_max=1.0):
  """
  Generate a fully convolutional neural network (FCN) model.

  Parameters
  ----------
  input_shape : tuple defining shape of the input dataset: (num_timesteps, num_channels)
  num_classes : integer defining number of classes for classification task
  norm_max    : maximum norm for constraint

  Returns
  -------
  model : Keras model
  """

  inputs = Input(shape = input_shape)

  # Zero-pad the input symmetrically up to max_seqlen
  # (assumes max_seqlen - num_timesteps is even; an odd difference loses one step)
  pad_wd = (max_seqlen - input_shape[0]) // 2
  x = ZeroPadding1D((pad_wd, pad_wd))(inputs)

  # Stage 1
  x = Conv1D(filters=32, kernel_size=7, strides=2, padding='valid', use_bias=False,
             kernel_constraint=MaxNorm(norm_max, axis=[0,1,2]),
             name = 'conv1', kernel_initializer=glorot_uniform(seed=0))(x)
  x = LeakyReLU(alpha=0.1)(x)
  x = BatchNormalization(axis=-1, momentum=0.9, name='bn_conv1',
                         gamma_constraint=MaxNorm(norm_max,axis=0),
                         beta_constraint=MaxNorm(norm_max,axis=0))(x)

  # Stage 2
  x = conv_block(x, ksz=3, filters=[16,16,32], stage=2, block='a', s=2, norm_max=norm_max)
  x = identity_block(x, ksz=3, filters=[16,16,32], stage=2, block='b', norm_max=norm_max)
  x = identity_block(x, ksz=3, filters=[16,16,32], stage=2, block='c', norm_max=norm_max)

#  # Stage 3
#  x = conv_block(x, ksz=3, filters=[64,64,128], stage=3, block='a', s=2)
#  x = identity_block(x, ksz=3, filters=[64,64,128], stage=3, block='b')
#  x = identity_block(x, ksz=3, filters=[64,64,128], stage=3, block='c')
#  x = identity_block(x, ksz=3, filters=[64,64,128], stage=3, block='d')

#  # Stage 4
#  x = conv_block(x, ksz=3, filters=[128,128,256], stage=4, block='a', s=2)
#  x = identity_block(x, ksz=3, filters=[128,128,256], stage=4, block='b')
#  x = identity_block(x, ksz=3, filters=[128,128,256], stage=4, block='c')
#  x = identity_block(x, ksz=3, filters=[128,128,256], stage=4, block='d')
#  x = identity_block(x, ksz=3, filters=[128,128,256], stage=4, block='e')

#  # Stage 5
#  x = conv_block(x, ksz=3, filters=[256,256,512], stage=5, block='a', s=2)
#  x = identity_block(x, ksz=3, filters=[256,256,512], stage=5, block='b')
#  x = identity_block(x, ksz=3, filters=[256,256,512], stage=5, block='c')
#  x = identity_block(x, ksz=3, filters=[256,256,512], stage=5, block='d')
#  x = identity_block(x, ksz=3, filters=[256,256,512], stage=5, block='e')
#  x = identity_block(x, ksz=3, filters=[256,256,512], stage=5, block='f')

  # Output stage
  x = Conv1DTranspose(x, filters=64, ksz=5, s=4, norm_max=norm_max)
  x = GlobalAveragePooling1D()(x)
  outputs = Dense(num_classes, activation='softmax', name='Dense',
                  kernel_constraint=MaxNorm(norm_max,axis=[0,1]),
                  bias_constraint=MaxNorm(norm_max,axis=0),
                  kernel_initializer=glorot_uniform(seed=0))(x)

  model = Model(inputs=inputs, outputs=outputs)
  return model
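Note that Conv1DTranspose here is a helper function called with the tensor as its first argument, not the built-in Keras layer. A minimal sketch of a compatible implementation, assuming the common workaround of wrapping Conv2DTranspose around a temporarily expanded axis (only the signature follows the call site; the body is an assumption):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Lambda, Conv2DTranspose
from tensorflow.keras.constraints import MaxNorm

def Conv1DTranspose(x, filters, ksz, s, norm_max=1.0):
    # Expand (batch, steps, channels) -> (batch, steps, 1, channels),
    # deconvolve along the time axis only, then squeeze back to 1D.
    x = Lambda(lambda t: K.expand_dims(t, axis=2))(x)
    x = Conv2DTranspose(filters=filters, kernel_size=(ksz, 1),
                        strides=(s, 1), padding='same',
                        kernel_constraint=MaxNorm(norm_max, axis=[0, 1, 2, 3]))(x)
    x = Lambda(lambda t: K.squeeze(t, axis=2))(x)
    return x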
Code example #30
def resnet18_1d(input_shape=(750, 1), classes=1, as_model=False):
    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding1D(3)(X_input)

    # Stage 1
    X = Conv1D(64,
               7,
               strides=2,
               name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=2, name='bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling1D(3, strides=2)(X)

    # Stage 2
    X = identity_block_18_1d(X, 3, [64, 64], stage=2, block='a')
    X = identity_block_18_1d(X, 3, [64, 64], stage=2, block='b')

    # Stage 3
    X = convolutional_block_18_1d(X,
                                  f=3,
                                  filters=[128, 128],
                                  stage=3,
                                  block='a',
                                  s=2)
    X = identity_block_18_1d(X, 3, [128, 128], stage=3, block='b')

    # Stage 4
    X = convolutional_block_18_1d(X,
                                  f=3,
                                  filters=[256, 256],
                                  stage=4,
                                  block='a',
                                  s=2)
    X = identity_block_18_1d(X, 3, [256, 256], stage=4, block='b')

    # Stage 5
    X = convolutional_block_18_1d(X,
                                  f=3,
                                  filters=[512, 512],
                                  stage=5,
                                  block='a',
                                  s=2)
    X = identity_block_18_1d(X, 3, [512, 512], stage=5, block='b')

    # AVGPOOL
    X = AveragePooling1D(2, name="avg_pool")(X)

    # output layer
    X = Flatten()(X)

    if as_model:
        # attach the classification head; otherwise return the flattened features
        X = Dense(classes,
                  activation='softmax',
                  name='fc' + str(classes),
                  kernel_initializer=glorot_uniform(seed=0))(X)

    # Create model
    model = Model(inputs=X_input, outputs=X, name='ResNet18_1d')

    return model
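The identity_block_18_1d and convolutional_block_18_1d helpers follow the ResNet-18 basic-block design (two convolutions per block, no bottleneck). A hedged sketch of identity_block_18_1d consistent with the call sites above; the layer names and ordering are assumptions:

from tensorflow.keras.layers import Conv1D, BatchNormalization, Activation, Add
from tensorflow.keras.initializers import glorot_uniform

def identity_block_18_1d(X, f, filters, stage, block):
    # Basic two-convolution residual block: the input is added back to the
    # branch output before the final activation; same-padding keeps the
    # time dimension unchanged so no shortcut projection is needed.
    f1, f2 = filters
    name = str(stage) + block
    shortcut = X

    X = Conv1D(f1, f, padding='same', name='res' + name + '_2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=2, name='bn' + name + '_2a')(X)
    X = Activation('relu')(X)

    X = Conv1D(f2, f, padding='same', name='res' + name + '_2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=2, name='bn' + name + '_2b')(X)

    X = Add()([X, shortcut])
    X = Activation('relu')(X)
    return X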