Example #1
def spliceAI_model(input_shape, num_classes=3):
    """Model builder
    Shortcut layers after every 4 RB blocks.
    """
    inputs = Input(shape=input_shape)

    # initial 1x1 convolution
    x = Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(inputs)
    # another Conv on x before splitting
    y = Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x)

    d = [1, 4, 10]  # dilation rates for the three RB groups
    for i in range(3):
        # RB groups 1-3: 32 filters, kernel size 11, dilation rates 1, 4, 10
        for stack in range(4):
            x = RB_block(x, num_filters=32, kernel_size=11, strides=1, activation='relu', dilation_rate=d[i])
        if i == 0 or i == 1:
            y = keras.layers.add([Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x), y])

    x = Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x)
    # adding up with what was shortcut from the prev layers
    x = keras.layers.add([x, y])

    if num_classes > 1:
        x = Conv1D(3, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x)
        x = Dense(num_classes, activation='softmax')(x)
    else:
        x = Conv1D(1, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x)
        x = Dense(1, activation='relu')(x)
    # crop to fit the labels (from 7,000 down to 5,000 positions)
    outputs = Cropping1D(cropping=(1000, 1000))(x)

    model = Model(inputs=inputs, outputs=outputs)

    return model
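RB_block is referenced but not defined in this snippet; a minimal sketch of a pre-activation residual block consistent with the call signature above (an assumption, not the original definition):

from tensorflow import keras
from tensorflow.keras.layers import Activation, BatchNormalization, Conv1D

def RB_block(inputs, num_filters=32, kernel_size=11, strides=1,
             activation='relu', dilation_rate=1):
    # Two BN -> activation -> dilated Conv1D stages plus an identity
    # shortcut, in the SpliceAI style (assumed layout).
    x = inputs
    for _ in range(2):
        x = BatchNormalization()(x)
        x = Activation(activation)(x)
        x = Conv1D(num_filters, kernel_size=kernel_size, strides=strides,
                   padding='same', dilation_rate=dilation_rate)(x)
    return keras.layers.add([x, inputs])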
Example #2
def model_CNN_full(input_shape, rb, dil, kernel_size):
    """Model builder
    """
    inputs = Input(shape=input_shape)

    # initial 1x1 convolution
    x = Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(inputs)
    # another Conv on x before splitting
    y = Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x)

    d = [1, dil]  # dilation
    for i in range(2):
        for stack in range(rb):
            x = RB_block(x, num_filters=32, kernel_size=kernel_size, strides=1, activation='relu', dilation_rate=d[i])
        if i == 0:
            y = keras.layers.add([Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x), y])

    x = Conv1D(32, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x)
    # adding up with what was shortcut from the prev layers
    x = keras.layers.add([x, y])
    x = Conv1D(1, kernel_size=1, strides=1, padding='same', dilation_rate=1)(x)
    x = Dense(1, activation='sigmoid')(x)
    outputs = Cropping1D(cropping=(25, 25))(x)

    model = Model(inputs=inputs, outputs=outputs)

    return model
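A quick shape check for the final crop (illustrative; the input length of 150 is an assumption, any length above 50 behaves the same way):

from tensorflow.keras.layers import Cropping1D, Input
from tensorflow.keras.models import Model

inp = Input(shape=(150, 4))
out = Cropping1D(cropping=(25, 25))(inp)  # trim 25 steps from each end
print(Model(inp, out).output_shape)       # (None, 100, 4)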
Example #3
    def __init__(self, n_symbols, length=3000, latent_size=1000, n_filters=256,
                 kernel_size=5, pooling_type='average', dropout=0):
        super().__init__(n_symbols)
        self._length = length
        self._latent_size = latent_size
        self._kernel_size = kernel_size
        self._n_filters = n_filters
        pool = AveragePooling1D if pooling_type == 'average' else MaxPooling1D
        
        input_embedding = Stack()
        input_embedding.add(Embedding(n_symbols, 128, input_length=self._length))
        input_embedding.add(Lambda(lambda x: x * np.sqrt(n_filters)))
        input_embedding.add(PositionEmbedding())
        input_embedding.add(PaddedConv(1, n_filters, kernel_size, 1, activation='relu', dropout=dropout))
        
        encoder = Stack()
        encoder.add(input_embedding)
        encoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        encoder.add(pool(2,2))
        encoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        encoder.add(pool(2,2))
        encoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        encoder.add(pool(2,2))
        encoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        encoder.add(pool(2,2))
        encoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        encoder.add(pool(2,2))
        encoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        encoder.add(pool(2,2))
        
        latent = Stack()
        latent.add(Flatten())
        latent.add(Dense(self._latent_size))
        
        decoder = Stack()
        decoder.add(Dense(47*n_filters, input_shape=(self._latent_size,), activation='relu'))
        decoder.add(Reshape((47, n_filters)))
        decoder.add(UpSampling1D(2))
        decoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        decoder.add(UpSampling1D(2))
        decoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        decoder.add(UpSampling1D(2))
        decoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        decoder.add(UpSampling1D(2))
        decoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        decoder.add(UpSampling1D(2))
        decoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        decoder.add(UpSampling1D(2))
        decoder.add(ResidualBlock(1, n_filters, kernel_size, activation='relu', dilation_rate=1, dropout=dropout))
        decoder.add(Cropping1D((0,8)))

        self.encoder = encoder
        self.decoder = decoder
        self.latent = latent
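The trailing Cropping1D((0, 8)) exists because six UpSampling1D(2) stages inflate the length-47 decoder seed to 3008 steps, eight more than the length-3000 input; a quick check of that arithmetic:

length, seed, stages = 3000, 47, 6
upsampled = seed * 2 ** stages   # 47 * 64 = 3008 after six UpSampling1D(2)
print(upsampled - length)        # 8 -> Cropping1D((0, 8)) trims the excess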
Example #4
    def __init__(self, n_symbols, length=3000):
        super().__init__(n_symbols)
        self._length = length
        
        encoder = Stack()
        encoder.add(Embedding(n_symbols, 128, input_length=self._length))
        encoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        encoder.add(BatchNormalization())
        encoder.add(MaxPooling1D(2,2))
        encoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        encoder.add(BatchNormalization())
        encoder.add(MaxPooling1D(2,2))
        encoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        encoder.add(BatchNormalization())
        encoder.add(MaxPooling1D(2,2))
        encoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        encoder.add(BatchNormalization())
        encoder.add(MaxPooling1D(2,2))
        encoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        encoder.add(BatchNormalization())
        encoder.add(MaxPooling1D(2,2))
        encoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        encoder.add(BatchNormalization())
        encoder.add(MaxPooling1D(2,2))
        encoder.add(Flatten())
        encoder.add(Dense(1000))
        
        decoder = Stack()
        decoder.add(Dense(47*256, input_shape=(1000,), activation='relu'))
        decoder.add(Reshape((47, 256)))
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        decoder.add(BatchNormalization())
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        decoder.add(BatchNormalization())
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        decoder.add(BatchNormalization())
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        decoder.add(BatchNormalization())
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        decoder.add(BatchNormalization())
        decoder.add(UpSampling1D(2))
        decoder.add(Conv1D(256, 5, strides=1, padding='same', dilation_rate=1, activation='relu'))
        decoder.add(Cropping1D((0,8)))

        self.encoder = encoder
        self.decoder = decoder
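On the encoder side, MaxPooling1D floors odd lengths, which is why the length-3000 input ends at 46 steps while the decoder seeds at 47 and crops back down (an illustrative check):

n = 3000
for _ in range(6):  # six MaxPooling1D(2, 2) stages
    n //= 2         # 1500, 750, 375, 187, 93, 46
print(n)            # 46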
Example #5
    def upconv_unit(self,
                    inputs,
                    nb_filter,
                    concatenate_layer,
                    apply_attention=False):
        # upsample then convolve (an up-convolution in place of a transposed convolution)
        u = UpSampling1D(size=self.upsize)(inputs)
        u = self.conv1d(nb_filter, stride_size=None)(u)
        if self.batchnorm:
            u = BatchNormalization()(u)
        u = Activation(self.activation)(u)
        if self.dropout_rate:
            u = Dropout(self.dropout_rate)(u)
        # u.shape: TensorShape([None, 128, 18])
        # concatenate_layer.shape: TensorShape([None, 126, 18])
        shape_diff = u.shape[1] - concatenate_layer.shape[1]
        if shape_diff > 0:
            crop_shape = (shape_diff // 2, shape_diff - shape_diff // 2)
        else:
            crop_shape = None

        if apply_attention:
            if crop_shape:
                crop = Cropping1D(cropping=crop_shape)(u)
                att = self.att_block(xl=concatenate_layer, gate=crop)
                upconv = concatenate([att, crop])
            else:
                att = self.att_block(xl=concatenate_layer, gate=u)
                upconv = concatenate([att, u])
        else:
            if crop_shape:
                crop = Cropping1D(cropping=crop_shape)(u)
                upconv = concatenate([concatenate_layer, crop])
            else:
                upconv = concatenate([concatenate_layer, u])

        return upconv
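An illustrative check of the crop arithmetic above, using the lengths from the shape comments (a length-128 upsampled tensor against a 126-step skip connection):

shape_diff = 128 - 126
crop_shape = (shape_diff // 2, shape_diff - shape_diff // 2)
print(crop_shape)  # (1, 1) -> one step trimmed from each end before concatenating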
Example #6
    def DefineDeepUVNet(_self, _inputShape, _nbFilters, _kernelSize,
                        _convPerLevel, _upConvPerLevel, _optimizer):
        inputs = Input(shape=_inputShape)
        # print("inputs.shape " + str(inputs.shape))
        x = GaussianNoise(0.03)(inputs)
        shortcut = Conv2D(_nbFilters, (1, 1), padding=_self.m_BorderMode)(x)
        # print("shortcut.shape " + str(shortcut.shape))

        levels, nbFilters = _self.DownsamplingPart(shortcut, _nbFilters,
                                                   _kernelSize, _convPerLevel)

        inputLevels = []
        for i in range(len(_convPerLevel) - 1):
            inputLevels.append(levels[len(_convPerLevel) - 1 - 1 - i])

        upLevels = _self.UpsamplingConcatPart(levels[-1], inputLevels,
                                              nbFilters, _kernelSize,
                                              _upConvPerLevel)

        # sigmoid
        if True:
            # if False:
            outputs = Conv2D(1, (1, 1), activation='sigmoid')(upLevels[-1])

        # 2-class softmax
        # elif True:
        elif False:
            nbOutputs = 2
            outputs = Conv2D(nbOutputs, (1, 1))(upLevels[-1])
            outputs = Reshape(
                (nbOutputs, _inputShape[1] * _inputShape[2]))(outputs)
            outputs = Permute((2, 1))(outputs)
            outputs = Activation("softmax")(outputs)
            outputs = Permute((2, 1))(outputs)
            outputs = Cropping1D(cropping=((0, 1)))(outputs)
            outputs = Reshape(
                (nbOutputs - 1, _inputShape[1], _inputShape[2]))(outputs)

        model = Model(inputs=inputs, outputs=outputs)
        model.compile(optimizer=_optimizer,
                      loss=DiceCoefLoss,
                      metrics=[DiceCoef])

        return model
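In the disabled softmax branch, Cropping1D crops along axis 1, so after the final Permute((2, 1)) it drops one of the two class channels; a minimal sketch with assumed sizes:

from tensorflow.keras.layers import Cropping1D, Input
from tensorflow.keras.models import Model

inp = Input(shape=(2, 100))             # (classes, flattened pixels), assumed
out = Cropping1D(cropping=(0, 1))(inp)  # drop the last class channel
print(Model(inp, out).output_shape)     # (None, 1, 100)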
Example #7
    def attention_lstm_residual(self):
        input_x = Input(shape=self.input_shape, name='input')
        X = input_x

        for i in range(self.lstm_blocks):
            query = Dense(10, name='query_' + str(i))(X)
            key = Dense(10, name='key_' + str(i))(X)
            attention_weights = AdditiveAttention(use_scale=False, name='attention_' + str(i))([query, X, key])
            attention_weights = Dense(1, activation='softmax', name='attention_weights_' + str(i))(attention_weights)
            context = Multiply(name='context_' + str(i))([attention_weights, X])
            X = LSTM(self.n_units, return_sequences=True,
                     recurrent_dropout=self.recurrent_dropout,
                     kernel_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                     activity_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                     name='lstm_' + str(i))(context)
            if self.dropout_rate > 0:
                X = Dropout(self.dropout_rate, name='dropout_' + str(i))(X)

        X = LSTM(self.n_units, return_sequences=False,
                 recurrent_dropout=self.recurrent_dropout,
                 kernel_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                 activity_regularizer=l1_l2(self.lstm_l1, self.lstm_l2),
                 name='lstm_last')(X)
        if self.dropout_rate > 0:
            X = Dropout(self.dropout_rate, name='dropout_last')(X)

        crop_input = Cropping1D(cropping=(0, self.input_shape[0] - 1), name='crop_input')(input_x)
        if self.dropout_rate > 0:
            crop_input = Dropout(self.dropout_rate, name='dropout_crop_input')(crop_input)
        flatten_crop = Flatten(name='flatten_crop_input')(crop_input)
        query_input = Dense(10, name='query_input')(flatten_crop)
        key_input = Dense(10, name='key_input')(flatten_crop)
        attention_weights_input = AdditiveAttention(use_scale=False, name='attention_input')([query_input, flatten_crop, key_input])
        attention_weights_input = Dense(1, activation='softmax', name='attention_weights_input')(attention_weights_input)
        context_input = Multiply(name='context_input')([attention_weights_input, flatten_crop])
        concat = Concatenate(name='concat_output')([X, context_input])
        X = Dense(self.n_outputs, activation=self.activation, name='output')(concat)

        return Model(inputs=input_x, outputs=X, name='attention_lstm')
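The crop_input branch above uses Cropping1D(cropping=(0, T - 1)) to keep only the first of T timesteps; a minimal sketch with an assumed T = 30:

from tensorflow.keras.layers import Cropping1D, Input
from tensorflow.keras.models import Model

inp = Input(shape=(30, 8))               # (timesteps, features), assumed
out = Cropping1D(cropping=(0, 29))(inp)  # keep only the first timestep
print(Model(inp, out).output_shape)      # (None, 1, 8)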
Example #8
def get_test_model_exhaustive():
    """Returns a exhaustive test model."""
    input_shapes = [(2, 3, 4, 5, 6), (2, 3, 4, 5, 6), (7, 8, 9, 10),
                    (7, 8, 9, 10), (11, 12, 13), (11, 12, 13), (14, 15),
                    (14, 15), (16, ),
                    (16, ), (2, ), (1, ), (2, ), (1, ), (1, 3), (1, 4),
                    (1, 1, 3), (1, 1, 4), (1, 1, 1, 3), (1, 1, 1, 4),
                    (1, 1, 1, 1, 3), (1, 1, 1, 1, 4), (26, 28, 3), (4, 4, 3),
                    (4, 4, 3), (4, ), (2, 3), (1, ), (1, ), (1, ), (2, 3),
                    (9, 16, 1), (1, 9, 16)]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Conv1D(1, 3, padding='valid')(inputs[6]))
    outputs.append(Conv1D(2, 1, padding='same')(inputs[6]))
    outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6]))
    outputs.append(ZeroPadding1D(2)(inputs[6]))
    outputs.append(Cropping1D((2, 3))(inputs[6]))
    outputs.append(MaxPooling1D(2)(inputs[6]))
    outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(MaxPooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(AveragePooling1D(2)(inputs[6]))
    outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(
        AveragePooling1D(2, data_format="channels_first")(inputs[6]))
    outputs.append(GlobalMaxPooling1D()(inputs[6]))
    outputs.append(GlobalMaxPooling1D(data_format="channels_first")(inputs[6]))
    outputs.append(GlobalAveragePooling1D()(inputs[6]))
    outputs.append(
        GlobalAveragePooling1D(data_format="channels_first")(inputs[6]))

    outputs.append(Conv2D(4, (3, 3))(inputs[4]))
    outputs.append(Conv2D(4, (3, 3), use_bias=False)(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4]))

    outputs.append(SeparableConv2D(3, (3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((1, 2))(inputs[4]))

    outputs.append(MaxPooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(MaxPooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default MaxPoolingOp only supports NHWC on device type CPU
    outputs.append(
        MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(AveragePooling2D((2, 2))(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(AveragePooling2D((2, 2), data_format="channels_first")(inputs[4])) # Default AvgPoolingOp only supports NHWC on device type CPU
    outputs.append(
        AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))

    outputs.append(GlobalAveragePooling2D()(inputs[4]))
    outputs.append(
        GlobalAveragePooling2D(data_format="channels_first")(inputs[4]))
    outputs.append(GlobalMaxPooling2D()(inputs[4]))
    outputs.append(GlobalMaxPooling2D(data_format="channels_first")(inputs[4]))

    outputs.append(Permute((3, 4, 1, 5, 2))(inputs[0]))
    outputs.append(Permute((1, 5, 3, 2, 4))(inputs[0]))
    outputs.append(Permute((3, 4, 1, 2))(inputs[2]))
    outputs.append(Permute((2, 1, 3))(inputs[4]))
    outputs.append(Permute((2, 1))(inputs[6]))
    outputs.append(Permute((1, ))(inputs[8]))

    outputs.append(Permute((3, 1, 2))(inputs[31]))
    outputs.append(Permute((3, 1, 2))(inputs[32]))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[31])))
    outputs.append(BatchNormalization()(Permute((3, 1, 2))(inputs[32])))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(axis=1)(inputs[0]))
    outputs.append(BatchNormalization(axis=2)(inputs[0]))
    outputs.append(BatchNormalization(axis=3)(inputs[0]))
    outputs.append(BatchNormalization(axis=4)(inputs[0]))
    outputs.append(BatchNormalization(axis=5)(inputs[0]))
    outputs.append(BatchNormalization()(inputs[2]))
    outputs.append(BatchNormalization(axis=1)(inputs[2]))
    outputs.append(BatchNormalization(axis=2)(inputs[2]))
    outputs.append(BatchNormalization(axis=3)(inputs[2]))
    outputs.append(BatchNormalization(axis=4)(inputs[2]))
    outputs.append(BatchNormalization()(inputs[4]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(BatchNormalization(axis=1)(inputs[4])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[4]))
    outputs.append(BatchNormalization(axis=3)(inputs[4]))
    outputs.append(BatchNormalization()(inputs[6]))
    outputs.append(BatchNormalization(axis=1)(inputs[6]))
    outputs.append(BatchNormalization(axis=2)(inputs[6]))
    outputs.append(BatchNormalization()(inputs[8]))
    outputs.append(BatchNormalization(axis=1)(inputs[8]))
    outputs.append(BatchNormalization()(inputs[27]))
    outputs.append(BatchNormalization(axis=1)(inputs[27]))
    outputs.append(BatchNormalization()(inputs[14]))
    outputs.append(BatchNormalization(axis=1)(inputs[14]))
    outputs.append(BatchNormalization(axis=2)(inputs[14]))
    outputs.append(BatchNormalization()(inputs[16]))
    # todo: check if TensorFlow >= 2.1 supports this
    # outputs.append(BatchNormalization(axis=1)(inputs[16])) # tensorflow.python.framework.errors_impl.InternalError:  The CPU implementation of FusedBatchNorm only supports NHWC tensor format for now.
    outputs.append(BatchNormalization(axis=2)(inputs[16]))
    outputs.append(BatchNormalization(axis=3)(inputs[16]))
    outputs.append(BatchNormalization()(inputs[18]))
    outputs.append(BatchNormalization(axis=1)(inputs[18]))
    outputs.append(BatchNormalization(axis=2)(inputs[18]))
    outputs.append(BatchNormalization(axis=3)(inputs[18]))
    outputs.append(BatchNormalization(axis=4)(inputs[18]))
    outputs.append(BatchNormalization()(inputs[20]))
    outputs.append(BatchNormalization(axis=1)(inputs[20]))
    outputs.append(BatchNormalization(axis=2)(inputs[20]))
    outputs.append(BatchNormalization(axis=3)(inputs[20]))
    outputs.append(BatchNormalization(axis=4)(inputs[20]))
    outputs.append(BatchNormalization(axis=5)(inputs[20]))

    outputs.append(Dropout(0.5)(inputs[4]))

    outputs.append(ZeroPadding2D(2)(inputs[4]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[4]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4]))
    outputs.append(Cropping2D(2)(inputs[4]))
    outputs.append(Cropping2D((2, 3))(inputs[4]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4]))

    outputs.append(Dense(3, use_bias=True)(inputs[13]))
    outputs.append(Dense(3, use_bias=True)(inputs[14]))
    outputs.append(Dense(4, use_bias=False)(inputs[16]))
    outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18]))
    outputs.append(Dense(4, use_bias=False)(inputs[20]))

    outputs.append(Reshape(((2 * 3 * 4 * 5 * 6), ))(inputs[0]))
    outputs.append(Reshape((2, 3 * 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4 * 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5 * 6))(inputs[0]))
    outputs.append(Reshape((2, 3, 4, 5, 6))(inputs[0]))

    outputs.append(Reshape((16, ))(inputs[8]))
    outputs.append(Reshape((2, 8))(inputs[8]))
    outputs.append(Reshape((2, 2, 4))(inputs[8]))
    outputs.append(Reshape((2, 2, 2, 2))(inputs[8]))
    outputs.append(Reshape((2, 2, 1, 2, 2))(inputs[8]))

    outputs.append(RepeatVector(3)(inputs[8]))

    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4]))

    outputs.append(ReLU()(inputs[0]))

    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]]))
    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]]))
    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]]))
    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]]))
    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]]))
    for axis in [-1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))
    for axis in [-1, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))
    for axis in [-1, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))
    for axis in [-1, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]]))

    outputs.append(UpSampling1D(size=2)(inputs[6]))
    # outputs.append(UpSampling1D(size=2)(inputs[8])) # ValueError: Input 0 of layer up_sampling1d_1 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 16]

    outputs.append(Multiply()([inputs[10], inputs[11]]))
    outputs.append(Multiply()([inputs[11], inputs[10]]))
    outputs.append(Multiply()([inputs[11], inputs[13]]))
    outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]]))
    outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[23]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[24]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1),
                padding='valid')(up_scale_2(inputs[24]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(Add()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Subtract()([inputs[26], inputs[30]]))
    outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Average()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]]))

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5, name='duplicate_layer_name')(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5, name='duplicate_layer_name'))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    intermediate_model_3_nested = Sequential()
    intermediate_model_3_nested.add(Dense(7, input_shape=(6, )))
    intermediate_model_3_nested.compile(optimizer='rmsprop',
                                        loss='categorical_crossentropy')

    intermediate_model_3 = Sequential()
    intermediate_model_3.add(Dense(6, input_shape=(5, )))
    intermediate_model_3.add(intermediate_model_3_nested)
    intermediate_model_3.add(Dense(8))
    intermediate_model_3.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_3(x)  # (1, 1, 8)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[25]),
        Activation('hard_sigmoid')(inputs[25]),
        Activation('selu')(inputs[25]),
        Activation('sigmoid')(inputs[25]),
        Activation('softplus')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('relu')(inputs[25]),
        Activation('relu6')(inputs[25]),
        Activation('swish')(inputs[25]),
        Activation('exponential')(inputs[25]),
        Activation('gelu')(inputs[25]),
        Activation('softsign')(inputs[25]),
        LeakyReLU()(inputs[25]),
        ReLU()(inputs[25]),
        ReLU(max_value=0.4, negative_slope=1.1, threshold=0.3)(inputs[25]),
        ELU()(inputs[25]),
        PReLU()(inputs[24]),
        PReLU()(inputs[25]),
        PReLU()(inputs[26]),
        shared_activation(inputs[25]),
        Activation('linear')(inputs[26]),
        Activation('linear')(inputs[23]),
        x,
        shared_activation(x),
    ]

    model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 2
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
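generate_input_data and generate_output_data are not shown; a minimal sketch consistent with how they are used here (an assumption, not the original helpers):

import numpy as np

def generate_input_data(n, input_shapes):
    # one random batch of size n per model input
    return [np.random.normal(size=(n,) + shape) for shape in input_shapes]

def generate_output_data(n, initial_data_out):
    # random targets shaped like the model's initial predictions
    return [np.random.normal(size=(n,) + out.shape[1:])
            for out in initial_data_out]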
Example #9
print(y_train.shape)

# Train and validate the model
model = Sequential()
model.add(BatchNormalization(input_shape=(window, x_train.shape[2], )))

model.add(LSTM(50, return_sequences=True, activation="relu"))
model.add(BatchNormalization())
model.add(LSTM(50, return_sequences=True, activation="relu"))
model.add(BatchNormalization())
model.add(LSTM(50, return_sequences=True, activation="relu"))
model.add(BatchNormalization())

model.add(LSTM(y_train.shape[2], activation="relu", return_sequences=True))
model.add(BatchNormalization())
model.add(Cropping1D((x_train.shape[1] - horizon, 0)))
model.add(BatchNormalization())
model.add(Dense(len(targets)))
model.compile(optimizer=Adam(learning_rate=1e-2), loss="mse")
model.summary()
history = model.fit(x_train, y_train, epochs=400, batch_size=window*10, validation_data=(x_test, y_test), verbose=2)
pyplot.plot(history.history["loss"])
pyplot.plot(history.history["val_loss"])
pyplot.title("model train vs validation loss")
pyplot.ylabel("loss")
pyplot.xlabel("epoch")
pyplot.legend(["train", "validation"], loc="upper right")
pyplot.show()

# Prediction results
data1 = x_test[0]
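Here Cropping1D(cropping=(T - horizon, 0)) keeps only the last `horizon` timesteps of each window; a minimal sketch with assumed T = 20 and horizon = 5:

from tensorflow.keras.layers import Cropping1D, Input
from tensorflow.keras.models import Model

inp = Input(shape=(20, 3))                   # (window, features), assumed
out = Cropping1D(cropping=(20 - 5, 0))(inp)  # keep the last 5 steps
print(Model(inp, out).output_shape)          # (None, 5, 3)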
Example #10
def get_test_model_exhaustive():
    """Returns a exhaustive test model."""
    input_shapes = [
        (2, 3, 4, 5, 6),
        (2, 3, 4, 5, 6),
        (7, 8, 9, 10),
        (7, 8, 9, 10),
        (11, 12, 13),
        (11, 12, 13),
        (14, 15),
        (14, 15),
        (16, ),
        (16, ),
        (2, ),
        (1, ),
        (2, ),
        (1, ),
        (1, 3),
        (1, 4),
        (1, 1, 3),
        (1, 1, 4),
        (1, 1, 1, 3),
        (1, 1, 1, 4),
        (1, 1, 1, 1, 3),
        (1, 1, 1, 1, 4),
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (1, ),
        (1, ),
        (1, ),
        (2, 3),
    ]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    outputs.append(Conv1D(1, 3, padding='valid')(inputs[6]))
    outputs.append(Conv1D(2, 1, padding='same')(inputs[6]))
    outputs.append(Conv1D(3, 4, padding='causal', dilation_rate=2)(inputs[6]))
    outputs.append(ZeroPadding1D(2)(inputs[6]))
    outputs.append(Cropping1D((2, 3))(inputs[6]))
    outputs.append(MaxPooling1D(2)(inputs[6]))
    outputs.append(MaxPooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(AveragePooling1D(2)(inputs[6]))
    outputs.append(AveragePooling1D(2, strides=2, padding='same')(inputs[6]))
    outputs.append(GlobalMaxPooling1D()(inputs[6]))
    outputs.append(GlobalMaxPooling1D(data_format="channels_first")(inputs[6]))
    outputs.append(GlobalAveragePooling1D()(inputs[6]))
    outputs.append(
        GlobalAveragePooling1D(data_format="channels_first")(inputs[6]))

    outputs.append(Conv2D(4, (3, 3))(inputs[4]))
    outputs.append(Conv2D(4, (3, 3), use_bias=False)(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(
        Conv2D(4, (2, 4), padding='same', dilation_rate=(2, 3))(inputs[4]))

    outputs.append(SeparableConv2D(3, (3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((3, 3))(inputs[4]))
    outputs.append(DepthwiseConv2D((1, 2))(inputs[4]))

    outputs.append(MaxPooling2D((2, 2))(inputs[4]))
    outputs.append(
        MaxPooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))
    outputs.append(AveragePooling2D((2, 2))(inputs[4]))
    outputs.append(
        AveragePooling2D((1, 3), strides=(2, 3), padding='same')(inputs[4]))

    outputs.append(GlobalAveragePooling2D()(inputs[4]))
    outputs.append(
        GlobalAveragePooling2D(data_format="channels_first")(inputs[4]))
    outputs.append(GlobalMaxPooling2D()(inputs[4]))
    outputs.append(GlobalMaxPooling2D(data_format="channels_first")(inputs[4]))

    outputs.append(BatchNormalization()(inputs[4]))
    outputs.append(Dropout(0.5)(inputs[4]))

    outputs.append(ZeroPadding2D(2)(inputs[4]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[4]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[4]))
    outputs.append(Cropping2D(2)(inputs[4]))
    outputs.append(Cropping2D((2, 3))(inputs[4]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[4]))

    outputs.append(Dense(3, use_bias=True)(inputs[13]))
    outputs.append(Dense(3, use_bias=True)(inputs[14]))
    outputs.append(Dense(4, use_bias=False)(inputs[16]))
    outputs.append(Dense(4, use_bias=False, activation='tanh')(inputs[18]))
    outputs.append(Dense(4, use_bias=False)(inputs[20]))

    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='nearest')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(1, 2), interpolation='bilinear')(inputs[4]))
    outputs.append(
        UpSampling2D(size=(5, 3), interpolation='bilinear')(inputs[4]))

    for axis in [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[0], inputs[1]]))
    for axis in [-4, -3, -2, -1, 1, 2, 3, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[2], inputs[3]]))
    for axis in [-3, -2, -1, 1, 2, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[4], inputs[5]]))
    for axis in [-2, -1, 1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[6], inputs[7]]))
    for axis in [-1, 1]:
        outputs.append(Concatenate(axis=axis)([inputs[8], inputs[9]]))
    for axis in [-1, 2]:
        outputs.append(Concatenate(axis=axis)([inputs[14], inputs[15]]))
    for axis in [-1, 3]:
        outputs.append(Concatenate(axis=axis)([inputs[16], inputs[17]]))
    for axis in [-1, 4]:
        outputs.append(Concatenate(axis=axis)([inputs[18], inputs[19]]))
    for axis in [-1, 5]:
        outputs.append(Concatenate(axis=axis)([inputs[20], inputs[21]]))

    outputs.append(UpSampling1D(size=2)(inputs[6]))

    outputs.append(Multiply()([inputs[10], inputs[11]]))
    outputs.append(Multiply()([inputs[11], inputs[10]]))
    outputs.append(Multiply()([inputs[11], inputs[13]]))
    outputs.append(Multiply()([inputs[10], inputs[11], inputs[12]]))
    outputs.append(Multiply()([inputs[11], inputs[12], inputs[13]]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[23]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[24]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1),
                padding='valid')(up_scale_2(inputs[24]))  # (1, 8, 8)
    x = Concatenate()([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = Concatenate()([MaxPooling2D((2, 2))(x),
                       AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    outputs.append(Add()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Subtract()([inputs[26], inputs[30]]))
    outputs.append(Multiply()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Average()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Maximum()([inputs[26], inputs[30], inputs[30]]))
    outputs.append(Concatenate()([inputs[26], inputs[30], inputs[30]]))

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[25]),
        Activation('hard_sigmoid')(inputs[25]),
        Activation('selu')(inputs[25]),
        Activation('sigmoid')(inputs[25]),
        Activation('softplus')(inputs[25]),
        Activation('softmax')(inputs[25]),
        Activation('relu')(inputs[25]),
        LeakyReLU()(inputs[25]),
        ELU()(inputs[25]),
        PReLU()(inputs[24]),
        PReLU()(inputs[25]),
        PReLU()(inputs[26]),
        shared_activation(inputs[25]),
        Activation('linear')(inputs[26]),
        Activation('linear')(inputs[23]),
        x,
        shared_activation(x),
    ]

    model = Model(inputs=inputs, outputs=outputs, name='test_model_exhaustive')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
Example #11
def test_delete_channels_cropping1d(channel_index):
    layer = Cropping1D(3)
    layer_test_helper_flatten_1d(layer, channel_index)
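layer_test_helper_flatten_1d is a test utility from the keras-surgeon test suite and is not shown; a minimal sketch of the behavior it exercises, assuming the kerassurgeon package and its delete_channels operation (kerassurgeon may require standalone Keras rather than tf.keras):

from kerassurgeon.operations import delete_channels
from tensorflow.keras.layers import Conv1D, Cropping1D, Dense, Flatten, Input
from tensorflow.keras.models import Model

inp = Input(shape=(20, 1))
x = Conv1D(4, 3, name='to_prune')(inp)
x = Cropping1D(3)(x)                    # the layer under test
out = Dense(2)(Flatten()(x))
model = Model(inp, out)

# Deleting a channel upstream must rebuild the downstream
# Cropping1D/Flatten/Dense path with the 3 remaining channels (sketch).
pruned = delete_channels(model, model.get_layer('to_prune'), [0])
print(pruned.output_shape)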