예제 #1
0
def encoder_model():
    """Build the spatio-temporal encoder.

    Two ConvLSTM2D stages downsample VIDEO_LENGTH-frame 128x128 RGB clips
    (stride 4, then stride 2), each followed by a time-distributed
    BatchNormalization.
    """
    encoder = Sequential()

    # Stage 1: 128x128 input, downsampled by stride (4, 4).
    encoder.add(ConvLSTM2D(filters=128,
                           kernel_size=(11, 11),
                           padding='same',
                           strides=(4, 4),
                           return_sequences=True,
                           activation='relu',
                           dropout=0.5,
                           recurrent_dropout=0.5,
                           input_shape=(VIDEO_LENGTH, 128, 128, 3)))
    encoder.add(TimeDistributed(BatchNormalization()))

    # Stage 2: 32x32 features, downsampled by stride (2, 2).
    encoder.add(ConvLSTM2D(filters=64,
                           kernel_size=(5, 5),
                           padding='same',
                           strides=(2, 2),
                           return_sequences=True,
                           activation='relu',
                           dropout=0.5,
                           recurrent_dropout=0.5))
    encoder.add(TimeDistributed(BatchNormalization()))

    return encoder
예제 #2
0
def get_lstm():
    """Functional ConvLSTM stack.

    Four identical 40-filter ConvLSTM2D layers with BatchNormalization
    between them, capped by a sigmoid Conv3D that maps the feature
    sequence back to a single-channel 40x40 frame sequence.
    """
    frames_in = Input((None, 40, 40, 1))

    # Four identical ConvLSTM2D + BatchNormalization stages.
    x = frames_in
    for _ in range(4):
        x = ConvLSTM2D(filters=40,
                       kernel_size=(3, 3),
                       padding='same',
                       return_sequences=True)(x)
        x = BatchNormalization()(x)

    frames_out = Conv3D(filters=1,
                        kernel_size=(3, 3, 3),
                        activation='sigmoid',
                        padding='same',
                        data_format='channels_last')(x)
    return Model(inputs=[frames_in], outputs=[frames_out])
예제 #3
0
File: models.py  Project: Hadryan/EDUVSUM
def getLstmModelConvLstmWoPool(inputShape, optimizerName, activationName):
    """Two-layer ConvLSTM regressor without pooling.

    Args:
        inputShape: shape of one sample, e.g. (timesteps, rows, cols, channels).
        optimizerName: optimizer name/instance passed to ``model.compile``.
        activationName: activation used by the two hidden Dense layers.

    Returns:
        A compiled keras Model (MSE loss, MSE metric) with a single
        linear output unit.
    """
    print('Build model...')
    filters = [40, 40]
    denseLayers = [32, 16, 1]

    input1 = keras.layers.Input(shape=inputShape)
    # Fix: the redundant input_shape= kwarg was dropped — in the functional
    # API the layer infers its input shape from the tensor it is called on,
    # so the extra argument was dead configuration.
    ls1 = ConvLSTM2D(filters=filters[0],
                     kernel_size=(3, 3),
                     padding='same',
                     return_sequences=True)(input1)
    bn1 = BatchNormalization()(ls1)

    # Second ConvLSTM collapses the time dimension (return_sequences=False).
    ls2 = ConvLSTM2D(filters=filters[1], kernel_size=(3, 3),
                     padding='same')(bn1)
    bn2 = BatchNormalization()(ls2)

    flat_layer = keras.layers.Flatten()(bn2)
    d1 = keras.layers.Dense(denseLayers[0],
                            activation=activationName)(flat_layer)
    d2 = keras.layers.Dense(denseLayers[1], activation=activationName)(d1)
    out = keras.layers.Dense(denseLayers[2])(d2)

    model = keras.models.Model(inputs=[input1], outputs=out)
    model.compile(loss='mean_squared_error',
                  optimizer=optimizerName,
                  metrics=['mse'])
    return model
예제 #4
0
def temporal_model():
    """Three stride-1 ConvLSTM2D stages (64 -> 32 -> 64 filters) over
    VIDEO_LENGTH frames of 32x32x64 features, each stage followed by a
    time-distributed BatchNormalization."""
    net = Sequential()
    net.add(
        ConvLSTM2D(filters=64,
                   kernel_size=(3, 3),
                   padding='same',
                   strides=1,
                   return_sequences=True,
                   input_shape=(VIDEO_LENGTH, 32, 32, 64)))
    net.add(TimeDistributed(BatchNormalization()))

    # Remaining stages infer their input shape from the previous layer.
    for n_filters in (32, 64):
        net.add(
            ConvLSTM2D(filters=n_filters,
                       kernel_size=(3, 3),
                       padding='same',
                       strides=1,
                       return_sequences=True))
        net.add(TimeDistributed(BatchNormalization()))

    return net
예제 #5
0
def getModel(name):
    """Return a model by name; only 'ConvLSTM' is recognised, else None.

    The ConvLSTM variant stacks three 32-filter ConvLSTM2D+BatchNorm
    stages and a final CHANNEL-filter ConvLSTM2D (relu) that drops the
    time axis.
    """
    if name != 'ConvLSTM':
        return None

    net = Sequential()
    net.add(
        ConvLSTM2D(filters=32,
                   kernel_size=(3, 3),
                   input_shape=(None, HEIGHT, WIDTH, CHANNEL),
                   padding='same',
                   return_sequences=True))
    net.add(BatchNormalization())

    # Two more identical stages; input shape is inferred.
    for _ in range(2):
        net.add(
            ConvLSTM2D(filters=32,
                       kernel_size=(3, 3),
                       padding='same',
                       return_sequences=True))
        net.add(BatchNormalization())

    # Final stage returns only the last time step.
    net.add(
        ConvLSTM2D(filters=CHANNEL,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=False,
                   activation='relu'))
    return net
예제 #6
0
def build_model(frames):
    """Assemble a Keras ConvLSTM2D sequence-to-sequence model.

    ``frames`` supplies the spatial dimensions (frames.shape[1],
    frames.shape[2]); the model is compiled with MAE loss and SGD.
    """
    n_conv_filters = 30
    net = Sequential()
    net.add(
        ConvLSTM2D(filters=n_conv_filters,
                   kernel_size=(3, 3),
                   input_shape=(None, frames.shape[1], frames.shape[2], 1),
                   padding='same',
                   return_sequences=True))
    # Three more identical ConvLSTM stages.
    for _ in range(3):
        net.add(
            ConvLSTM2D(filters=n_conv_filters,
                       kernel_size=(3, 3),
                       padding='same',
                       return_sequences=True))
    # Sigmoid Conv3D maps the features back to one channel per frame.
    net.add(
        Conv3D(filters=1,
               kernel_size=(3, 3, 3),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))
    net.compile(loss='mae', optimizer='sgd')
    return net
예제 #7
0
def create_ConvLSTM_layers(X, Y, filters, kernel_size, batch_size, epochs,
                           learning_rate):
    """Build, compile and fit a two-layer ConvLSTM2D model.

    Args:
        X: training inputs; shape[1:] must unpack to (time_steps, rows,
           columns) — a trailing channel dim of 1 is added via input_shape.
        Y: training targets.
        filters: number of filters in the first ConvLSTM2D layer.
        kernel_size: kernel size for both ConvLSTM2D layers.
        batch_size, epochs: passed straight to ``model.fit``.
        learning_rate: Adam learning rate.

    Returns:
        (model, history) — the fitted model and its training History.
    """
    (time_steps, rows, columns) = X.shape[1:]
    model = Sequential()
    model.add(
        ConvLSTM2D(filters=filters,
                   kernel_size=kernel_size,
                   input_shape=(time_steps, rows, columns, 1),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(5, 1, 1), padding='same'))
    # Fix: removed the redundant input_shape= kwarg here — only the first
    # layer of a Sequential model needs (or uses) an explicit input shape.
    model.add(
        ConvLSTM2D(filters=1,
                   kernel_size=kernel_size,
                   padding='same',
                   data_format='channels_last'))

    # NOTE(review): `lr=` is the legacy spelling (modern Keras uses
    # `learning_rate=`); kept for compatibility with the version in use.
    adam = optimizers.Adam(lr=learning_rate, beta_1=0.99, beta_2=0.999)
    model.compile(loss='mse', optimizer=adam, metrics=['RootMeanSquaredError'])

    # Fit the model to the data.
    history = model.fit(X,
                        Y,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=2,
                        shuffle=True)

    return model, history
예제 #8
0
def get_model(shape=None):
    """Build a 3-stage ConvLSTM regressor with a single scalar output.

    :param shape: the input shape, e.g. (timesteps, rows, cols, channels)
    :return: compiled Sequential model (MSE loss, RMSprop optimizer)
    """
    seq = Sequential()
    # Fix: ported from the removed Keras 1 kwargs (nb_filter/nb_row/nb_col/
    # subsample/border_mode) to the Keras 2 API used everywhere else in
    # this file; layer hyper-parameters are unchanged.
    seq.add(ConvLSTM2D(filters=24, kernel_size=(5, 5),
                       input_shape=shape,
                       strides=(4, 4),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=48, kernel_size=(3, 3),
                       strides=(2, 2),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    # Last recurrent stage keeps only the final time step.
    seq.add(ConvLSTM2D(filters=64, kernel_size=(3, 3),
                       strides=(2, 2),
                       padding='same', return_sequences=False))

    seq.add(Flatten())
    seq.add(Dense(512, activation='relu'))
    seq.add(Dense(1))

    seq.compile(loss='mse', optimizer=RMSprop(lr=0.00001))

    return seq
예제 #9
0
def convLSTM_net(conf=(15, 1, 32, 32), external_dim=8, kernel_size=(3, 3), filters=40, nb_stack=1, batchNormalization=False, regularization=True):
    """ConvLSTM network over channels-first frame sequences, fused with an
    external feature vector.

    Args:
        conf: (timesteps, channels, map_height, map_width) of the frame input.
        external_dim: size of the auxiliary feature input.
        kernel_size, filters: ConvLSTM2D hyper-parameters.
        nb_stack: number of additional stacked ConvLSTM2D layers.
        batchNormalization: insert BatchNormalization after each ConvLSTM.
        regularization: apply l1_l2 kernel regularization to the ConvLSTMs.

    Returns:
        An uncompiled Model with inputs [frames, external] and a single
        relu-activated output unit.
    """

    def _conv_lstm(x):
        # One ConvLSTM2D stage, optionally l1_l2-regularized.
        if regularization:
            return ConvLSTM2D(filters=filters, kernel_size=kernel_size,
                              kernel_regularizer=keras.regularizers.l1_l2(l1=0.01, l2=0.01),
                              padding='same', return_sequences=True,
                              data_format='channels_first')(x)
        return ConvLSTM2D(filters=filters, kernel_size=kernel_size,
                          padding='same', return_sequences=True,
                          data_format='channels_first')(x)

    main_inputs = []
    timesteps, channels, map_height, map_width = conf
    # Renamed from `input` to avoid shadowing the builtin.
    frame_input = Input(shape=(timesteps, channels, map_height, map_width))

    convlstm_output = _conv_lstm(frame_input)
    if batchNormalization:
        # Fix: the `mode=` argument was removed in Keras 2; axis=1
        # normalizes the channel axis of channels_first data.
        convlstm_output = BatchNormalization(axis=1)(convlstm_output)
    for _ in range(nb_stack):
        convlstm_output = _conv_lstm(convlstm_output)
        if batchNormalization:
            convlstm_output = BatchNormalization(axis=1)(convlstm_output)

    # Per-time-step projection to a scalar, then flatten over time.
    convlstm_output = TimeDistributed(Flatten())(convlstm_output)
    convlstm_output = TimeDistributed(Dense(units=10, activation='relu'))(convlstm_output)
    convlstm_output = TimeDistributed(Dense(units=1, activation='relu'))(convlstm_output)
    convlstm_output = Flatten()(convlstm_output)

    main_inputs.append(frame_input)
    init_input = Input(shape=(external_dim,))
    main_inputs.append(init_input)

    # Fuse the external features with the temporal summary.
    main_output = concatenate([init_input, convlstm_output])
    main_output = Dense(units=10, activation='relu')(main_output)
    out = Dense(units=1, activation='relu')(main_output)
    model = Model(inputs=main_inputs, outputs=out)
    return model
예제 #10
0
def create_model(look_back=20, lr=0.001, decay=0.0005, f1=16, f2=8):
    """Two channels-first ConvLSTM2D stages followed by a 1x1 relu Conv2D.

    Compiled with Adam(lr, decay), MSE loss and MAE metric. The second
    recurrent stage drops the time axis.
    """
    net = Sequential()
    net.add(ConvLSTM2D(filters=f1,
                       kernel_size=(3, 3),
                       input_shape=(look_back, 1, 32, 32),
                       padding='same',
                       data_format='channels_first',
                       return_sequences=True))
    net.add(ConvLSTM2D(filters=f2,
                       kernel_size=(3, 3),
                       padding='same',
                       data_format='channels_first',
                       return_sequences=False))
    net.add(Conv2D(filters=1,
                   kernel_size=(1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_first'))

    optim = Adam(lr=lr, decay=decay)
    net.compile(loss='mse', optimizer=optim, metrics=['mae'])

    return net
예제 #11
0
def create_seq():
    """Four 40-filter ConvLSTM2D+BatchNorm stages and a sigmoid Conv3D
    head, compiled with binary cross-entropy and Adadelta."""
    seq = Sequential()
    seq.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   input_shape=(None, 40, 40, 1),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    # Three more identical stages; input shape is inferred.
    for _ in range(3):
        seq.add(
            ConvLSTM2D(filters=40,
                       kernel_size=(3, 3),
                       padding='same',
                       return_sequences=True))
        seq.add(BatchNormalization())
    seq.add(
        Conv3D(filters=1,
               kernel_size=(3, 3, 3),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))
    seq.compile(loss='binary_crossentropy', optimizer='adadelta')
    return seq
예제 #12
0
def fn_get_model_convLSTM_tframe_5():
    """Backward-running ConvLSTM stack (64, 32, 32, 32 filters, 7x7
    kernels) with BatchNorm between stages and a 1x1 sigmoid Conv2D head;
    the final recurrent stage collapses the time axis. Prints the model
    summary before returning."""
    # Settings shared by every ConvLSTM2D stage.
    common = dict(kernel_size=(7, 7), padding='same',
                  activation='tanh', recurrent_activation='hard_sigmoid',
                  kernel_initializer='glorot_uniform', unit_forget_bias=True,
                  recurrent_dropout=0.3, go_backwards=True)

    model = Sequential()
    model.add(ConvLSTM2D(filters=64, input_shape=(None, width, height, 1),
                         return_sequences=True, dropout=0.3, **common))
    model.add(BatchNormalization())

    # Two intermediate 32-filter stages with higher input dropout.
    for _ in range(2):
        model.add(ConvLSTM2D(filters=32, return_sequences=True,
                             dropout=0.4, **common))
        model.add(BatchNormalization())

    # Final recurrent stage returns only the last time step.
    model.add(ConvLSTM2D(filters=32, return_sequences=False,
                         dropout=0.4, **common))
    model.add(BatchNormalization())

    model.add(Conv2D(filters=1, kernel_size=(1, 1), activation='sigmoid',
                     padding='same', data_format='channels_last'))

    print(model.summary())

    return model
예제 #13
0
File: misc.py  Project: Hydroviet/Modis
def createModel(filters, kernel_size, input_shape, n_hidden_layers):
    """Stacked ConvLSTM2D model.

    One input stage, ``n_hidden_layers`` hidden stages, a final stage that
    drops the time axis, and a sigmoid Conv2D head. ``kernel_size`` is a
    single int applied in both spatial directions.
    """
    ksize = (kernel_size, kernel_size)

    seq = Sequential()
    seq.add(
        ConvLSTM2D(filters=filters,
                   kernel_size=ksize,
                   input_shape=input_shape,
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())

    for _ in range(n_hidden_layers):
        seq.add(
            ConvLSTM2D(filters=filters,
                       kernel_size=ksize,
                       padding='same',
                       return_sequences=True))
        seq.add(BatchNormalization())

    # Final recurrent stage keeps only the last time step.
    seq.add(
        ConvLSTM2D(filters=filters,
                   kernel_size=ksize,
                   padding='same',
                   return_sequences=False))
    seq.add(BatchNormalization())

    seq.add(
        Conv2D(filters=1,
               kernel_size=ksize,
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))

    return seq
예제 #14
0
def create_model():
    """Frame predictor over 15-frame 95x120 RGB clips.

    Four 90-filter ConvLSTM2D stages (the last one drops the time axis)
    with BatchNorm between them, and a relu Conv2D that restores the 3
    colour channels; compiled with MSE loss and Adadelta.
    """
    net = Sequential()
    net.add(ConvLSTM2D(filters=90, kernel_size=(3, 3),
                       input_shape=(15, 95, 120, 3),
                       padding='same', return_sequences=True))
    net.add(BatchNormalization())

    # Two more identical sequence-returning stages.
    for _ in range(2):
        net.add(ConvLSTM2D(filters=90, kernel_size=(3, 3),
                           padding='same', return_sequences=True))
        net.add(BatchNormalization())

    net.add(ConvLSTM2D(filters=90, kernel_size=(3, 3),
                       padding='same', return_sequences=False))
    net.add(BatchNormalization())

    net.add(Conv2D(filters=3, kernel_size=(3, 3),
                   activation='relu',
                   padding='same', data_format='channels_last'))
    net.compile(loss='mean_squared_error', optimizer='adadelta')

    return net
def build_model(loss="mse", num_outs=1):
    """Two ConvLSTM2D stages with dropout and a linear Dense head.

    Args:
        loss: loss function passed to ``model.compile``.
        num_outs: number of linear output units.

    Returns:
        A compiled Sequential model (RMSprop optimizer).
    """
    model = Sequential()

    model.add(
        ConvLSTM2D(filters=20,
                   kernel_size=(3, 3),
                   input_shape=(None, 9, 1, 1),
                   padding='same',
                   return_sequences=True))
    model.add(Dropout(0.2))

    # Second recurrent stage collapses the time axis.
    model.add(
        ConvLSTM2D(filters=20,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=num_outs))
    model.add(Activation("linear"))

    # Fix: removed the unused `start = time.time()` local, a leftover from
    # an earlier compile-timing experiment whose result was never read.
    model.compile(loss=loss, optimizer="rmsprop")

    return model
예제 #16
0
def full_combined_conv_lstm_model(env):
    """DQN-style head over ConvLSTM features of single-step observations.

    The input shape is (1, *env.observation_space.shape); the output is
    one logit per action, compiled with categorical cross-entropy, Adam,
    and accuracy.
    """
    obs_shape = (1, *env.observation_space.shape)

    net = Sequential()
    # Downsampling recurrent feature extractor: 8x8/4 then 4x4/2.
    net.add(
        ConvLSTM2D(
            filters=32,
            kernel_size=(8, 8),
            strides=(4, 4),
            input_shape=obs_shape,
            padding="same",
            return_sequences=True,
        ))
    net.add(BatchNormalization())

    net.add(
        ConvLSTM2D(
            filters=64,
            kernel_size=(4, 4),
            strides=(2, 2),
            padding="same",
            return_sequences=True,
        ))
    net.add(BatchNormalization())

    # Fully connected head: 512 relu units then one logit per action.
    net.add(Flatten())
    net.add(Dense(512, activation="relu"))
    net.add(Dense(env.action_space.n))
    net.compile(
        loss=categorical_crossentropy,
        optimizer=Adam(),
        metrics=["accuracy"],
    )
    return net
예제 #17
0
def RNN_model_multi(x_num, y_num, period):
    """Multi-step ConvLSTM model.

    The (period * nb_flow, x_num, y_num) input is reshaped into a
    (period, nb_flow, x_num, y_num) sequence, passed through three relu
    ConvLSTM2D stages (the last drops the time axis) and a tanh-activated
    convolution back to nb_flow channels.

    Fix: ``nb_filter`` cannot be mixed with ``kernel_size`` — it (along
    with ``border_mode`` and ``Model(input=..., output=...)``) was removed
    in Keras 2; ported to the Keras 2 spellings used elsewhere in this file.
    """
    seq_input = Input(shape=(period * nb_flow, x_num, y_num))
    reshape = Reshape((period, nb_flow, x_num, y_num))(seq_input)
    bn1 = BatchNormalization()(reshape)
    convLSTM = ConvLSTM2D(filters=32, kernel_size=(3, 3),
                          padding="same",
                          activation="relu",
                          return_sequences=True)(bn1)
    bn2 = BatchNormalization()(convLSTM)
    convLSTM2 = ConvLSTM2D(filters=32, kernel_size=(1, 1),
                           padding="same",
                           activation="relu",
                           return_sequences=True,
                           go_backwards=False)(bn2)
    bn3 = BatchNormalization()(convLSTM2)
    convLSTM3 = ConvLSTM2D(filters=32, kernel_size=(1, 1),
                           padding="same",
                           activation="relu",
                           return_sequences=False,
                           go_backwards=False)(bn3)
    # Convolution2D is the long-form Keras alias of Conv2D (already
    # imported by this file); only its kwargs needed porting.
    output = Convolution2D(filters=nb_flow, kernel_size=(3, 3), padding="same")(convLSTM3)
    output = Activation('tanh')(output)
    model = Model(inputs=seq_input, outputs=output)
    return model
예제 #18
0
def build_model(n_step=6,
                row=500,
                col=500,
                channel=3,
                n_hidden_units=64,
                output_units=2):
    """Four-stage ConvLSTM2D (40 filters each) + BatchNorm stack over
    (n_step, row, col, channel) clips, finished by a sigmoid Conv3D with
    3 output channels. Prints a summary and the model's I/O tensors.

    Note: ``n_hidden_units`` and ``output_units`` are kept for interface
    compatibility but are unused by the current ConvLSTM architecture.
    """
    # Fix: removed the large commented-out plain-LSTM prototype that sat
    # between these two banner prints; both prints are kept so the console
    # output is unchanged.
    print("###############model build###############")
    print("###############model build###############")
    model = Sequential()
    model.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   input_shape=(n_step, row, col, channel),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())
    # Three more identical ConvLSTM stages.
    for _ in range(3):
        model.add(
            ConvLSTM2D(filters=40,
                       kernel_size=(3, 3),
                       padding='same',
                       return_sequences=True))
        model.add(BatchNormalization())
    model.add(
        Conv3D(filters=3,
               kernel_size=(3, 3, 3),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))
    model.summary()
    print('model.input=', model.input)
    print('model.input.name=', model.input.name)
    print('model.input.shape=', model.input.shape)
    print('model.output=', model.output)
    print('model.output.name=', model.output.name)
    print('model.output.shape=', model.output.shape)
    return model
예제 #19
0
def decoder_model():
    """Decoder: five ConvLSTM2D stages that upsample 10x16x16x64 feature
    sequences back to 10x128x128x3 frames, with BatchNorm + LeakyReLU
    after the intermediate stages and a tanh activation on the output."""
    inputs = Input(shape=(10, 16, 16, 64))

    def convlstm(x, n_filters):
        # ConvLSTM2D configuration shared by every decoder stage.
        return ConvLSTM2D(filters=n_filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          padding='same',
                          return_sequences=True,
                          recurrent_dropout=0.5)(x)

    def bn_lrelu(x):
        # Time-distributed BatchNorm followed by LeakyReLU(0.2).
        x = TimeDistributed(BatchNormalization())(x)
        return TimeDistributed(LeakyReLU(alpha=0.2))(x)

    # 10x16x16 — dropout is applied after the first stage only.
    x = bn_lrelu(convlstm(inputs, 64))
    out_1 = TimeDistributed(Dropout(0.5))(x)

    out_2 = UpSampling3D(size=(1, 2, 2))(bn_lrelu(convlstm(out_1, 64)))

    # 10x32x32
    out_3 = UpSampling3D(size=(1, 2, 2))(bn_lrelu(convlstm(out_2, 128)))

    # 10x64x64
    out_4 = UpSampling3D(size=(1, 2, 2))(bn_lrelu(convlstm(out_3, 32)))

    # 10x128x128 — project to 3 channels and squash to [-1, 1].
    predictions = TimeDistributed(Activation('tanh'))(convlstm(out_4, 3))

    return Model(inputs=inputs, outputs=predictions)
예제 #20
0
    def CNN_convLSTM(activation="relu",
                     loss="binary_crossentropy",
                     optimizer="Adadelta",
                     layer=0,
                     height=0,
                     width=0,
                     days=0,
                     timesteps=0):
        """
        INPUT -> [CONV -> RELU] -> OUT

        Stacked channels-last ConvLSTM2D stages (16, 16, 32, 64 filters)
        with BatchNorm between them, closed by a Convolution3D that maps
        back to `layer` channels; compiled with the given loss/optimizer.
        """
        net = Sequential()

        net.add(
            ConvLSTM2D(filters=16,
                       kernel_size=(3, 3),
                       padding='same',
                       activation=activation,
                       data_format="channels_last",
                       input_shape=(None, height, width, layer),
                       return_sequences=True))
        net.add(BatchNormalization())
        # Deeper stages infer their input shape from the previous layer.
        for n_filters in (16, 32, 64):
            net.add(
                ConvLSTM2D(filters=n_filters,
                           kernel_size=(3, 3),
                           padding='same',
                           activation=activation,
                           data_format="channels_last",
                           return_sequences=True))
            net.add(BatchNormalization())
        net.add(
            Convolution3D(filters=layer,
                          kernel_size=(3, 3, layer),
                          padding="same",
                          data_format="channels_last"))

        net.compile(loss=loss, optimizer=optimizer)
        return net
예제 #21
0
def ConvLSTMs():
    """Three 32-filter 3x3 ConvLSTM2D layers; the last one returns only
    the final time step."""
    net = Sequential()
    net.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                       return_sequences=True,
                       input_shape=(None, HEIGHT, WIDTH, CHANNEL)))
    net.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                       return_sequences=True))
    net.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                       return_sequences=False))
    return net
예제 #22
0
    def get_model(self):
        """Build the single-filter channels-first ConvLSTM stack configured
        by ``self.args``: four ConvLSTM2D+BatchNorm stages and a Conv3D
        head, compiled on GPU:0 with the configured optimizer and loss."""
        args = self.args
        size = args.size
        channel = args.channel
        n_step = args.n_step
        activation = args.activation
        optimizer = args.optimizer
        loss = args.loss
        convlstm_kernel = args.convlstm_kernel
        conv_kernel = args.conv_kernel

        with tf.device('/GPU:0'):
            model = Sequential()
            model.add(
                ConvLSTM2D(filters=1,
                           kernel_size=(convlstm_kernel, convlstm_kernel),
                           data_format='channels_first',
                           input_shape=(n_step, channel, size, size),
                           padding='same',
                           return_sequences=True))
            model.add(BatchNormalization())

            # Three more identical stages; input shape is inferred.
            for _ in range(3):
                model.add(
                    ConvLSTM2D(filters=1,
                               kernel_size=(convlstm_kernel, convlstm_kernel),
                               data_format='channels_first',
                               padding='same',
                               return_sequences=True))
                model.add(BatchNormalization())

            model.add(
                Conv3D(filters=1,
                       kernel_size=(conv_kernel, conv_kernel, conv_kernel),
                       activation=activation,
                       padding='same',
                       data_format='channels_first'))

            model.compile(optimizer=optimizer, loss=loss)

        return model
예제 #23
0
def getModel(x_dim, meta_dim):
    """Spatio-temporal fusion model.

    Three frame-sequence inputs share one ConvLSTM encoder; two of the
    encodings are fused with the first via learned softmax weights, then
    combined with it through a Hadamard_fusion layer and a metadata branch.

    Args:
        x_dim: shape of each frame-sequence input (fed to the shared encoder).
        meta_dim: length of the metadata vector input.

    Returns:
        An uncompiled Model([XC, XP, XT, Xmeta]) producing a relu-activated
        (HEIGHT, WIDTH, CHANNEL) output.
    """
    # Input xc, xp, xt --> hct1, hP1, hP2
    # XC/XP/XT presumably are closeness/period/trend sequences — TODO confirm.
    XC = Input(shape=x_dim)
    XP = Input(shape=x_dim)
    XT = Input(shape=x_dim)

    # One encoder applied to all three inputs (shared weights); the last
    # ConvLSTM2D drops the time axis, yielding a 32-channel feature map.
    shared_model = Sequential()
    shared_model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3),
                                padding='same', return_sequences=True, input_shape=x_dim))
    shared_model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3),
                                padding='same', return_sequences=True))
    shared_model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3),
                                padding='same', return_sequences=False))

    hct = shared_model(XC)
    hP1 = shared_model(XP)
    hP2 = shared_model(XT)

    # Weighting based fusion
    # daily
    concate1 = Concatenate()([hct, hP1])
    conv1 = Conv2D(filters=32, kernel_size=(1, 1), padding='same')(concate1)

    # weekly
    concate2 = Concatenate()([hct, hP2])
    conv2 = Conv2D(filters=32, kernel_size=(1, 1), padding='same')(concate2)

    # Stack the two fused maps along a new trailing axis, compute softmax
    # attention weights over that axis, and sum the weighted maps.
    x1 = Lambda(lambda x: x[:, :, :, :, np.newaxis])(conv1)
    x2 = Lambda(lambda x: x[:, :, :, :, np.newaxis])(conv2)
    conv = Concatenate()([x1, x2])

    a = Dense(2, activation='softmax')(conv)
    ax = multiply([conv, a])
    ax1 = Lambda(lambda x: x[:, :, :, :, 0])(ax)
    ax2 = Lambda(lambda x: x[:, :, :, :, 1])(ax)
    hPallt = add([ax1, ax2])

    # hadamard fusion (Hadamard_fusion is a project-defined layer).
    hft = Hadamard_fusion()([hct, hPallt])

    # transform shape: 1x1 conv projects the fused features to CHANNEL maps.
    hft_reshap = Conv2D(filters=CHANNEL, kernel_size=(1, 1),
                        activation='relu', padding='same')(hft)

    # metadata fusion: expand the metadata vector to a full frame and add it.
    Xmeta = Input(shape=(meta_dim,))
    dens1 = Dense(units=10, activation='relu')(Xmeta)
    dens2 = Dense(units=WIDTH * HEIGHT * CHANNEL, activation='relu')(dens1)
    hmeta = Reshape((HEIGHT, WIDTH, CHANNEL))(dens2)

    add2 = Add()([hft_reshap, hmeta])
    X_hat = Activation('relu')(add2)

    model = Model(inputs=[XC, XP, XT, Xmeta], outputs=X_hat)
    return model
예제 #24
0
def main():
    """Train a 4-stage ConvLSTM radar-echo predictor and save it to disk."""
    rawdir = './fsl_20161018-22'
    data_set = read_radar(rawdir)
    data_set.generate_radarfsl()
    n_pixel = 30

    seq = Sequential()

    # First ConvLSTM carries the input spec; later layers infer shapes.
    seq.add(
        ConvLSTM2D(filters=n_pixel,
                   kernel_size=(3, 3),
                   input_shape=(None, n_pixel, n_pixel, 1),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())

    # Three more identical ConvLSTM + batch-norm stages.
    for _ in range(3):
        seq.add(
            ConvLSTM2D(filters=n_pixel,
                       kernel_size=(3, 3),
                       padding='same',
                       return_sequences=True))
        seq.add(BatchNormalization())

    # Per-frame sigmoid readout over the whole sequence.
    seq.add(
        Conv3D(filters=1,
               kernel_size=(3, 3, 3),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))

    seq.compile(loss='mean_squared_error', optimizer='adadelta')

    # Train on the first 1000 movie pairs.
    noisy_movies, shifted_movies = data_set.train_data, data_set.shifted_data
    seq.fit(noisy_movies[:1000],
            shifted_movies[:1000],
            batch_size=10,
            epochs=50,
            validation_split=0.05)

    # Persist the trained network.
    seq.save('ConvLSTM.h5')
예제 #25
0
def darknet_body_r(image_input_td, td_len, mode):
    """Darknet backbone with a recurrent (or 3-D conv) temporal fusion stage.

    The early part of the darknet backbone is built over a single-frame
    input, wrapped in TimeDistributed so it runs on every frame of
    `image_input_td`, and the per-frame features are then merged across
    time by one of three mechanisms selected via `mode`.

    Args:
        image_input_td: keras tensor of shape (batch, time, H, W, 3).
        td_len: number of frames; used only as the temporal kernel size
            in the '3d' mode.
        mode: 'lstm', 'bilstm', or '3d'.

    Returns:
        (x, skip_conn): output tensor and a list of layer indices recorded
        for later use as skip connections.

    Raises:
        ValueError: if `mode` is not one of the recognized values.
    """

    # Single-frame backbone; applied per frame below via TimeDistributed.
    image_input = Input(shape=(None, None, 3))  # (320, 320, 3)
    skip_conn = []

    # The prints are debug output: running layer counts of the growing model.
    x = DarknetConv2D_BN_Leaky(32, (3, 3))(image_input)
    print(len(Model(image_input, x).layers))
    x = resblock_body(x, 64, 1)
    print(len(Model(image_input, x).layers))
    x = resblock_body(x, 128, 2)
    print(len(Model(image_input, x).layers))
    x = resblock_body(x, 256, 8)
    print(len(Model(image_input, x).layers))
    x = Model(image_input, x)
    print('-' * 20)

    # Run the frozen per-frame backbone on every time step.
    x = TimeDistributed(x)(image_input_td)
    #	x = TimeDistributed(ZeroPadding2D(((1,0),(1,0))))(x)

    if mode == 'lstm':
        # Temporal fusion: ConvLSTM collapses the time axis
        # (return_sequences defaults to False).
        x = ConvLSTM2D(256,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu')(x)
    elif mode == 'bilstm':
        #		x = TimeDistributed(ZeroPadding2D(((1,0),(1,0))))(x)
        # Same, but processing the sequence in both directions.
        x = Bidirectional(
            ConvLSTM2D(256,
                       kernel_size=(3, 3),
                       padding='same',
                       activation='relu'))(x)
    elif mode == '3d':
        # Valid conv with temporal kernel td_len collapses time to 1;
        # indexing [:, 0] drops that singleton axis.
        x = Conv3D(256,
                   kernel_size=(td_len, 3, 3),
                   padding='valid',
                   activation='relu')(x)
        x = Lambda(lambda x: x[:, 0, :, :])(x)
        # NOTE(review): presumably compensates the 2-pixel spatial shrink
        # from the valid 3x3 kernel, padding only top/left — confirm.
        x = ZeroPadding2D(((2, 0), (2, 0)))(x)
    else:
        raise ValueError('Recurrent mode not recognized')

    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    print(len(Model(image_input_td, x).layers))
    # Record the current last-layer index as a skip connection.
    skip_conn.append(len(Model(image_input_td, x).layers) - 1)

    x = resblock_body(x, 512, 8)
    print(len(Model(image_input_td, x).layers))
    skip_conn.append(len(Model(image_input_td, x).layers) - 1)
    x = resblock_body(x, 1024, 4)
    print(len(Model(image_input_td, x).layers))

    return x, skip_conn
예제 #26
0
def __transition_up_block(ip,
                          nb_filters,
                          type='upsampling',
                          output_shape=None,
                          padding_param='same',
                          weight_decay=1E-4):
    ''' SubpixelConvolutional Upscaling (factor = 2)
    Args:
        ip: keras tensor
        nb_filters: number of layers
        type: can be 'upsampling', 'subpixel', 'deconv', or 'atrous'. Determines type of upsampling performed
        output_shape: required if type = 'deconv'. Output shape of tensor
        padding_param: padding mode used by the transposed-convolution path
        weight_decay: weight decay factor
    Returns: keras tensor, after applying upsampling operation.
    '''

    if type == 'upsampling':
        return UpSampling2D()(ip)

    if type == 'subpixel':
        def _conv_lstm(tensor):
            # Shared construction for the two ConvLSTM layers that wrap
            # the sub-pixel shuffle.
            return ConvLSTM2D(nb_filters, (3, 3),
                              activation="relu",
                              padding='same',
                              kernel_regularizer=l2(weight_decay),
                              use_bias=False,
                              kernel_initializer='he_uniform',
                              return_sequences=True)(tensor)

        shuffled = SubPixelUpscaling(scale_factor=2)(_conv_lstm(ip))
        return _conv_lstm(shuffled)

    if type == 'atrous':
        # waiting on https://github.com/fchollet/keras/issues/4018
        return AtrousConvolution2D(nb_filters, (3, 3),
                                   activation="relu",
                                   kernel_regularizer=l2(weight_decay),
                                   use_bias=False,
                                   atrous_rate=(2, 2),
                                   kernel_initializer='he_uniform')(ip)

    # Any other value falls through to the 'deconv' path: a per-frame
    # strided transposed convolution.
    return TimeDistributed(
        Conv2DTranspose(nb_filters, (3, 3),
                        activation='relu',
                        padding=padding_param,
                        strides=(2, 2),
                        kernel_initializer='he_uniform'))(ip)
예제 #27
0
def convLSTM_Model_3():
    """Build a stacked ConvLSTM model with a temporal pooling head.

    Four ConvLSTM layers (32 then 3 x 16 filters), a 1x1x1 sigmoid Conv3D
    readout, and two MaxPooling3D stages that together shrink the time
    axis by a factor of 10.  All input dimensions are left unspecified
    except the single channel.
    """
    model = Sequential()

    # First layer carries the (fully unspecified) input shape.
    model.add(
        ConvLSTM2D(filters=32,
                   strides=(1, 1),
                   kernel_size=(7, 7),
                   recurrent_activation='hard_sigmoid',
                   input_shape=(None, None, None, 1),
                   padding='same',
                   return_sequences=True))
    model.add(BatchNormalization())

    # Three identical 16-filter ConvLSTM + batch-norm stages.
    for _ in range(3):
        model.add(
            ConvLSTM2D(filters=16,
                       strides=(1, 1),
                       kernel_size=(7, 7),
                       recurrent_activation='hard_sigmoid',
                       input_shape=(None, None, None, 1),
                       padding='same',
                       return_sequences=True))
        model.add(BatchNormalization())

    # Pointwise sigmoid readout to a single channel.
    model.add(
        Conv3D(filters=1,
               kernel_size=(1, 1, 1),
               activation='sigmoid',
               padding='same'))
    model.add(BatchNormalization())

    # Temporal pooling only: 2x then 5x along the time axis.
    model.add(MaxPooling3D(pool_size=(2, 1, 1), padding='same'))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(5, 1, 1), padding='same'))

    print(model.summary())
    return model
def classifier_model():
    """Per-frame classifier over 10-frame 128x128 RGB clips.

    Four strided ConvLSTM stages (32 -> 64 -> 128 -> 256 filters, each
    halving the spatial dimensions), followed by time-distributed dense
    layers that emit 6 sigmoid scores per frame.  The first stage has no
    batch normalization; the later three do.
    """
    inputs = Input(shape=(10, 128, 128, 3))

    x = inputs
    for stage, n_filters in enumerate((32, 64, 128, 256)):
        x = ConvLSTM2D(filters=n_filters,
                       kernel_size=(3, 3),
                       strides=(2, 2),
                       padding="same",
                       return_sequences=True,
                       recurrent_dropout=0.5,
                       kernel_regularizer=regularizers.l1(0.001))(x)
        # The first stage deliberately skips normalization.
        if stage > 0:
            x = TimeDistributed(BatchNormalization())(x)
        x = TimeDistributed(LeakyReLU(alpha=0.2))(x)
        x = TimeDistributed(Dropout(0.5))(x)

    # Per-frame classification head.
    features = TimeDistributed(Flatten())(x)
    hidden = TimeDistributed(Dense(units=1024, activation='tanh'))(features)
    scores = TimeDistributed(Dense(units=6, activation='sigmoid'))(hidden)

    return Model(inputs=inputs, outputs=scores)
예제 #29
0
def getModel(x_dim, meta_dim):
    """Build the spatio-temporal fusion model (scalar attention variant).

    Three sequence inputs share one 3-layer ConvLSTM encoder.  Each
    periodic encoding is scored against the closeness encoding with a
    1x1-conv summary reduced to a single scalar; the two scalars are
    turned into softmax attention weights that blend the periodic
    branches.  The blend is Hadamard-fused with the closeness encoding
    and combined with a metadata branch before the ReLU output.

    Args:
        x_dim: shape tuple of each sequence input, e.g. (time, H, W, C).
        meta_dim: length of the flat metadata vector.

    Returns:
        Keras Model mapping [XC, XP, XT, Xmeta] -> X_hat.
    """
    XC = Input(shape=x_dim)
    XP = Input(shape=x_dim)
    XT = Input(shape=x_dim)

    # Shared encoder; only the last layer collapses the time axis.
    shared_model = Sequential([
        ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                   return_sequences=True, input_shape=x_dim),
        ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                   return_sequences=True),
        ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same',
                   return_sequences=False),
    ])

    hct1 = shared_model(XC)
    hP1 = shared_model(XP)
    hP2 = shared_model(XT)

    def _branch_score(branch):
        # 1x1-conv summary of [closeness, branch], flattened to a scalar.
        merged = Concatenate()([hct1, branch])
        summary = Conv2D(filters=32, kernel_size=(1, 1), padding='same')(merged)
        return Dense(1)(Flatten()(summary))

    ej1 = _branch_score(hP1)  # daily score
    ej2 = _branch_score(hP2)  # weekly score

    # Pairwise softmax turns the two scores into attention weights.
    aj1 = Lambda(softmax)([ej1, ej2])
    aj2 = Lambda(softmax)([ej2, ej1])
    hPallt = Add()([multiply([aj1, hP1]), multiply([aj2, hP2])])

    # Hadamard fusion of closeness with the attention-weighted blend.
    hft = Hadamard_fusion()([hct1, hPallt])

    # Project back to the target channel count.
    hft_reshap = Conv2D(filters=CHANNEL, kernel_size=(1, 1),
                        activation='relu', padding='same')(hft)

    # Metadata branch: two dense layers broadcast onto the spatial grid.
    Xmeta = Input(shape=(meta_dim,))
    meta_hidden = Dense(units=10, activation='relu')(Xmeta)
    meta_flat = Dense(units=WIDTH * HEIGHT * CHANNEL, activation='relu')(meta_hidden)
    hmeta = Reshape((HEIGHT, WIDTH, CHANNEL))(meta_flat)

    X_hat = Activation('relu')(Add()([hft_reshap, hmeta]))
    return Model(inputs=[XC, XP, XT, Xmeta], outputs=X_hat)
예제 #30
0
def decoder_model():
    """Decode (VIDEO_LENGTH/2, 16, 26, 64) feature sequences into frames.

    Three identical stages of ConvLSTM + batch norm + tanh + 2x spatial
    upsampling (filters 64, 64, 32), followed by a final 3-filter
    ConvLSTM with a tanh readout and no normalization, producing the
    predicted frame sequence.
    """
    inputs = Input(shape=(int(VIDEO_LENGTH / 2), 16, 26, 64))

    x = inputs
    for n_filters in (64, 64, 32):
        x = ConvLSTM2D(filters=n_filters,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding='same',
                       return_sequences=True,
                       recurrent_dropout=0.2)(x)
        x = TimeDistributed(BatchNormalization())(x)
        x = TimeDistributed(Activation('tanh'))(x)
        # Double the spatial resolution, keep the time axis.
        x = UpSampling3D(size=(1, 2, 2))(x)

    # Final stage: map to 3 channels with a tanh readout (no batch norm).
    x = ConvLSTM2D(filters=3,
                   kernel_size=(3, 3),
                   strides=(1, 1),
                   padding='same',
                   return_sequences=True,
                   recurrent_dropout=0.2)(x)
    predictions = TimeDistributed(Activation('tanh'))(x)

    return Model(inputs=inputs, outputs=predictions)