예제 #1
0
    def create_model_cnn_rnn_extracted_inference(self):
        """Build the stateful CNN+RNN inference model.

        Runs the detector's feature extractor on every frame via
        TimeDistributed, passes the features through two stateful
        ConvLSTM2D layers, and projects the final recurrent state to
        per-grid-cell box predictions.
        """
        # Inference mode: freeze learning-phase-dependent behavior.
        K.set_learning_phase(0)

        input_images = Input(
            batch_shape=(self.batch_size, 1, self.image_h, self.image_w, 3),
            name='images_input')

        feature_detector_model = self.detector.get_feature_model(
            is_before_activation=False)
        print("\nSummary of feature detector:")
        feature_detector_model.summary()

        # Apply the frozen detector to each frame of the sequence.
        frame_features = TimeDistributed(
            feature_detector_model, name='each_frame_feats')(input_images)

        state = ConvLSTM2D(
            256, (1, 1), strides=(1, 1), padding='same',
            return_sequences=True, stateful=True,
            name='conv_lstm_1')(frame_features)
        state = ConvLSTM2D(
            256, (1, 1), strides=(1, 1), padding='same',
            return_sequences=False, stateful=True,
            name='conv_lstm_2')(state)

        # 1x1 conv emitting (4 coords + 1 objectness + nb_class) per box.
        output_conv = Conv2D(
            self.nb_box * (4 + 1 + self.nb_class), (1, 1),
            strides=(1, 1), padding='same',
            kernel_initializer='lecun_normal',
            name='track_conv')(state)
        output_reshaped = Reshape(
            (self.grid_h, self.grid_w, self.nb_box,
             4 + 1 + self.nb_class))(output_conv)

        model = Model(input_images, output_reshaped, name='cnn+rnn model')
        print("\nFull MODEL:")
        model.summary()
        return model
예제 #2
0
def build_model():
    """Build and compile a 4-layer ConvLSTM binary classifier.

    Returns
    -------
    Sequential
        Compiled model (binary_crossentropy, nadam).
    """
    seq = Sequential()
    # Keras 2 API: nb_filter/nb_row/nb_col/border_mode were removed.
    # Use filters/kernel_size/padding, matching the rest of this file.
    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       input_shape=(512, 512, 512, 1),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    # Last recurrent layer collapses the time axis.
    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=False))
    seq.add(BatchNormalization())
    seq.add(Flatten())

    seq.add(Dense(100, activation='tanh'))
    seq.add(Dense(100, activation='tanh'))
    seq.add(Dense(1, activation='sigmoid'))

    seq.compile(loss='binary_crossentropy', optimizer='nadam')

    return seq
예제 #3
0
def create_conv2dlstm_model():
    """Build and compile a two-layer ConvLSTM 5-class classifier.

    Relies on module-level globals: input_shape, img_rows, img_cols,
    size_sequence, dropout.
    """
    # `inputs` avoids shadowing the builtin `input`.
    inputs = Input(shape=input_shape)
    # input_shape = (batch_size, rows, cols, size_sequence)
    # return_sequences=True is required: the default (False) would emit a
    # 4D tensor and the next ConvLSTM2D layer would reject it.
    conv1 = ConvLSTM2D(filters=64,
                       input_shape=(None, img_rows, img_cols, size_sequence),
                       kernel_size=(3, 3),
                       strides=(2, 2),
                       padding='same',
                       return_sequences=True,
                       kernel_initializer='random_uniform')(inputs)
    if dropout:
        conv1 = Dropout(0.5)(conv1)
    conv2 = ConvLSTM2D(filters=32,
                       kernel_size=(3, 3),
                       padding='same',
                       kernel_initializer='random_uniform')(conv1)

    data = Flatten()(conv2)

    output = Dense(32)(data)
    # softmax so outputs are probabilities: categorical_crossentropy
    # (without from_logits) expects a probability distribution.
    output = Dense(5, activation='softmax')(output)

    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
예제 #4
0
def _build_network(sequence_length, img_width, img_height):
    """Stack four ConvLSTM2D layers (128 -> 64 -> 64 -> 1 filters),
    each followed by batch norm except the last, all returning the
    full sequence.
    """
    model = Sequential()
    model.add(ConvLSTM2D(filters=128, kernel_size=(3, 3),
                         input_shape=(sequence_length, img_width,
                                      img_height, 1),
                         padding='same', return_sequences=True))
    model.add(BatchNormalization())
    # Two identical 64-filter middle layers.
    for _ in range(2):
        model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3),
                             padding='same', return_sequences=True))
        model.add(BatchNormalization())
    # Single-filter output layer, still per-timestep.
    model.add(ConvLSTM2D(filters=1, kernel_size=(3, 3),
                         padding='same', return_sequences=True))
    return model
예제 #5
0
def load_model():
    """Return the model used for abnormal event detection in videos
    using a spatiotemporal autoencoder.
    """
    model = Sequential()
    # Spatial encoder: two strided Conv3D layers over (227, 227, 10, 1).
    model.add(Conv3D(filters=128, kernel_size=(11, 11, 1),
                     strides=(4, 4, 1), padding='valid',
                     input_shape=(227, 227, 10, 1), activation='tanh'))
    model.add(Conv3D(filters=64, kernel_size=(5, 5, 1),
                     strides=(2, 2, 1), padding='valid', activation='tanh'))

    # Temporal bottleneck: three ConvLSTM2D layers with dropout.
    model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), strides=1,
                         padding='same', dropout=0.4,
                         recurrent_dropout=0.3, return_sequences=True))
    model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), strides=1,
                         padding='same', dropout=0.3,
                         return_sequences=True))
    model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), strides=1,
                         return_sequences=True, padding='same',
                         dropout=0.5))

    # Spatial decoder mirrors the encoder with transposed convolutions.
    model.add(Conv3DTranspose(filters=128, kernel_size=(5, 5, 1),
                              strides=(2, 2, 1), padding='valid',
                              activation='tanh'))
    model.add(Conv3DTranspose(filters=1, kernel_size=(11, 11, 1),
                              strides=(4, 4, 1), padding='valid',
                              activation='tanh'))

    model.compile(optimizer='adam', loss='mean_squared_error',
                  metrics=['accuracy'])

    return model
예제 #6
0
def loadWeights():
    """Assemble the ConvLSTM classifier on the module-level `seq`
    Sequential model, compile it, and load pretrained weights from disk.
    """
    # All three recurrent layers share the same configuration.
    conv_lstm_cfg = dict(filters=5,
                         kernel_size=(3, 3),
                         input_shape=(29, 256, 256, 3),
                         padding='same',
                         return_sequences=True)
    pool = lambda: TimeDistributed(MaxPooling2D((2, 2), (2, 2)))

    seq.add(ConvLSTM2D(**conv_lstm_cfg))
    seq.add(pool())
    seq.add(pool())
    seq.add(ConvLSTM2D(**conv_lstm_cfg))
    seq.add(pool())
    seq.add(ConvLSTM2D(**conv_lstm_cfg))
    seq.add(pool())

    # Classifier head: 3-way softmax.
    seq.add(Flatten())
    seq.add(Dense(1024))
    seq.add(Dense(3))
    seq.add(Activation('softmax'))

    seq.compile(loss='categorical_crossentropy',
                optimizer='ADAM',
                metrics=['accuracy'])
    seq.load_weights('weights-improvement-15-0.88.hdf5')
    print('i am done')
예제 #7
0
def getLSTMLayer(inputLayerName, Ns, seq_len=3, backend='tf'):
    '''
    Build a 3-layer ConvLSTM feature stack over an image sequence.

    Ns=size of input image, assuming square
    seq_len= # of samples
    Returns (layer, inputLayer): the final feature tensor and the
    Input tensor it is built on.
    '''
    if backend == 'tf':
        input_shape = (seq_len, Ns, Ns, 1)  # samples, times, h, w, c
    else:
        input_shape = (seq_len, 1, Ns, Ns)  # samples, times, c, h, w

    inputLayer = Input(shape=input_shape, name=inputLayerName)

    # return_sequences=True is required here: the default (False) would
    # collapse the time axis to a 4D tensor and the next ConvLSTM2D
    # layer would reject it.
    layer = ConvLSTM2D(32,
                       kernel_size=(3, 3),
                       padding="same",
                       return_sequences=True,
                       kernel_regularizer=regularizers.l2(0.01),
                       bias_initializer='zeros')(inputLayer)
    layer = BatchNormalization()(layer)
    layer = ConvLSTM2D(16,
                       kernel_size=(3, 3),
                       padding="same",
                       return_sequences=True)(layer)
    layer = BatchNormalization()(layer)
    layer = ConvLSTM2D(16,
                       kernel_size=(3, 3),
                       padding="same",
                       return_sequences=True)(layer)
    layer = BatchNormalization()(layer)
    return layer, inputLayer
예제 #8
0
def Get_Model(kernel=21, height=501, width=501, batch_size=1, times=1):
    """Build a stateful 3-layer ConvLSTM model with a per-timestep
    Conv2D head, compiled with MAE loss.

    Parameters mirror the fixed batch input shape
    (batch_size, times, height, width, 1) required by stateful RNNs.
    """
    model = Sequential()
    # Only the first layer of a Sequential model takes the input shape;
    # the redundant batch_input_shape arguments previously repeated on
    # the later layers were ignored and have been removed.
    model.add(
        ConvLSTM2D(filters=3,
                   batch_input_shape=(batch_size, times, height, width, 1),
                   kernel_size=(kernel, kernel),
                   padding='same',
                   return_sequences=True,
                   stateful=True))
    model.add(
        ConvLSTM2D(filters=6,
                   kernel_size=(kernel, kernel),
                   padding='same',
                   return_sequences=True,
                   stateful=True))
    model.add(
        ConvLSTM2D(filters=6,
                   kernel_size=(kernel, kernel),
                   padding='same',
                   return_sequences=True,
                   stateful=True))
    model.add(
        TimeDistributed(Conv2D(filters=1, kernel_size=(9, 9), padding='same')))
    optimizer = RMSprop(lr=0.00001, rho=0.9, epsilon=1e-08, decay=0.0)
    model.compile(loss='mae', optimizer=optimizer)
    return model
예제 #9
0
def _build_network(sequence_length, img_width, img_height):
    """Four 40-filter ConvLSTM2D layers (each batch-normalized) topped
    by a sigmoid Conv3D that maps back to one channel per frame.

    Note: the time axis is declared as None, so sequence_length is not
    baked into the graph.
    """
    seq = Sequential()
    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       input_shape=(None, img_width, img_height, 1),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())
    # Three identical follow-up recurrent layers.
    for _ in range(3):
        seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                           padding='same', return_sequences=True))
        seq.add(BatchNormalization())
    seq.add(Conv3D(filters=1, kernel_size=(3, 3, 3),
                   activation='sigmoid', padding='same',
                   data_format='channels_last'))
    return seq
예제 #10
0
def create_cnn(rows, columns, deep):
    """Build a stateful channels-FIRST ConvLSTM regression model.

    Input batches are (1, deep, 1, rows, columns): batch of 1, `deep`
    timesteps, one channel.  (The old comment claiming channels-last
    contradicted the code and was wrong.)
    """
    model = Sequential()
    # A stateful RNN needs the fully specified batch_input_shape; the
    # previous input_shape argument conflicted with it and was ignored,
    # so it (and the unused inputShape local) are dropped.
    model.add(ConvLSTM2D(32, (16, 16), strides=(3, 3),
                         activation='relu', return_sequences=True,
                         data_format='channels_first', padding='same',
                         batch_input_shape=[1, deep, 1, rows, columns],
                         stateful=True))
    model.add(ConvLSTM2D(64, (3, 3), strides=(2, 2),
                         kernel_initializer="he_normal",
                         return_sequences=True, activation='relu',
                         data_format='channels_first', stateful=True))
    model.add(TimeDistributed(MaxPooling2D((2, 2),
                                           data_format='channels_first')))
    model.add(ConvLSTM2D(64, (3, 3), strides=(2, 2),
                         padding='same', activation='relu',
                         data_format='channels_first',
                         return_sequences=True, stateful=True))
    model.add(TimeDistributed(MaxPooling2D((2, 2),
                                           data_format='channels_first')))

    # Collapse spatial dims per timestep, then two stateful LSTMs.
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(64, return_sequences=True, stateful=True))
    model.add(LSTM(32, return_sequences=False, stateful=True))
    model.add(Dense(1, activation='linear'))

    adam = Adam(lr=0.00001, decay=1e-6)
    model.compile(loss='mean_squared_error', optimizer=adam)

    return model
예제 #11
0
def get_siamese_model(input_shape):
    """Build a Siamese similarity network over sequence inputs.

    Both inputs are encoded by one shared ConvLSTM stack; their
    embeddings are compared with an L1 distance and a sigmoid unit.

    Relies on module-level globals: initialize_weights, initialize_bias.
    """
    # Define the tensors for the two input images
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Shared encoder [Edit here to use different models].
    # input_shape is only meaningful on the first layer of a Sequential
    # model; the redundant (ignored) copies on the later ConvLSTM2D
    # layers have been removed.
    model = Sequential()
    model.add(
        ConvLSTM2D(16, (3, 3),
                   kernel_initializer=initialize_weights,
                   activation='relu',
                   padding='same',
                   return_sequences=True,
                   input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(
        ConvLSTM2D(16, (3, 3),
                   kernel_initializer=initialize_weights,
                   activation='relu',
                   padding='same',
                   return_sequences=True))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(
        ConvLSTM2D(16, (3, 3),
                   kernel_initializer=initialize_weights,
                   activation='relu',
                   padding='same',
                   return_sequences=True))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))

    model.add(Flatten())
    model.add(
        Dense(
            2048,
            activation='sigmoid',
            kernel_regularizer=l2(1e-3),
            kernel_initializer=initialize_weights,
            bias_initializer=initialize_bias,
        ))

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Add a customized layer to compute the absolute difference between the encodings
    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    # Add a dense layer with a sigmoid unit to generate the similarity score
    prediction = Dense(1, activation='sigmoid')(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)

    # return the model
    return siamese_net
예제 #12
0
def train_Conv2DLstm(data, label):
    """Train a ConvLSTM regressor on a chronological split of (data,
    label), then plot test-set predictions against ground truth.

    Relies on module-level globals: train_ratio, date_size, np, plt.
    # NOTE(review): data appears to be 4D (samples, days, f1, f2) and is
    # reshaped to add a singleton axis — confirm against the caller.
    """
    # Chronological split: first train_ratio fraction trains, rest tests.
    train_Data = data[0: int(len(data) * train_ratio)]
    train_Label = label[0: int(len(data) * train_ratio)]
    # Insert a singleton axis so samples are 5D for ConvLSTM2D.
    train_Data = np.reshape(train_Data, (train_Data.shape[0],  train_Data.shape[1], 1,train_Data.shape[2],train_Data.shape[3]) )
    #train_Label = np.reshape(train_Label, (train_Label.shape[0],train_Label.shape[1], 1))

    test_Data = data[int(len(data) * train_ratio): len(data)]
    test_Label = label[int(len(data) * train_ratio): len(data)]
    test_Data = np.reshape(test_Data, (test_Data.shape[0], test_Data.shape[1], 1,test_Data.shape[2], test_Data.shape[3]))
    #test_Label = np.reshape(test_Label, (test_Label.shape[0], test_Label.shape[1], 1))

    # Four ConvLSTM2D + BatchNorm stages, then a Conv3D + dense head
    # producing a single linear output per sample.
    model = Sequential()
    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       input_shape=(date_size, 1, 3, 6), #day/ - / feature/ feature
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())

    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())

    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())

    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())
    model.add(Conv3D(filters=1, kernel_size=(3, 3, 1),padding='same', data_format='channels_last'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(Dense(1, activation='linear'))

    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    print('Model Build...')
    model.summary()

    print('Train...')
    # shuffle=False keeps the chronological order of the series.
    model.fit(train_Data, train_Label,
              epochs=5,
              batch_size=32, verbose=2,
              shuffle=False,
              validation_data=(test_Data, test_Label))

    testPredict = model.predict(test_Data)
    #testScore = math.sqrt(mean_squared_error(test_Label, testPredict))
    #print('Train Score: %.2f RMSE' % testScore)

    # Plot predictions vs. ground truth on the held-out tail.
    fig = plt.figure(facecolor='white', figsize=(10, 5))
    ax = fig.add_subplot(111)
    ax.plot(test_Label, label='True')
    ax.plot(testPredict, label='Prediction')
    ax.legend()
    plt.show()
예제 #13
0
        def model_creator(filters, kernel, dropout, NL):
            """Build a ConvLSTM encoder-decoder Sequential model.

            filters/kernel are indexable per-layer configs, dropout is a
            single rate, NL is the number of recurrent layers on each
            side.  Relies on enclosing-scope variables: in_dt, out_dt,
            input_bus, output_bus.
            """
            __model = Sequential()
            __model.add(
                BatchNormalization(name='batch_norm_0',
                                   input_shape=(in_dt, input_bus.shape[2],
                                                input_bus.shape[3],
                                                input_bus.shape[4])))

            # Encoder: NL ConvLSTM layers; only the last one collapses
            # the time axis (return_sequences=False).  The two previous
            # if/else branches differed only in return_sequences.
            for N in range(NL):
                __model.add(
                    ConvLSTM2D(name=('lstm_%i_enc' % N),
                               filters=filters[N],
                               kernel_size=(kernel[N], 1),
                               stateful=False,
                               padding='same',
                               return_sequences=(N != NL - 1),
                               data_format="channels_first"))
                __model.add(Dropout(dropout))
                __model.add(BatchNormalization())

            __model.add(Flatten())

            # Repeat the latent vector once per output timestep, then
            # restore a spatial layout for the decoder.
            __model.add(RepeatVector(out_dt))
            __model.add(Reshape((out_dt, output_bus.shape[2], 1, filters[-1])))

            # Decoder: NL ConvLSTM layers, all returning sequences.
            for N in range(NL):
                # NOTE(review): filters[-N]/kernel[-N] equal
                # filters[0]/kernel[0] when N == 0 (negative zero index);
                # confirm whether filters[-(N + 1)] was intended.
                __model.add(
                    ConvLSTM2D(name=('lstm_%i_dec' % N),
                               filters=filters[-N],
                               kernel_size=(kernel[-N], 1),
                               stateful=False,
                               padding='same',
                               return_sequences=True))
                __model.add(Dropout(dropout))
                __model.add(BatchNormalization())

            # Per-timestep sigmoid output.
            __model.add(
                TimeDistributed(Dense(1, activation="sigmoid"), name='test'))

            # The old `del (__model)` after this return was unreachable
            # dead code and has been removed.
            return __model
예제 #14
0
File: new_models.py  Project: geolvr/ADSNet
def ADSNet_O():
    """Build the ADSNet-O encoder-decoder lightning-nowcasting model.

    Encodes num_frames_truth observation frames with a CNN + ConvLSTM
    encoder, then autoregressively decodes num_frames future frames,
    feeding each sigmoid-activated prediction back as the next decoder
    input while threading the ConvLSTM hidden/cell state through the
    loop.  Relies on module-level globals: num_frames_truth, num_frames.
    """
    # encoder: layers definition && data flow  --------------------------------------
    # CNN module 1 -------------------------------------
    encoder_inputs = Input(shape=(num_frames_truth, 159, 159, 1), name='encoder_inputs')  # (bs, 3, 159, 159, 1)
    encoder_conv2d_1 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same'),
                                       name='en_conv2d_1')(encoder_inputs)
    encoder_conv2d_1 = TimeDistributed(Activation('relu'))(encoder_conv2d_1)
    encoder_conv2d_1 = TimeDistributed(MaxPooling2D(padding='same'))(encoder_conv2d_1)
    encoder_conv2d_2 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same'),
                                       name='en_conv2d_2')(encoder_conv2d_1)
    encoder_conv2d_2 = TimeDistributed(Activation('relu'))(encoder_conv2d_2)
    encoder_conv2d_2 = TimeDistributed(MaxPooling2D(padding='same'))(encoder_conv2d_2)

    # ---------------------------------------------------
    # Only the final hidden state (en_h) and cell state (en_c) are kept;
    # the per-step sequence output is discarded.
    _, en_h, en_c = ConvLSTM2D(filters=8, kernel_size=(5, 5), return_sequences=True, return_state=True, padding='same',
                               name='en_convlstm')(encoder_conv2d_2)
    # --------------------------------------------------------------------------------
    # # encoder to decoder: layers definition && data flow  --------------------
    # 1x1 convs lift the 8-filter encoder state to the decoder's 16 filters.
    en_h = Conv2D(filters=16, kernel_size=(1, 1), padding="same", name='en_de_h', activation='relu')(en_h)
    en_c = Conv2D(filters=16, kernel_size=(1, 1), padding="same", name='en_de_c', activation='relu')(en_c)
    # --------------------------------------------------------------------------------
    # decoder: layers definition && dataflow -----------------------------------------
    # The decoder layers are created ONCE here and reused every timestep,
    # so their weights are shared across the unrolled loop below.
    # CNN module 2 -----------------------------------------------------------
    de_conv2d_1 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same',activation='relu'), name='de_conv2d_1')
    de_conv2d_2 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same',activation='relu'), name='de_conv2d_2')
    # -------------------------------------------------------------------------
    # DCNN module ------------------------------------------------------------
    de_conv2dT_1 = TimeDistributed(Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='same',activation='relu'),
                                    name='de_conv2dT_1')
    de_conv2dT_2 = TimeDistributed(Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='same',activation='relu'),
                                   name='de_conv2dT_2')
    de_conv_out = TimeDistributed(Conv2D(filters=1, kernel_size=(1, 1), padding="same"), name='de_conv_out')
    # ---------------------------------------------------------------------------------
    de_convlstm = ConvLSTM2D(filters=16, return_sequences=True, return_state=True,
                                  kernel_size=(5, 5), name='de_convlstm', padding='same')
    # Seed the decoder with the LAST observed frame only.
    decoder_input_t = Cropping3D(data_format='channels_last', cropping=((num_frames_truth - 1, 0), (0, 0), (0, 0)))(encoder_inputs)
    out_list = []
    de_h = en_h
    de_c = en_c
    # Crops the upsampled 160x160 map back to 159x159.
    cropper = Cropping3D(cropping=((0, 0), (0, 1), (0, 1)))
    sigmoid = Activation('sigmoid')
    # Autoregressive unroll: raw logits are collected in out_list, while
    # the sigmoid-activated frame becomes the next step's input.
    for t in range(num_frames):
        decoder_conv2d_1 = de_conv2d_1(decoder_input_t)
        decoder_conv2d_1 = TimeDistributed(MaxPooling2D(padding='same'))(decoder_conv2d_1)
        decoder_conv2d_2 = de_conv2d_2(decoder_conv2d_1)
        decoder_conv2d_2 = TimeDistributed(MaxPooling2D(padding='same'))(decoder_conv2d_2)
        decoder_convlstm_t, de_h, de_c = de_convlstm([decoder_conv2d_2, de_h, de_c])
        decoder_conv2dT_1 = de_conv2dT_1(decoder_convlstm_t)
        decoder_conv2dT_2 = de_conv2dT_2(decoder_conv2dT_1)
        decoder_out_t = de_conv_out(decoder_conv2dT_2)
        decoder_out_t = cropper(decoder_out_t)
        out_list.append(decoder_out_t)
        decoder_input_t = sigmoid(decoder_out_t)

    decoder_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(out_list)  # (bs, 12, 159, 159, 1)
    decoder_outputs = Reshape((-1, 159 * 159, 1), input_shape=(-1, 159, 159, 1))(decoder_outputs)
    return Model(encoder_inputs, decoder_outputs, name='ADSNet_O')
예제 #15
0
def get_model(train=True):
    """
    Build/train the ConvLSTM autoencoder, or load a saved one.

    Parameters
    ----------
    train : bool
        If False, load and return the saved model from MODEL_PATH;
        otherwise build, train on DataGenerator, save, and return it.
    """

    if not train:
        return load_model(
            MODEL_PATH,
            custom_objects={'LayerNormalization': LayerNormalization})

    training_generator = DataGenerator(DATASET_PATH, CLIP_LEN, STRIDE, DIM,
                                       BATCH_SIZE, N_CHANNELS, SHUFFLE)

    # Spatial encoder: strided convs shrink each frame.
    seq = Sequential()
    seq.add(
        TimeDistributed(Conv2D(16, (11, 11), strides=4, padding="same"),
                        batch_input_shape=(None, *DIM, N_CHANNELS)))
    seq.add(LayerNormalization())
    seq.add(TimeDistributed(Conv2D(8, (8, 8), strides=2, padding="same")))
    seq.add(LayerNormalization())
    ######
    # Temporal bottleneck: three ConvLSTM2D layers.
    seq.add(ConvLSTM2D(8, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    seq.add(ConvLSTM2D(4, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    seq.add(ConvLSTM2D(8, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    ######
    # Spatial decoder mirrors the encoder; sigmoid output reconstructs
    # a single-channel frame.
    seq.add(
        TimeDistributed(Conv2DTranspose(8, (8, 8), strides=2, padding="same")))
    seq.add(LayerNormalization())
    seq.add(
        TimeDistributed(
            Conv2DTranspose(16, (11, 11), strides=4, padding="same")))
    seq.add(LayerNormalization())
    seq.add(
        TimeDistributed(
            Conv2D(1, (11, 11), activation="sigmoid", padding="same")))

    print(seq.summary())

    seq.compile(loss='mse',
                optimizer=keras.optimizers.Adam(lr=1e-4,
                                                decay=1e-5,
                                                epsilon=1e-6))
    seq.fit(x=training_generator,
            epochs=EPOCHS,
            verbose=True,
            workers=0,
            use_multiprocessing=False)
    seq.save(MODEL_PATH)

    return seq
예제 #16
0
 def conv_lstm_att1(self):
     """Attention block followed by two ConvLSTM2D layers and a
     per-timestep softmax classifier over self.nclasses classes.
     """
     inputs = Input(shape=self.input_shape)
     attention_mul = self.attention_3d_block(inputs)
     # NOTE(review): these ConvLSTM2D layers use the default
     # padding='valid', so the 3x3 layer shrinks spatial dims — confirm
     # this is intended.
     conv = ConvLSTM2D(32, (3, 3), return_sequences=True)(attention_mul)
     conv2 = ConvLSTM2D(32, (1, 1), return_sequences=True)(conv)
     flat = TimeDistributed(Flatten())(conv2)
     outputs = TimeDistributed(Dense(self.nclasses,
                                     activation='softmax'))(flat)
     model = Model(inputs=inputs, outputs=outputs)
     return model
def _build_network(sequence_length, img_width, img_height):
    """Functional three-layer ConvLSTM network (128 -> 64 -> 1 filters);
    the final layer collapses the time axis.

    The large block of commented-out Sequential-API code that duplicated
    this graph has been removed as dead code.
    """
    with K.name_scope("input_layer"):
        input_layer = Input(shape=(sequence_length, img_width, img_height, 1))
    with K.name_scope("lstmfirst"):
        first_lstm2d = ConvLSTM2D(filters=128, kernel_size=(5, 5),
                                  padding='same',
                                  return_sequences=True)(input_layer)
    with K.name_scope("lstmsecond"):
        second_lstm2d = ConvLSTM2D(filters=64, kernel_size=(5, 5),
                                   padding='same',
                                   return_sequences=True)(first_lstm2d)
    with K.name_scope("output"):
        output_lstm2d = ConvLSTM2D(filters=1, kernel_size=(3, 3),
                                   padding='same',
                                   return_sequences=False)(second_lstm2d)
    model = Model(inputs=input_layer, outputs=output_lstm2d)
    return model
예제 #18
0
File: model.py  Project: lebrat/Biolapse
def LSTMNET(input_shape):
    """U-Net-like ConvLSTM segmentation network with two pooling levels,
    skip connections, and a per-timestep sigmoid mask output.

    Compiled with binary_crossentropy / adam.
    """
    c = 12
    input_img = Input(input_shape, name='input')
    # Keras 2 API: nb_filter/nb_row/nb_col/border_mode were removed, and
    # positional Convolution2D(c, 3, 3) would now parse the trailing 3 as
    # strides — rewritten with filters/kernel_size/padding keywords.
    x = ConvLSTM2D(filters=c, kernel_size=(3, 3), padding='same', return_sequences=True)(input_img)
    x = ConvLSTM2D(filters=c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    c1 = ConvLSTM2D(filters=c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)

    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c1)

    x = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    x = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    c2 = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)

    x = TimeDistributed(MaxPooling2D((2, 2), (2, 2)))(c2)
    x = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    x = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    c3 = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', return_sequences=True)(x)

    # Upsample and fuse with the skip connections from the encoder.
    x = TimeDistributed(UpSampling2D((2, 2)))(c3)
    x = concatenate([c2, x])
    x = TimeDistributed(Convolution2D(c, (3, 3), padding='same'))(x)

    x = TimeDistributed(UpSampling2D((2, 2)))(x)
    x = concatenate([c1, x])

    x = TimeDistributed(Convolution2D(3, (3, 3), padding='same'))(x)
    x = TimeDistributed(Convolution2D(3, (3, 3), padding='same'))(x)

    output = TimeDistributed(Convolution2D(1, (3, 3), padding='same', activation='sigmoid', name='output'))(x)
    model = Model(inputs=[input_img], outputs=[output])
    model.compile(loss='binary_crossentropy', optimizer='adam')
    return model
예제 #19
0
def fn_get_model_convLSTM_tframe_5():
    """Three-layer backwards ConvLSTM stack over (None, 501, 501, 3)
    sequences with batch norm between layers and a 1x1 sigmoid Conv2D
    head producing a 3-channel output frame.
    """
    k = 3
    # Settings shared by all three recurrent layers.
    shared = dict(filters=32,
                  kernel_size=(k, k),
                  padding='same',
                  activation='tanh',
                  recurrent_activation='hard_sigmoid',
                  kernel_initializer='glorot_uniform',
                  unit_forget_bias=True,
                  recurrent_dropout=0.3,
                  go_backwards=True)

    model = Sequential()
    model.add(ConvLSTM2D(input_shape=(None, 501, 501, 3),
                         return_sequences=True, dropout=0.3, **shared))
    model.add(BatchNormalization())

    model.add(ConvLSTM2D(return_sequences=True, dropout=0.4, **shared))
    model.add(BatchNormalization())

    # Final recurrent layer collapses the time axis.
    model.add(ConvLSTM2D(return_sequences=False, dropout=0.4, **shared))
    model.add(BatchNormalization())

    model.add(Conv2D(filters=3,
                     kernel_size=(1, 1),
                     activation='sigmoid',
                     padding='same',
                     data_format='channels_last'))

    print(model.summary())
    return model
예제 #20
0
def load_model():
    """Build and compile a ConvLSTM video classifier.

    NOTE(review): relies on module-level globals ``SequenceLength``,
    ``IMSIZE`` and ``N_CLASSES`` — confirm they are defined before calling.
    Returns the compiled ``Sequential`` model (softmax over N_CLASSES).
    """
    # use simple CNN structure
    # One sample is a clip: (time, height, width, RGB).
    in_shape = (SequenceLength, IMSIZE[0], IMSIZE[1], 3)
    model = Sequential()
    model.add(
        ConvLSTM2D(32,
                   kernel_size=(7, 7),
                   padding='valid',
                   return_sequences=True,
                   input_shape=in_shape))
    model.add(Activation('relu'))
    # Pool only spatially (pool_size time dim = 1) to keep the sequence length.
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(
        ConvLSTM2D(64,
                   kernel_size=(5, 5),
                   padding='valid',
                   return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(
        ConvLSTM2D(96,
                   kernel_size=(3, 3),
                   padding='valid',
                   return_sequences=True))
    model.add(Activation('relu'))
    model.add(
        ConvLSTM2D(96,
                   kernel_size=(3, 3),
                   padding='valid',
                   return_sequences=True))
    model.add(Activation('relu'))
    model.add(
        ConvLSTM2D(96,
                   kernel_size=(3, 3),
                   padding='valid',
                   return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    # Dense on a 5D tensor applies per-position along the last axis.
    model.add(Dense(320))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    out_shape = model.output_shape
    # print('====Model shape: ', out_shape)
    # Flatten each timestep's (rows, cols, channels) into one feature vector
    # so a plain LSTM can consume the sequence.
    model.add(
        Reshape((SequenceLength, out_shape[2] * out_shape[3] * out_shape[4])))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # model structure summary
    print(model.summary())

    return model
예제 #21
0
    def create_conv_lstm_model_arch(self, input_shape, num_classes, hp):
        """Build a ConvLSTM classifier whose depth and widths come from ``hp``.

        hp supplies: hidden_neurons, weight_init, drop_out_rate,
        activation_func, network_topology ('deep' adds two extra layers)
        and get_optimizer().  Returns the compiled Sequential model.
        """
        def _recurrent(return_sequences, **extra):
            # All ConvLSTM2D layers share these hyper-parameter-driven args.
            return ConvLSTM2D(hp.hidden_neurons,
                              kernel_size=(1, 1),
                              padding='valid',
                              return_sequences=return_sequences,
                              dropout=hp.drop_out_rate,
                              recurrent_dropout=hp.drop_out_rate,
                              activation=hp.activation_func,
                              **extra)

        model = Sequential()
        model.add(_recurrent(True,
                             kernel_initializer=hp.weight_init,
                             data_format='channels_last',
                             input_shape=input_shape))
        if hp.network_topology == "deep":
            model.add(_recurrent(True))
            model.add(_recurrent(True))
        # Final recurrent layer collapses the time axis for the pooled head.
        model.add(_recurrent(False))
        model.add(GlobalMaxPooling2D())
        model.add(Dropout(hp.drop_out_rate))

        model.add(Dense(num_classes, activation='softmax'))  # softmax sigmoid

        model.compile(
            loss=keras.losses.categorical_crossentropy,
            optimizer=hp.get_optimizer(),  # Adadelta, Nadam, SGD, Adam
            metrics=['accuracy'])

        print(model.summary())
        return model
예제 #22
0
def getmodel1():
    """Build a two-layer ConvLSTM classifier over the global ``data1`` array.

    NOTE(review): reads the module-level global ``data1``; the input_shape
    uses ``data1.shape[0]`` as the time axis, which looks like the sample
    count rather than a sequence length — confirm against the caller.
    Returns the (uncompiled) Sequential model.
    """
    model = Sequential()
    input_shape = (data1.shape[0], data1.shape[1], data1.shape[2], 1)
    #samples, time, rows, cols, channels
    model.add(ConvLSTM2D(16, kernel_size=(3,3), activation='sigmoid',padding='same',input_shape=input_shape,
                         return_sequences=True))
    # return_sequences defaults to False here, so the output is a single
    # frame's feature map — matching GlobalAveragePooling2D below.
    model.add(ConvLSTM2D(8, kernel_size=(3,3), activation='sigmoid',padding='same'))
    model.add(GlobalAveragePooling2D())
    model.add(Dense(10, activation='softmax'))  # output shape: (None, 10)
    print (model.summary())
    return model
예제 #23
0
 def _model(self):
     """Build an upsampling Conv2D/ConvLSTM2D pipeline (160x90 -> 640x360).

     Reshape layers wrap each ConvLSTM2D by adding/removing a length-1 time
     axis, since ConvLSTM2D requires 5D input.  Compiled with Adam/MSE and
     the project-defined ``psnr_loss`` metric.
     """
     model = Sequential()
     model.add(
         Conv2D(
             64,
             9,
             padding='same',
             activation='relu',
             input_shape=(160, 90, 3),
         ))
     model.add(UpSampling2D())
     # Insert a singleton time axis so the next ConvLSTM2D sees 5D input.
     model.add(Reshape(target_shape=(1, 320, 180, 64)))
     model.add(
         ConvLSTM2D(
             64,
             3,
             padding='same',
             activation='relu',
             # NOTE(review): input_shape on a non-first Sequential layer is
             # ignored by Keras; the stated 32 channels also do not match the
             # incoming 64 — harmless but misleading.
             input_shape=(None, 320, 180, 32),
         ))
     # Drop the time axis again for the plain Conv2D below.
     model.add(Reshape(target_shape=(320, 180, 64)))
     model.add(
         Conv2D(
             32,
             1,
             padding='same',
             activation='relu',
             input_shape=(320, 180, 64),
         ))
     model.add(UpSampling2D())
     model.add(Reshape(target_shape=(1, 640, 360, 32)))
     model.add(
         ConvLSTM2D(
             32,
             3,
             padding='same',
             activation='relu',
             input_shape=(None, 640, 360, 32),
         ))
     model.add(Reshape(target_shape=(640, 360, 32)))
     # Final 5x5 conv maps features to a 3-channel (RGB) output image.
     model.add(
         Conv2D(
             3,
             5,
             padding='same',
             activation='relu',
             input_shape=(640, 360, 32),
         ))
     model.compile(
         optimizer=Adam(lr=1e-3),
         loss='mse',
         metrics=[psnr_loss],
     )
     return model
예제 #24
0
    def convlstm2d(
        name='convlstm2d',
        input_timestep=32,
        sta_num=20,
        output_timestep=1,
        optimizer='adam',
        metrics=None,
        loss='mse'
    )->Model:
        """Build a seq2seq ConvLSTM forecaster over per-station time series.

        Input is (input_timestep, sta_num); it is reshaped to a 5D tensor so
        ConvLSTM2D layers (with 1-wide kernels along the station axis) can
        encode it, then repeated to output_timestep steps and decoded.
        Returns the compiled Sequential model.

        metrics defaults to ['mae'].  (BUG FIX: was a mutable default
        argument ``metrics=['mae']`` — replaced with the None-sentinel idiom;
        the default behavior is unchanged.)
        """
        from keras.models import Sequential
        from keras.layers import BatchNormalization,LSTM,Dropout,RepeatVector,TimeDistributed,Dense,Flatten,Reshape,ConvLSTM2D

        if metrics is None:
            metrics = ['mae']

        model = Sequential(name=name)
        model.add(Reshape((input_timestep,sta_num,1,1),input_shape=(input_timestep,sta_num)))#make input same shape with purelstm model
        model.add(BatchNormalization(name = 'batch_norm_0', input_shape = (input_timestep, sta_num, 1, 1)))
        model.add(ConvLSTM2D(name ='conv_lstm_1',
                            filters = 64, kernel_size = (10, 1),
                            padding = 'same',
                            return_sequences = True))

        model.add(Dropout(0.21, name = 'dropout_1'))
        model.add(BatchNormalization(name = 'batch_norm_1'))

        # Encoder output: return_sequences=False collapses the time axis.
        model.add(ConvLSTM2D(name ='conv_lstm_2',
                            filters = 64, kernel_size = (5, 1),
                            padding='same',
                            return_sequences = False))

        model.add(Dropout(0.20, name = 'dropout_2'))
        model.add(BatchNormalization(name = 'batch_norm_2'))

        # Repeat the encoded state for each forecast step, then reshape back
        # to 5D so the decoder ConvLSTM layers can consume it.
        model.add(Flatten())
        model.add(RepeatVector(output_timestep))
        model.add(Reshape((output_timestep, sta_num, 1, 64)))

        model.add(ConvLSTM2D(name ='conv_lstm_3',
                            filters = 64, kernel_size = (10, 1),
                            padding='same',
                            return_sequences = True))

        model.add(Dropout(0.20, name = 'dropout_3'))
        model.add(BatchNormalization(name = 'batch_norm_3'))

        model.add(ConvLSTM2D(name ='conv_lstm_4',
                            filters = 64, kernel_size = (5, 1),
                            padding='same',
                            return_sequences = True))

        # One output value per station per forecast step.
        model.add(TimeDistributed(Dense(units=1, name = 'dense_1', activation = 'relu')))
        model.add(Flatten())#prevent topological error

        model.compile(optimizer=optimizer,metrics=metrics,loss=loss)

        return model
예제 #25
0
def buildConvLSTM(output_size=1, steps=1, dropout=0.3):
    """Build a 3-layer ConvLSTM regressor over (time, 1, 1, 5) inputs.

    Compiled with the project-defined ``root_mean_squared_error`` loss and
    Adam; also writes a timestamped architecture diagram via plot_model.
    Returns the compiled Sequential model.
    """
    def _cell(**extra):
        # Shared configuration for every ConvLSTM2D layer in the stack.
        return ConvLSTM2D(filters=16,
                          kernel_size=(1, 1),
                          padding='same',
                          use_bias=True,
                          bias_initializer='glorot_uniform',
                          return_sequences=True,
                          activation='tanh',
                          **extra)

    model = Sequential()
    model.add(_cell(input_shape=(None, 1, 1, 5)))
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    model.add(_cell())
    model.add(Dropout(dropout))
    model.add(_cell())
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
    # Linear head applied per position along the last axis.
    model.add(
        Dense(units=output_size,
              use_bias=True,
              bias_initializer='glorot_uniform',
              activation='linear'))
    model.compile(loss=root_mean_squared_error,
                  optimizer='adam',
                  metrics=['mae'])
    model.summary()
    # Timestamp the diagram filename so repeated builds don't overwrite it.
    curT = datetime.datetime.utcfromtimestamp(
        time.time()).strftime("%d-%H-%M-%S")
    plot_model(model, to_file=f'{curT}modelConvLSTM.png', show_shapes=True)
    return model
예제 #26
0
파일: model.py 프로젝트: dksshddl/Sal-test
    def supervised_lstm(input_shape,
                        action_size,
                        learning_rate=0.01,
                        backbone='resnet',
                        time_distributed=True,
                        multi_gpu=True):
        """Build a frame-sequence classifier with a selectable backbone.

        Every backbone branch must emit a 5D (batch, time, H, W, C) tensor,
        because the shared head applies TimeDistributed(Flatten()) followed
        by an LSTM over the time axis.  Returns the compiled Model with a
        sigmoid output of size ``action_size``.
        """
        img_input = Input(shape=input_shape, dtype='float32')
        if backbone == 'resnet':
            x = dcn_resnet(img_input, time_distributed)
        elif backbone == 'mobilenet':
            mobilenet = keras.applications.mobilenet_v2.MobileNetV2(
                include_top=False, weights=None, pooling='max')
            x = TimeDistributed(mobilenet)(img_input)
        elif backbone == 'convLSTM':
            # BUG FIX: these four stacked ConvLSTM2D layers previously used
            # return_sequences=False.  That collapses the time axis after the
            # first layer, so the second ConvLSTM2D receives a 4D tensor and
            # Keras raises a shape error; it also breaks the
            # TimeDistributed/LSTM head below, which expects a time axis like
            # the other branches.  All four layers now keep the sequence.
            x = img_input
            for _ in range(4):
                x = ConvLSTM2D(filters=40,
                               kernel_size=(3, 3),
                               padding='same',
                               return_sequences=True)(x)
                x = BatchNormalization()(x)
        else:
            x = TimeDistributed(
                Conv2D(32, kernel_size=8, strides=4,
                       activation='relu'))(img_input)
            x = TimeDistributed(
                Conv2D(64, kernel_size=4, strides=2, activation='relu'))(x)
            x = TimeDistributed(
                Conv2D(64, kernel_size=3, strides=1, activation='relu'))(x)
        # Shared head: per-frame flatten, temporal LSTM, sigmoid classifier.
        x = TimeDistributed(Flatten())(x)
        x = LSTM(512)(x)
        x = Dropout(rate=0.5)(x)
        x = Dense(action_size, activation='sigmoid', name='x_train_out')(x)
        optimizer = Adam(lr=learning_rate)
        model = Model(inputs=img_input, outputs=x)

        model.compile(optimizer=optimizer,
                      loss=keras.losses.binary_crossentropy,
                      metrics=['accuracy'])
        model.summary()
        return model
예제 #27
0
def getmodel2():
    """Two stacked sequence-to-sequence ConvLSTM layers compiled with MSE.

    NOTE: reads the module-level global ``data1`` to derive the input shape.
    """
    frames_shape = (data1.shape[0], data1.shape[1], data1.shape[2], 1)
    model = Sequential()
    # (filters, extra kwargs) per layer; only the first declares input_shape.
    for filters, extra in ((16, {'input_shape': frames_shape}), (1, {})):
        model.add(ConvLSTM2D(filters,
                             kernel_size=(3, 3),
                             activation='sigmoid',
                             padding='same',
                             return_sequences=True,
                             **extra))
    model.compile(loss='mse', optimizer='adam')
    return model
예제 #28
0
 def conv_lstm4(self):
     """ 3 conv-lstms

     Three sequence-preserving ConvLSTM layers followed by a per-frame
     softmax over ``self.nclasses``.  Returns the (uncompiled) Model.
     """
     frames = Input(shape=self.input_shape)
     x = ConvLSTM2D(32, (3, 3), return_sequences=True)(frames)
     x = ConvLSTM2D(32, (1, 1), return_sequences=True)(x)
     x = ConvLSTM2D(32, (1, 1), return_sequences=True)(x)
     x = TimeDistributed(Flatten())(x)
     preds = TimeDistributed(Dense(self.nclasses,
                                   activation='softmax'))(x)
     return Model(inputs=frames, outputs=preds)
예제 #29
0
 def conv_lstm6(self):
     """ ConvLSTM with Dropout before dense

     Two sequence-preserving ConvLSTM layers, per-frame flatten, dropout,
     then a per-frame softmax over ``self.nclasses``.  Returns the
     (uncompiled) Model.
     """
     frames = Input(shape=self.input_shape)
     x = ConvLSTM2D(32, (3, 3), return_sequences=True)(frames)
     x = ConvLSTM2D(32, (1, 1), return_sequences=True)(x)
     x = TimeDistributed(Flatten())(x)
     x = Dropout(0.5)(x)
     preds = TimeDistributed(Dense(self.nclasses,
                                   activation='softmax'))(x)
     return Model(inputs=frames, outputs=preds)
예제 #30
0
    def ConvLSTM2D_model(self, input_shape):
        """Create (or resume from checkpoint) the stacked ConvLSTM2D model.

        input_shape: shape of one input sequence, (time, rows, cols,
        channels) for channels_last ordering.  Side effect: stores the model
        on ``self.model``; returns nothing.

        NOTE(review): assumes the custom loss ``fn_keras_rmse2`` is defined
        at module level — confirm.
        """
        keras.backend.set_image_dim_ordering('tf')
        # This returns a tensor

        if os.path.exists('model.h5'):
            # A checkpoint exists: load it and continue training.
            print('模型已经存在,加载模型,继续训练。')
            # BUG FIX: the checkpoint was compiled with the custom loss, so
            # it must be supplied via custom_objects; otherwise load_model()
            # raises "Unknown loss function: fn_keras_rmse2".
            model = load_model('model.h5',
                               custom_objects={'fn_keras_rmse2': fn_keras_rmse2})
            model.summary()
        else:
            # No checkpoint: build the network from scratch.
            print('模型不存在,重新训练。')
            k = 4
            go_backwards = False
            model = Sequential()
            # First layer declares the input shape and uses lighter dropout.
            model.add(ConvLSTM2D(filters=32, kernel_size=(k, k),
                                 input_shape=input_shape, padding='same',
                                 return_sequences=True, activation='tanh',
                                 recurrent_activation='hard_sigmoid',
                                 kernel_initializer='glorot_uniform',
                                 unit_forget_bias=True, dropout=0.3,
                                 recurrent_dropout=0.3,
                                 go_backwards=go_backwards))
            model.add(BatchNormalization())

            # Three identical middle layers that keep the time axis.
            for _ in range(3):
                model.add(ConvLSTM2D(filters=32, kernel_size=(k, k),
                                     padding='same', return_sequences=True,
                                     activation='tanh',
                                     recurrent_activation='hard_sigmoid',
                                     kernel_initializer='glorot_uniform',
                                     unit_forget_bias=True, dropout=0.4,
                                     recurrent_dropout=0.3,
                                     go_backwards=go_backwards))
                model.add(BatchNormalization())

            # Final recurrent layer collapses the time axis.
            model.add(ConvLSTM2D(filters=32, kernel_size=(k, k),
                                 padding='same', return_sequences=False,
                                 activation='tanh',
                                 recurrent_activation='hard_sigmoid',
                                 kernel_initializer='glorot_uniform',
                                 unit_forget_bias=True, dropout=0.4,
                                 recurrent_dropout=0.3,
                                 go_backwards=go_backwards))
            model.add(BatchNormalization())

            # 1x1 conv maps features to a 3-channel sigmoid output image.
            model.add(Conv2D(filters=3, kernel_size=(1, 1),
                             activation='sigmoid',
                             padding='same', data_format='channels_last'))
            # BUG FIX: loss was the string "fn_keras_rmse2"; Keras only
            # resolves built-in loss names from strings, so compiling raised
            # ValueError.  Pass the callable itself instead.
            model.compile(optimizer='rmsprop', loss=fn_keras_rmse2)
            model.summary()
        self.model = model