Example #1
def getLSTMLayer(inputLayerName, Ns, seq_len=3, backend='tf'):
    '''
    Ns = size of the (square) input image
    seq_len = number of time steps (frames) per sample
    '''
    if backend == 'tf':
        input_shape = (seq_len, Ns, Ns, 1)  # (time, height, width, channels)
    else:
        input_shape = (seq_len, 1, Ns, Ns)  # (time, channels, height, width)

    inputLayer = Input(shape=input_shape, name=inputLayerName)

    layer = ConvLSTM2D(32,
                       kernel_size=(3, 3),
                       padding="same",
                       kernel_regularizer=regularizers.l2(0.01),
                       bias_initializer='zeros',
                       return_sequences=True)(inputLayer)
    layer = BatchNormalization()(layer)
    layer = ConvLSTM2D(16,
                       kernel_size=(3, 3),
                       padding="same",
                       return_sequences=True)(layer)
    layer = BatchNormalization()(layer)
    layer = ConvLSTM2D(16,
                       kernel_size=(3, 3),
                       padding="same",
                       return_sequences=True)(layer)
    layer = BatchNormalization()(layer)
    #layer = Conv2D(32, kernel_size=(3,3), padding="same",  bias_initializer='zeros', activation="relu")(layer)
    #layer = Conv2D(16, kernel_size=(3,3), padding="same",  bias_initializer='zeros', activation="relu")(layer)
    #layer = BatchNormalization()(layer)
    return layer, inputLayer
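
A minimal usage sketch (not part of the original example; the pooling head, class count, and the Ns/seq_len values are assumptions) showing how the returned tensors might be wired into a full Keras model:

# Hypothetical classifier on top of getLSTMLayer's ConvLSTM stack.
from keras.layers import GlobalAveragePooling3D, Dense
from keras.models import Model

layer, inputLayer = getLSTMLayer('frames', Ns=64, seq_len=3, backend='tf')
pooled = GlobalAveragePooling3D()(layer)            # collapse time and space
outputs = Dense(10, activation='softmax')(pooled)   # e.g. 10 classes
model = Model(inputs=inputLayer, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()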
Example #2
def loadWeights():
    seq = Sequential()
    seq.add(
        ConvLSTM2D(filters=5,
                   kernel_size=(3, 3),
                   input_shape=(29, 256, 256, 3),
                   padding='same',
                   return_sequences=True))
    seq.add(TimeDistributed(MaxPooling2D((2, 2), (2, 2))))
    seq.add(TimeDistributed(MaxPooling2D((2, 2), (2, 2))))
    seq.add(
        ConvLSTM2D(filters=5,
                   kernel_size=(3, 3),
                   input_shape=(29, 256, 256, 3),
                   padding='same',
                   return_sequences=True))
    seq.add(TimeDistributed(MaxPooling2D((2, 2), (2, 2))))
    seq.add(
        ConvLSTM2D(filters=5,
                   kernel_size=(3, 3),
                   input_shape=(29, 256, 256, 3),
                   padding='same',
                   return_sequences=True))
    seq.add(TimeDistributed(MaxPooling2D((2, 2), (2, 2))))
    seq.add(Flatten())
    seq.add(Dense(1024))
    seq.add(Dense(3))
    seq.add(Activation('softmax'))
    seq.compile(loss='categorical_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
    seq.load_weights('weights-improvement-15-0.88.hdf5')
    print('weights loaded')
    return seq
Example #3
def Get_Model(kernel=21, height=501, width=501, batch_size=1, times=1):
    model = Sequential()
    model.add(
        ConvLSTM2D(filters=3,
                   batch_input_shape=(batch_size, times, height, width, 1),
                   kernel_size=(kernel, kernel),
                   padding='same',
                   return_sequences=True,
                   stateful=True))
    model.add(
        ConvLSTM2D(filters=6,
                   batch_input_shape=(batch_size, times, height, width, 1),
                   kernel_size=(kernel, kernel),
                   padding='same',
                   return_sequences=True,
                   stateful=True))
    model.add(
        ConvLSTM2D(filters=6,
                   batch_input_shape=(batch_size, times, height, width, 1),
                   kernel_size=(kernel, kernel),
                   padding='same',
                   return_sequences=True,
                   stateful=True))
    model.add(
        TimeDistributed(Conv2D(filters=1, kernel_size=(9, 9), padding='same')))
    optimizer = RMSprop(lr=0.00001, rho=0.9, epsilon=1e-08, decay=0.0)
    model.compile(loss='mae', optimizer=optimizer)
    return model
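
Because every layer is stateful with a fixed batch_input_shape, the recurrent state is normally cleared between independent sequences. A hedged training-loop sketch (the random arrays, epoch count, and the smaller 64x64 frame size are assumptions for illustration):

import numpy as np

# Illustrative stateful training loop: 8 consecutive frames of one sequence,
# fed one timestep per batch so the ConvLSTM state carries across calls.
model = Get_Model(kernel=21, height=64, width=64, batch_size=1, times=1)
X = np.random.rand(8, 1, 64, 64, 1).astype('float32')   # input frames
Y = np.random.rand(8, 1, 64, 64, 1).astype('float32')   # next-frame targets

for epoch in range(3):
    for x_t, y_t in zip(X, Y):
        model.train_on_batch(x_t[np.newaxis], y_t[np.newaxis])
    model.reset_states()  # clear recurrent state before the next pass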
Example #4
def _build_network(sequence_length, img_width, img_height):
    seq = Sequential()
    seq.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   input_shape=(None, img_width, img_height, 1),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    seq.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    seq.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    seq.add(
        ConvLSTM2D(filters=40,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=True))
    seq.add(BatchNormalization())
    seq.add(
        Conv3D(filters=1,
               kernel_size=(3, 3, 3),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))
    return seq
Example #5
def create_conv2dlstm_model():
    # input shape is (time, rows, cols, channels); img_rows, img_cols,
    # size_sequence and dropout are assumed to be defined at module level
    input = Input(shape=(None, img_rows, img_cols, size_sequence))
    conv1 = ConvLSTM2D(filters=64,
                       kernel_size=(3, 3),
                       strides=(2, 2),
                       padding='same',
                       return_sequences=True,
                       kernel_initializer='random_uniform')(input)
    if dropout:
        conv1 = Dropout(0.5)(conv1)
    conv2 = ConvLSTM2D(filters=32,
                       kernel_size=(3, 3),
                       padding='same',
                       kernel_initializer='random_uniform')(conv1)

    data = Flatten()(conv2)

    output = Dense(32)(data)
    output = Dense(5, activation='softmax')(output)

    model = Model(inputs=input, outputs=output)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #6
def load_model():
    # use simple CNN structure
    in_shape = (SequenceLength, IMSIZE[0], IMSIZE[1], 3)
    model = Sequential()
    model.add(ConvLSTM2D(32, kernel_size=(7, 7), padding='valid', return_sequences=True, input_shape=in_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(ConvLSTM2D(64, kernel_size=(5, 5), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(Activation('relu'))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(Activation('relu'))
    model.add(ConvLSTM2D(96, kernel_size=(3, 3), padding='valid', return_sequences=True))
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))
    model.add(Dense(320))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    out_shape = model.output_shape
    # print('====Model shape: ', out_shape)
    model.add(Reshape((SequenceLength, out_shape[2] * out_shape[3] * out_shape[4])))
    model.add(LSTM(64, return_sequences=False))
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASSES, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    # model structure summary
    print(model.summary())

    return model
Example #7
def get_model(reload_model=True):
    """
    Parameters
    ----------
    reload_model : bool
        Load saved model or retrain it
    """
    if not reload_model:
        return load_model(Config.MODEL_PATH,custom_objects={'LayerNormalization': LayerNormalization})
    training_set = get_training_set()
    training_set = np.array(training_set)
    seq = Sequential()
    seq.add(TimeDistributed(Conv2D(128, (11, 11), strides=4, padding="same"), batch_input_shape=(None, 10, 256, 256, 1)))
    seq.add(LayerNormalization())
    seq.add(TimeDistributed(Conv2D(64, (5, 5), strides=2, padding="same")))
    seq.add(LayerNormalization())
    # # # # #
    seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    seq.add(ConvLSTM2D(32, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    # # # # #
    seq.add(TimeDistributed(Conv2DTranspose(64, (5, 5), strides=2, padding="same")))
    seq.add(LayerNormalization())
    seq.add(TimeDistributed(Conv2DTranspose(128, (11, 11), strides=4, padding="same")))
    seq.add(LayerNormalization())
    seq.add(TimeDistributed(Conv2D(1, (11, 11), activation="sigmoid", padding="same")))
    print(seq.summary())
    seq.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=1e-4, decay=1e-5, epsilon=1e-6))
    seq.fit(training_set, training_set,
            batch_size=Config.BATCH_SIZE, epochs=Config.EPOCHS, shuffle=False)
    seq.save(Config.MODEL_PATH)
    return seq
Example #8
def _build_network(sequence_length, img_width, img_height):
    model = Sequential()
    model.add(
        ConvLSTM2D(
            filters=128,
            kernel_size=(3, 3),
            input_shape=(sequence_length, img_width, img_height, 1),
            padding='same',
            return_sequences=True,
        ))
    model.add(
        ConvLSTM2D(
            filters=64,
            kernel_size=(3, 3),
            padding='same',
            return_sequences=True,
        ))
    model.add(
        ConvLSTM2D(
            filters=64,
            kernel_size=(3, 3),
            padding='same',
            return_sequences=True,
        ))
    model.add(
        Conv3D(
            filters=1,
            kernel_size=(3, 3, 3),
            activation='sigmoid',
            padding='same',
            data_format='channels_last',
        ))
    return model
Example #9
def ConvLSTM_Model(frames, channels, pixels_x, pixels_y, categories):
    trailer_input = Input(shape=(frames, channels, pixels_x, pixels_y)
                          , name='trailer_input')

    first_ConvLSTM = ConvLSTM2D(filters=20, kernel_size=(3, 3)
                                , data_format='channels_first'
                                , recurrent_activation='hard_sigmoid'
                                , activation='tanh'
                                , padding='same', return_sequences=True)(trailer_input)
    first_BatchNormalization = BatchNormalization()(first_ConvLSTM)
    first_Pooling = MaxPooling3D(pool_size=(1, 2, 2), padding='same', data_format='channels_first')(
        first_BatchNormalization)

    second_ConvLSTM = ConvLSTM2D(filters=10, kernel_size=(3, 3)
                                 , data_format='channels_first'
                                 , padding='same', return_sequences=True)(first_Pooling)
    second_BatchNormalization = BatchNormalization()(second_ConvLSTM)
    second_Pooling = MaxPooling3D(pool_size=(1, 3, 3), padding='same', data_format='channels_first')(
        second_BatchNormalization)

    outputs = [branch(second_Pooling, 'cat_{}'.format(category)) for category in categories]

    seq = Model(inputs=trailer_input, outputs=outputs, name='ConvLSTM_Model')

    return seq
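
The branch() helper used above is not shown in this example. A purely hypothetical stand-in (an assumption, not the project's actual code) that builds one small head per category:

# Hypothetical branch() helper: a per-category classification head on top of
# the shared ConvLSTM feature map.
from keras.layers import Flatten, Dense

def branch(shared_features, name):
    x = Flatten()(shared_features)
    x = Dense(64, activation='relu')(x)
    return Dense(1, activation='sigmoid', name=name)(x)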
Example #10
    def create_model_convlstm(self):
        # Define the network architecture
        input_data = Input(name='input', shape=(32, self.width, self.height, 1))

        # 1...
        # flatten_input = TimeDistributed(Flatten())(input_data)
        # masking = TimeDistributed(Masking(mask_value=self.padding_value))(flatten_input)
        # noise = TimeDistributed(GaussianNoise(0.01))(masking)
        # reshaped = TimeDistributed(Reshape(input_data[1:]))(noise)

        # 2...
        # masking = TimeDistributed(Masking(mask_value=self.padding_value))(input_data)
        # masking = TimeDistributed(Reshape(input_data))(masking)
        # noise = GaussianNoise(0.01)(masking)

        blstm = Bidirectional(
            ConvLSTM2D(filters=16, kernel_size=(3, 3), padding='same', return_sequences=True, dropout=0.1))(input_data)
        blstm = Bidirectional(
            ConvLSTM2D(filters=16, kernel_size=(3, 3), padding='same', return_sequences=True, dropout=0.1))(blstm)
        blstm = Bidirectional(
            ConvLSTM2D(filters=16, kernel_size=(3, 3), padding='same', return_sequences=True, dropout=0.1))(blstm)

        flatten = TimeDistributed(Flatten())(blstm)
        dense = TimeDistributed(Dense(len(letters) + 1, name="dense"))(flatten)
        outrnn = Activation('softmax', name='softmax')(dense)

        network = CTCModel([input_data], [outrnn])
        network.compile(Adam(lr=0.0001))
        return network
Example #11
    def setup(self, X_train_shape):
        #print ('X_train shape', X_train_shape)
        # Input shape = (None,5,20,126,1)
        inputs = Input(shape=X_train_shape[1:])

        normal1 = BatchNormalization(axis=2, name='normal1')(inputs)

        convlstm1 = ConvLSTM2D(filters=16,
                               kernel_size=(X_train_shape[2], 3),
                               padding='valid',
                               strides=(1, 2),
                               activation='tanh',
                               dropout=0.0,
                               recurrent_dropout=0.0,
                               return_sequences=True,
                               name='convlstm1')(normal1)

        convlstm2 = ConvLSTM2D(filters=32,
                               kernel_size=(1, 3),
                               padding='valid',
                               strides=(1, 2),
                               activation='tanh',
                               dropout=0.0,
                               recurrent_dropout=0.0,
                               return_sequences=True,
                               name='convlstm2')(convlstm1)

        convlstm3 = ConvLSTM2D(filters=64,
                               kernel_size=(1, 3),
                               padding='valid',
                               strides=(1, 2),
                               activation='tanh',
                               dropout=0.0,
                               recurrent_dropout=0.0,
                               return_sequences=False,
                               name='convlstm3')(convlstm2)

        flat = Flatten()(convlstm3)

        drop1 = Dropout(0.5)(flat)

        dens1 = Dense(256, activation='sigmoid', name='dens1')(drop1)
        drop2 = Dropout(0.5)(dens1)

        dens2 = Dense(self.nb_classes, name='dens2')(drop2)

        # option to include temperature in softmax
        temp = 1.0
        temperature = Lambda(lambda x: x / temp)(dens2)
        last = Activation('softmax')(temperature)

        self.model = Model(inputs=inputs, outputs=last)

        adam = Adam(lr=5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=adam,
                           metrics=['accuracy'])

        print(self.model.summary())
        return self
Example #12
    def create_model_cnn_rnn_extracted_inference(self):
        K.set_learning_phase(0)
        input_images = Input(batch_shape=(self.batch_size, 1, self.image_h,
                                          self.image_w, 3),
                             name='images_input')
        feature_detector_model = self.detector.get_feature_model(
            is_before_activation=False)

        print("\nSummary of feature detector:")
        feature_detector_model.summary()
        yolo_feats_seq = TimeDistributed(feature_detector_model,
                                name='each_frame_feats')\
                                        (input_images)

        recurrent_state = ConvLSTM2D(256, (1, 1), strides=(1, 1), padding='same',
                return_sequences=True, stateful=True, name='conv_lstm_1')\
                    (yolo_feats_seq)
        recurrent_state = ConvLSTM2D(256, (1, 1), strides=(1, 1), padding='same',
                return_sequences=False, stateful=True, name='conv_lstm_2')\
                    (recurrent_state)
        output_conv = Conv2D(self.nb_box * (4 + 1 + self.nb_class), (1, 1),
                             strides=(1, 1),
                             padding='same',
                             kernel_initializer='lecun_normal',
                             name='track_conv')(recurrent_state)
        output_reshaped = Reshape((self.grid_h, self.grid_w, self.nb_box,
                                   4 + 1 + self.nb_class))(output_conv)
        model = Model(input_images, output_reshaped, name='cnn+rnn model')

        print("\nFull MODEL:")
        model.summary()
        return model
Example #13
def MS_build_multivar_convlstm_model(model_configs, X, y, val_X, val_y):
    n_inputs, n_nodes, n_epochs, n_batch, n_seq, model_type = model_configs
    n_timesteps, n_features, n_outputs = (X.shape[3] *
                                          n_seq), X.shape[4], y.shape[1]
    #, return_sequences=True

    # define the input cnn model
    model = Sequential()
    model.add(
        ConvLSTM2D(n_nodes, (1, 3),
                   activation='relu',
                   return_sequences=True,
                   input_shape=(n_seq, 1, int(
                       (n_timesteps / n_seq)), n_features)))
    model.add(Dropout(0.2))
    model.add(ConvLSTM2D(n_nodes, (1, 3), activation='relu'))
    model.add(Flatten())
    model.add(RepeatVector(n_outputs))
    model.add(Dropout(0.2))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(1))

    custom_sgd = SGD(lr=1e-1, momentum=0.9)
    clr = CyclicLR(mode='triangular2', base_lr=1e-3, max_lr=1e-1)
    model.compile(loss='mae', optimizer=custom_sgd, metrics=['mae'])
    # fit network
    model.fit(X,
              y,
              epochs=n_epochs,
              batch_size=n_batch,
              verbose=2,
              validation_data=(val_X, val_y),
              callbacks=[clr])
    return model
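
The model expects each sample to be split into n_seq subsequences of length n_timesteps / n_seq. A hedged sketch of that reshaping step (the array sizes are assumptions, not from the original pipeline):

import numpy as np

# Illustrative input preparation for MS_build_multivar_convlstm_model:
# reshape windows of n_timesteps steps into (n_seq, 1, steps_per_seq, features).
n_samples, n_timesteps, n_features, n_seq = 100, 24, 5, 2
raw = np.random.rand(n_samples, n_timesteps, n_features).astype('float32')
X = raw.reshape((n_samples, n_seq, 1, n_timesteps // n_seq, n_features))
print(X.shape)  # (100, 2, 1, 12, 5)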
Example #14
 def build_model(self):
     # Conv LSTM
     noise = Input(shape=(self.rows, self.cols), name='Input')
     l1 = Reshape((noise.shape[1], noise.shape[2], 1),
                  name='Reshape_s1s21')(noise)
     l2 = Reshape((noise.shape[1], 1, noise.shape[2]),
                  name='Reshape_s11s2')(noise)
     aff = Lambda(lambda x: tf.matmul(x[0], x[1]),
                  name='Affinity')([l1, l2])
     aff = Reshape((TIME_DIM, noise.shape[2], noise.shape[2], 1),
                   name='Affinity_t_sliced')(aff)
     seq1 = ConvLSTM2D(32,
                       3,
                       activation=self.h_activation,
                       return_sequences=True)(aff)
     seq2 = ConvLSTM2D(32,
                       3,
                       activation=self.h_activation,
                       return_sequences=True)(seq1)
     seq = Reshape(
         (1, TIME_DIM, seq2.shape[2] * seq2.shape[3] * seq2.shape[4]))(seq2)
     per = Permute((1, 3, 2))(seq)
     wgh = DepthwiseConv2D(1, 1, activation='softmax')(per)
     aff = Multiply()([wgh, per])
     mean = Lambda(lambda x: K.mean(x, axis=3))(aff)
     per2 = Permute((2, 1))(mean)
     pool3 = Flatten()(per2)
     dense2 = Dense(self.cols, activation=self.o_activation)(pool3)
     model = Model(inputs=noise, outputs=dense2)
     model.summary()
     return model
Example #15
def create_cnn(height,width, deep):
    # initialize the input shape and channel dimension, assuming
    # TensorFlow/channels-last ordering
    inputShape = (seq_length, height, width, 1)
    #inputShape = (None,None,None,1)
    model = Sequential()

    model.add(ConvLSTM2D(64, (16, 16), strides=(3, 3),
        activation='relu', padding='same', input_shape=inputShape, return_sequences=True))
    model.add(BatchNormalization())
    model.add(ConvLSTM2D(128, (7, 7), strides=(2, 2),
        activation='relu', padding='same', return_sequences=True ))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(1, 2, 2)))

    model.add(ConvLSTM2D(256, (3, 3), strides=(4, 2),
        activation='relu', padding='same', return_sequences=False))
    model.add(BatchNormalization())

    #model.add(TimeDistributed(Conv2D(128, (16,16), strides=(8, 8),
    #    kernel_initializer="he_normal", activation='relu')))
    model.add(MaxPooling2D((2, 2)))

    #model.add(AveragePooling3D((1, 135, 240)))

    model.add(Flatten())
    #model.add(Reshape((-1, 40)))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(1, activation='linear'))
    
    adam = Adam(lr=0.00001, decay=1e-6)
    model.compile(loss='mean_squared_error', optimizer=adam)
    
    return model
Example #16
def Multi_gpu_conv2d_lstm(input_shape=(10, 60, 60, 2), output_shape=3, filters_list=[16, 8], return_seq=False):
    '''
    Input: a tensor of shape (batch_size, time_length, width, height, channels)
    Output: a tensor of shape (batch_size, 1) or (batch_size, time_length)
    input_shape : (time_length, width, height, channels)
    output_shape: size of the output produced for each input
    filters_list: number of filters for each ConvLSTM2D layer
    return_seq: the return_sequences setting for the output layer
    '''

    X_input = Input(input_shape)
    X = ConvLSTM2D(filters=32, kernel_size=(3, 3),padding='same',return_sequences=True)(X_input)
    X = Activation('relu')(X)
    X = BatchNormalization()(X)

    for i in range(len(filters_list)):
        #gpu_name = '/gpu:'+ str(i%2)
        gpu_name = '/gpu:1'
        with tf.device(gpu_name):
            X = ConvLSTM2D(filters=filters_list[i], kernel_size=(3, 3),padding='same', return_sequences=True)(X)
            X = Activation('relu')(X)
            X = BatchNormalization()(X)

    if return_seq:
       # X = TimeDistributed(Conv2D(filters=1, kernel_size=(3, 3),activation='relu', data_format = 'channels_last'))(X)
        Output = ConvLSTM2D(filters=output_shape, kernel_size=(1, 1),padding='same',return_sequences=True, name="extra_output")(X)
        X = TimeDistributed(Flatten())(Output)
        X = TimeDistributed(Dense(1), name='main_output')(X)
    else:
        Output = Conv3D(filters=2, kernel_size=(3, 3, 3),activation='relu', data_format = 'channels_last', name="extra_output")(X)
        X = Flatten()(Output)
        X = Dense(1, name='main_output')(X)

    model = Model(inputs=X_input, outputs=[X, Output])
    return model
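
With return_seq=False the model exposes two named outputs, 'main_output' (a scalar per sample) and 'extra_output' (a spatio-temporal feature map). A hedged compilation sketch (loss choices and weights are assumptions; it also assumes the GPU referenced inside the function is available):

# Hypothetical two-output compilation of Multi_gpu_conv2d_lstm.
model = Multi_gpu_conv2d_lstm(input_shape=(10, 60, 60, 2), output_shape=3,
                              filters_list=[16, 8], return_seq=False)
model.compile(optimizer='adam',
              loss={'main_output': 'mse', 'extra_output': 'mse'},
              loss_weights={'main_output': 1.0, 'extra_output': 0.2})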
Example #17
def create_m3():
    lstmnum = 4
    input1 = keras.layers.Input(shape=(lstmnum, 100, 128, 3))
    lstm_out1 = ConvLSTM2D(filters=1, kernel_size=[3, 3], strides=(1, 1), padding='valid', kernel_constraint=max_norm(2.), activation='relu',
                       input_shape=(lstmnum, 100, 128, 3), return_sequences=True)(input1)
    x = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones')(lstm_out1)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)
    lstm_out2 = ConvLSTM2D(filters=2, kernel_size=[3, 3], strides=(1, 1), padding='valid', activation='relu',
                       return_sequences=True)(x)
    x = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones')(lstm_out2)
    x = MaxPool3D(pool_size=(2, 2, 2))(x)
    x = ConvLSTM2D(filters=3, kernel_size=[3, 3], strides=(1, 1), padding='valid', activation='relu',
                       return_sequences=True)(x)
    x = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones')(x)
    #x = MaxPool3D(pool_size=(2, 2, 2))(x)
    x = ConvLSTM2D(filters=3, kernel_size=[3, 3], strides=(1, 1), padding='valid', activation='relu',
                       return_sequences=True)(x)
    x = BatchNormalization(axis=-1, momentum=0.9, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
                           gamma_initializer='ones', moving_mean_initializer='zeros',
                           moving_variance_initializer='ones')(x)
    flat = Flatten()(x)
    out = Dropout(0.3)(flat)
    out = Dense(2, kernel_regularizer=regularizers.l2(0.01), activation='softmax')(out)
    model = keras.models.Model(inputs=input1, outputs=out)
    return model
Example #18
    def build(height, width, depth, nb_classes, nb_slots):
        model = Sequential()
        model.add(
            ConvLSTM2D(filters=32,
                       kernel_size=(3, 3),
                       activation='tanh',
                       return_sequences=True,
                       padding='same',
                       input_shape=(nb_slots, height, width, depth),
                       name='FirstCLSTMv2'))
        model.add(BatchNormalization())
        model.add(
            ConvLSTM2D(filters=64,
                       kernel_size=(3, 3),
                       activation='tanh',
                       name='secondLSTM',
                       return_sequences=True))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
        model.add(TimeDistributed(Flatten()))
        # model.add(TimeDistributed(Dropout(0.25)))
        # model.add(Flatten())
        model.add(TimeDistributed(Dense(128, activation='tanh')))
        # model.add(TimeDistributed(Dropout(0.5)))
        model.add(TimeDistributed(Dense(nb_classes, activation='softmax')))
        print(model.summary())

        return model
Example #19
def create_convlstm_model():
    model = Sequential()
    # model.add(BatchNormalization(input_shape=(96, 54, 1)))

    model.add(ConvLSTM2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', return_sequences=True, input_shape=(3,96,54,1)))
    model.add(BatchNormalization())
    # model.add(MaxPooling3D(pool_size=(1,2,2)))

    model.add(ConvLSTM2D(filters=64, kernel_size=(3,3), strides=(2,2), padding='same', activation='relu', return_sequences=True))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(ConvLSTM2D(filters=64, kernel_size=(3,3), strides=(2,2), padding='same', activation='relu', return_sequences=False))
    model.add(BatchNormalization())

    # model.add(Conv2D(filters=1, kernel_size=(3, 3),activation='relu',padding='same', data_format='channels_last'))

    model.add(Flatten())

    model.add(Dense(100, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(4))
    print(model.summary())

    # sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
    model.compile(optimizer= 'adam', loss='mean_squared_error', metrics=['mae'])
    return model
Example #20
File: model.py  Project: joshanjinu/example
def load_model():
	"""
	Return the model used for abnormal event
	detection in videos using spatiotemporal autoencoder

	"""
	model=Sequential()
	model.add(Conv3D(filters=128,kernel_size=(11,11,1),strides=(4,4,1),padding='valid',input_shape=(227,227,10,1),activation='tanh'))
	#model.add(Conv3D(filters=128,kernel_size=(11,11,1),strides=(4,4,1),padding='valid',input_shape=(227,227,10,1),activation='relu'))
	model.add(Conv3D(filters=64,kernel_size=(5,5,1),strides=(2,2,1),padding='valid',activation='tanh'))
	#model.add(Conv3D(filters=64,kernel_size=(5,5,1),strides=(2,2,1),padding='valid',activation='relu'))

############ ConvLSTM2D
	model.add(ConvLSTM2D(filters=64,kernel_size=(3,3),strides=1,padding='same',dropout=0.4,recurrent_dropout=0.3,return_sequences=True))


	model.add(ConvLSTM2D(filters=32,kernel_size=(3,3),strides=1,padding='same',dropout=0.3,return_sequences=True))


	model.add(ConvLSTM2D(filters=64,kernel_size=(3,3),strides=1,return_sequences=True, padding='same',dropout=0.5))
#######################


	model.add(Conv3DTranspose(filters=128,kernel_size=(5,5,1),strides=(2,2,1),padding='valid',activation='tanh'))
	#model.add(Conv3DTranspose(filters=128,kernel_size=(5,5,1),strides=(2,2,1),padding='valid',activation='relu'))
	model.add(Conv3DTranspose(filters=1,kernel_size=(11,11,1),strides=(4,4,1),padding='valid',activation='tanh'))
	#model.add(Conv3DTranspose(filters=1,kernel_size=(11,11,1),strides=(4,4,1),padding='valid',activation='relu'))

	model.compile(optimizer='adam',loss='mean_squared_error',metrics=['accuracy'])

	return model
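
Scoring typically compares a 10-frame spatiotemporal volume with its reconstruction. A minimal sketch (the placeholder volume and its scaling are assumptions; the model would need trained weights to give meaningful scores):

import numpy as np

# Hypothetical anomaly scoring with the autoencoder from load_model():
# a higher reconstruction error suggests an abnormal event.
model = load_model()
volume = np.zeros((1, 227, 227, 10, 1), dtype='float32')  # placeholder clip
reconstructed = model.predict(volume)
error = np.mean((volume - reconstructed) ** 2)
print('reconstruction error:', error)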
Example #21
def createCONV2D_LSTM(no_win, no_act, no_con, params):
    ks = params[0]
    no_lstms = params[1]
    filt = params[2]
    act_reg = params[3]
    kern_reg = params[4]    
        
    model = Sequential()
    model.add(ConvLSTM2D(filters=filt, kernel_size=(ks, ks),
                       input_shape=(None, no_act, no_act, no_con),
                       padding='same', return_sequences=True,kernel_regularizer=l2(kern_reg),
                         activity_regularizer=l2(act_reg)))
    model.add(BatchNormalization())
    
    for i in range(0, no_lstms):
        model.add(ConvLSTM2D(filters=filt, kernel_size=(ks, ks),
                       padding='same', return_sequences=True,kernel_regularizer=l2(kern_reg),
                             activity_regularizer=l2(act_reg)))
        model.add(BatchNormalization())
       
    model.add(Conv3D(filters=no_con, kernel_size=(ks, ks, ks),
                   activation='sigmoid',
                   padding='same', data_format='channels_last',kernel_regularizer=l2(kern_reg),
                     activity_regularizer=l2(act_reg)))
    
    return model
Example #22
def build_model():
    seq = Sequential()
    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       input_shape=(512, 512, 512, 1),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    seq.add(BatchNormalization())

    seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=False))
    seq.add(BatchNormalization())
    seq.add(Flatten())

    seq.add(Dense(100, activation='tanh'))
    seq.add(Dense(100, activation='tanh'))
    seq.add(Dense(1, activation='sigmoid'))

    seq.compile(loss='binary_crossentropy', optimizer='nadam')

    return seq
Example #23
def _build_network(sequence_length, img_width, img_height):
    input_layer = Input(shape=(sequence_length, img_width, img_height, 1))
    encoder = ConvLSTM2D(filters=128,
                         kernel_size=(3, 3),
                         padding='same',
                         return_sequences=True)(input_layer)
    # Reconstruct decoder
    # decoder1 = RepeatVector(sequence_length)(encoder)
    decoder1 = ConvLSTM2D(filters=128,
                          kernel_size=(3, 3),
                          padding='same',
                          return_sequences=True)(encoder)
    decoder1 = ConvLSTM2D(filters=1,
                          kernel_size=(3, 3),
                          padding='same',
                          return_sequences=True)(decoder1)
    # decoder1 = TimeDistributed(Conv3D(filters=1, kernel_size=(3,3,3), padding='same', activation='sigmoid', data_format='channels_last'))(decoder1)
    # decoder1 = Conv3D(filters=1, kernel_size=(3,3,3), padding='same', activation='sigmoid', data_format='channels_last')(decoder1)
    # Predict decoder
    # decoder2 = RepeatVector(sequence_length - 1)(encoder)
    decoder2 = ConvLSTM2D(filters=128,
                          kernel_size=(3, 3),
                          padding='same',
                          return_sequences=True)(encoder)
    decoder2 = ConvLSTM2D(filters=1,
                          kernel_size=(3, 3),
                          padding='same',
                          return_sequences=False)(decoder2)
    # decoder2 = TimeDistributed(Conv3D(filters=1, kernel_size=(3,3,3), padding='same', activation='sigmoid', data_format='channels_last'))(decoder2)
    # decoder2 = Conv3D(filters=1, kernel_size=(3,3,3), padding='same', activation='sigmoid', data_format='channels_last')(decoder2)
    # together
    model = Model(inputs=input_layer, outputs=[decoder1, decoder2])
    return model
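
This network has a reconstruction head (the full input window) and a prediction head (a single frame), so it takes two targets. A hedged compile-and-fit sketch (shapes, losses, and the random data are assumptions):

import numpy as np

# Hypothetical training call: decoder1 reconstructs the input window,
# decoder2 predicts one future frame.
seq_len, h, w = 5, 64, 64
model = _build_network(seq_len, h, w)
model.compile(optimizer='adam', loss=['mse', 'mse'])

windows = np.random.rand(16, seq_len, h, w, 1).astype('float32')
next_frames = np.random.rand(16, h, w, 1).astype('float32')
model.fit(windows, [windows, next_frames], batch_size=4, epochs=1)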
Example #24
def create_cnn(rows,columns, deep):
    # stateful ConvLSTM layers need a fully specified batch_input_shape:
    # (batch, time, channels, rows, columns) with channels_first ordering
    model = Sequential()

    model.add(ConvLSTM2D(32, (16, 16), strides=(3, 3),
        activation='relu', return_sequences=True, data_format='channels_first',
        padding='same', batch_input_shape=(1, deep, 1, rows, columns), stateful=True))
    model.add(ConvLSTM2D(64, (3,3), strides=(2,2),
        kernel_initializer="he_normal", return_sequences=True, activation='relu', data_format='channels_first', stateful=True ))
    model.add(TimeDistributed(MaxPooling2D((2, 2),  data_format='channels_first' )))
    model.add(ConvLSTM2D(64, (3,3), strides=(2, 2),
        padding='same', activation='relu', data_format='channels_first', return_sequences=True, stateful=True ))
    model.add(TimeDistributed(MaxPooling2D((2, 2),  data_format='channels_first')))


    model.add(TimeDistributed(Flatten()))

    model.add(LSTM(64, return_sequences=True, stateful=True))
    model.add(LSTM(32, return_sequences=False, stateful=True))
    model.add(Dense(1, activation='linear'))
    
    adam = Adam(lr=0.00001, decay=1e-6)
    model.compile(loss='mean_squared_error', optimizer=adam)
    
    return model
Example #25
def _build_network(sequence_length, img_width, img_height):
    model = Sequential()
    model.add(
        ConvLSTM2D(
            filters=128,
            kernel_size=(3, 3),
            input_shape=(sequence_length, img_width, img_height, 1),
            padding='same',
            return_sequences=True,
        ))
    model.add(
        ConvLSTM2D(
            filters=64,
            kernel_size=(3, 3),
            padding='same',
            return_sequences=True,
        ))
    model.add(
        ConvLSTM2D(
            filters=64,
            kernel_size=(3, 3),
            padding='same',
            return_sequences=True,
        ))
    model.add(
        ConvLSTM2D(
            filters=1,
            kernel_size=(3, 3),
            padding='same',
            return_sequences=False,
        ))
    return model
Example #26
 def _create_keras_model(self, args=None) -> keras.Model:
     seq = Sequential()
     seq.add(
         ConvLSTM2D(filters=40,
                    kernel_size=(1, 1),
                    input_shape=INPUT_SHAPE,
                    padding='same',
                    return_sequences=True,
                    activation='relu'))
     seq.add(BatchNormalization())
     seq.add(
         ConvLSTM2D(filters=40,
                    kernel_size=(3, 3),
                    input_shape=INPUT_SHAPE,
                    padding='same',
                    return_sequences=True,
                    activation='relu',
                    name="layer_encoder"))
     seq.add(BatchNormalization())
     seq.add(
         Conv3D(filters=IMAGE_CHANNELS,
                kernel_size=(1, 1, 1),
                activation='sigmoid',
                padding='same',
                data_format='channels_last'))
     return seq
Example #27
def train_Conv2DLstm(data, label):

    train_Data = data[0: int(len(data) * train_ratio)]
    train_Label = label[0: int(len(data) * train_ratio)]
    train_Data = np.reshape(train_Data, (train_Data.shape[0],  train_Data.shape[1], 1,train_Data.shape[2],train_Data.shape[3]) )
    #train_Label = np.reshape(train_Label, (train_Label.shape[0],train_Label.shape[1], 1))

    test_Data = data[int(len(data) * train_ratio): len(data)]
    test_Label = label[int(len(data) * train_ratio): len(data)]
    test_Data = np.reshape(test_Data, (test_Data.shape[0], test_Data.shape[1], 1,test_Data.shape[2], test_Data.shape[3]))
    #test_Label = np.reshape(test_Label, (test_Label.shape[0], test_Label.shape[1], 1))

    model = Sequential()
    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       input_shape=(date_size, 1, 3, 6),  # (time, rows, cols, channels)
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())

    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())

    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())

    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
                       padding='same', return_sequences=True))
    model.add(BatchNormalization())
    model.add(Conv3D(filters=1, kernel_size=(3, 3, 1),padding='same', data_format='channels_last'))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(Dense(1, activation='linear'))

    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    print('Model Build...')
    model.summary()

    print('Train...')
    model.fit(train_Data, train_Label,
              epochs=5,
              batch_size=32, verbose=2,
              shuffle=False,
              validation_data=(test_Data, test_Label))

    testPredict = model.predict(test_Data)
    #testScore = math.sqrt(mean_squared_error(test_Label, testPredict))
    #print('Train Score: %.2f RMSE' % testScore)

    fig = plt.figure(facecolor='white', figsize=(10, 5))
    ax = fig.add_subplot(111)
    ax.plot(test_Label, label='True')
    ax.plot(testPredict, label='Prediction')
    ax.legend()
    plt.show()
Example #28
def get_siamese_model(input_shape):

    # Define the tensors for the two input images
    left_input = Input(input_shape)
    right_input = Input(input_shape)

    # Convolutional Neural Network [Edit here to use different models]
    model = Sequential()
    model.add(
        ConvLSTM2D(16, (3, 3),
                   kernel_initializer=initialize_weights,
                   activation='relu',
                   padding='same',
                   return_sequences=True,
                   input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(
        ConvLSTM2D(16, (3, 3),
                   kernel_initializer=initialize_weights,
                   activation='relu',
                   padding='same',
                   return_sequences=True,
                   input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(
        ConvLSTM2D(16, (3, 3),
                   kernel_initializer=initialize_weights,
                   activation='relu',
                   padding='same',
                   return_sequences=True,
                   input_shape=input_shape))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))

    model.add(Flatten())
    model.add(
        Dense(
            2048,
            activation='sigmoid',
            kernel_regularizer=l2(1e-3),
            kernel_initializer=initialize_weights,
            bias_initializer=initialize_bias,
        ))

    # Generate the encodings (feature vectors) for the two images
    encoded_l = model(left_input)
    encoded_r = model(right_input)

    # Add a customized layer to compute the absolute difference between the encodings
    L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
    L1_distance = L1_layer([encoded_l, encoded_r])

    # Add a dense layer with a sigmoid unit to generate the similarity score
    prediction = Dense(1, activation='sigmoid')(L1_distance)

    # Connect the inputs with the outputs
    siamese_net = Model(inputs=[left_input, right_input], outputs=prediction)

    # return the model
    return siamese_net
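
A hedged training sketch for the siamese network (the pair arrays, labels, and input shape are assumptions; initialize_weights and initialize_bias are taken to exist as in the snippet):

import numpy as np

# Hypothetical pair-based training: label 1 = same class, 0 = different.
input_shape = (5, 64, 64, 1)          # (frames, height, width, channels)
siamese = get_siamese_model(input_shape)
siamese.compile(optimizer='adam', loss='binary_crossentropy',
                metrics=['accuracy'])

left = np.random.rand(8, *input_shape).astype('float32')
right = np.random.rand(8, *input_shape).astype('float32')
labels = np.random.randint(0, 2, size=(8, 1))
siamese.fit([left, right], labels, batch_size=4, epochs=1)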
Example #29
        def model_creator(filters, kernel, dropout, NL):

            __model = Sequential()
            __model.add(
                BatchNormalization(name='batch_norm_0',
                                   input_shape=(in_dt, input_bus.shape[2],
                                                input_bus.shape[3],
                                                input_bus.shape[4])))

            for N in range(NL):
                if N == (NL - 1):
                    __model.add(
                        ConvLSTM2D(name=('lstm_%i_enc' % N),
                                   filters=filters[N],
                                   kernel_size=(kernel[N], 1),
                                   stateful=False,
                                   padding='same',
                                   return_sequences=False,
                                   data_format="channels_first"))
                else:
                    __model.add(
                        ConvLSTM2D(name=('lstm_%i_enc' % N),
                                   filters=filters[N],
                                   kernel_size=(kernel[N], 1),
                                   stateful=False,
                                   padding='same',
                                   return_sequences=True,
                                   data_format="channels_first"))

                __model.add(Dropout(dropout))

                __model.add(BatchNormalization())

            __model.add(Flatten())

            __model.add(RepeatVector(out_dt))

            __model.add(Reshape((out_dt, output_bus.shape[2], 1, filters[-1])))

            for N in range(NL):

                __model.add(
                    ConvLSTM2D(name=('lstm_%i_dec' % N),
                               filters=filters[-N],
                               kernel_size=(kernel[-N], 1),
                               stateful=False,
                               padding='same',
                               return_sequences=True))
                __model.add(Dropout(dropout))

                __model.add(BatchNormalization())

            __model.add(
                TimeDistributed(Dense(1, activation="sigmoid"), name='test'))

            return __model
Example #30
File: new_models.py  Project: geolvr/ADSNet
def ADSNet_O():
    # encoder: layers definition && data flow  --------------------------------------
    # CNN module 1 -------------------------------------
    encoder_inputs = Input(shape=(num_frames_truth, 159, 159, 1), name='encoder_inputs')  # (bs, 3, 159, 159, 1)
    encoder_conv2d_1 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same'),
                                       name='en_conv2d_1')(encoder_inputs)
    encoder_conv2d_1 = TimeDistributed(Activation('relu'))(encoder_conv2d_1)
    encoder_conv2d_1 = TimeDistributed(MaxPooling2D(padding='same'))(encoder_conv2d_1)
    encoder_conv2d_2 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same'),
                                       name='en_conv2d_2')(encoder_conv2d_1)
    encoder_conv2d_2 = TimeDistributed(Activation('relu'))(encoder_conv2d_2)
    encoder_conv2d_2 = TimeDistributed(MaxPooling2D(padding='same'))(encoder_conv2d_2)

    # ---------------------------------------------------
    _, en_h, en_c = ConvLSTM2D(filters=8, kernel_size=(5, 5), return_sequences=True, return_state=True, padding='same',
                               name='en_convlstm')(encoder_conv2d_2)
    # --------------------------------------------------------------------------------
    # # encoder to decoder: layers definition && data flow  --------------------
    en_h = Conv2D(filters=16, kernel_size=(1, 1), padding="same", name='en_de_h', activation='relu')(en_h)
    en_c = Conv2D(filters=16, kernel_size=(1, 1), padding="same", name='en_de_c', activation='relu')(en_c)
    # --------------------------------------------------------------------------------
    # decoder: layers definition && dataflow -----------------------------------------
    # CNN module 2 -----------------------------------------------------------
    de_conv2d_1 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same',activation='relu'), name='de_conv2d_1')
    de_conv2d_2 = TimeDistributed(Conv2D(filters=4, kernel_size=(5, 5), padding='same',activation='relu'), name='de_conv2d_2')
    # -------------------------------------------------------------------------
    # DCNN module ------------------------------------------------------------
    de_conv2dT_1 = TimeDistributed(Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='same',activation='relu'),
                                    name='de_conv2dT_1')
    de_conv2dT_2 = TimeDistributed(Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2, 2), padding='same',activation='relu'),
                                   name='de_conv2dT_2')
    de_conv_out = TimeDistributed(Conv2D(filters=1, kernel_size=(1, 1), padding="same"), name='de_conv_out')
    # ---------------------------------------------------------------------------------
    de_convlstm = ConvLSTM2D(filters=16, return_sequences=True, return_state=True,
                                  kernel_size=(5, 5), name='de_convlstm', padding='same')
    decoder_input_t = Cropping3D(data_format='channels_last', cropping=((num_frames_truth - 1, 0), (0, 0), (0, 0)))(encoder_inputs)
    out_list = []
    de_h = en_h
    de_c = en_c
    cropper = Cropping3D(cropping=((0, 0), (0, 1), (0, 1)))
    sigmoid = Activation('sigmoid')
    for t in range(num_frames):
        decoder_conv2d_1 = de_conv2d_1(decoder_input_t)
        decoder_conv2d_1 = TimeDistributed(MaxPooling2D(padding='same'))(decoder_conv2d_1)
        decoder_conv2d_2 = de_conv2d_2(decoder_conv2d_1)
        decoder_conv2d_2 = TimeDistributed(MaxPooling2D(padding='same'))(decoder_conv2d_2)
        decoder_convlstm_t, de_h, de_c = de_convlstm([decoder_conv2d_2, de_h, de_c])
        decoder_conv2dT_1 = de_conv2dT_1(decoder_convlstm_t)
        decoder_conv2dT_2 = de_conv2dT_2(decoder_conv2dT_1)
        decoder_out_t = de_conv_out(decoder_conv2dT_2)
        decoder_out_t = cropper(decoder_out_t)
        out_list.append(decoder_out_t)
        decoder_input_t = sigmoid(decoder_out_t)

    decoder_outputs = Lambda(lambda x: K.concatenate(x, axis=1))(out_list)  # (bs, 12, 159, 159, 1)
    decoder_outputs = Reshape((-1, 159 * 159, 1), input_shape=(-1, 159, 159, 1))(decoder_outputs)
    return Model(encoder_inputs, decoder_outputs, name='ADSNet_O')