Code example #1
File: vgg16_lstm.py Project: lamductan/modis_utils
    def _create_model(img_height, img_width, input_timesteps, compile_params, weights):
        # Pretrained ImageNet weights require 3-channel input; otherwise
        # train from scratch on single-channel imagery.
        if weights:
            weights = 'imagenet'
            channels = 3
        else:
            weights = None
            channels = 1
        # Prepare
        input_shape = (input_timesteps, img_height, img_width, channels)

        # Model architecture
        source = Input(
            name='seed', shape=input_shape, dtype=tf.float32)
        
        # Encode every timestep with a shared VGG16 feature extractor.
        vgg16_encoder_block = vgg16_encoder(input_shape=input_shape[1:], weights=weights)
        net = TimeDistributed(vgg16_encoder_block)(source)
        
        net = ConvLSTM2D(filters=128, kernel_size=3, padding='same', return_sequences=True)(net)
        net = BatchNormalization()(net)
        net = ConvLSTM2D(filters=1, kernel_size=3, padding='same', return_sequences=False)(net)
        net = BatchNormalization()(net)
        
        # Upsample the ConvLSTM output back to the input resolution
        # (two 4x residual upsampling blocks) and squash to [0, 1].
        vgg16_decoder = resnet_up(net, scale=4)
        vgg16_decoder = BatchNormalization()(vgg16_decoder)
        vgg16_decoder = resnet_up(vgg16_decoder, scale=4)
        vgg16_decoder = BatchNormalization()(vgg16_decoder)
        predicted_img = Activation('sigmoid')(vgg16_decoder)
        
        model = Model(inputs=[source], outputs=[predicted_img])
        
        # Compile model
        model = compile_model(model, compile_params)
        return model 
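
All four snippets delegate compilation to a project-local compile_model helper that is not shown on this page. Judging from the compile_params dict built in code example #4 below, it plausibly just forwards the dict's entries to Keras; the following is a hypothetical reconstruction, not the project's actual code:

    def compile_model(model, compile_params):
        # Hypothetical sketch: pass the optimizer, loss, and metrics from
        # compile_params straight through to model.compile().
        model.compile(optimizer=compile_params['optimizer'],
                      loss=compile_params['loss'],
                      metrics=compile_params.get('metrics'))
        return model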
Code example #2
File: cplx_model.py Project: lamductan/modis_utils
    def _create_model(img_height, img_width, input_timesteps, compile_params):
        input_shape = (input_timesteps, img_height, img_width, 1)
        x = Input(shape=input_shape, name='input')

        # Apply the convolutional encoder to every timestep independently.
        encoder_input_shape = input_shape[1:]
        encode_block = SkipConvLSTMSingleOutput._create_encoder(
            encoder_input_shape)
        net = TimeDistributed(encode_block)(x)

        net = ConvLSTM2D(filters=128,
                         kernel_size=3,
                         padding='same',
                         return_sequences=True)(net)
        net = BatchNormalization()(net)
        hidden = ConvLSTM2D(filters=80,
                            kernel_size=3,
                            padding='same',
                            return_sequences=False)(net)
        hidden = BatchNormalization()(hidden)

        # The decoder receives the ConvLSTM bottleneck output together with
        # the last input frame, which acts as a skip connection.
        decode_block = SkipConvLSTMSingleOutput._create_decoder(
            hidden.shape[1:], encode_block)
        net = decode_block([hidden, Lambda(lambda x: x[:, -1, :, :, :])(x)])
        net = Activation('sigmoid')(net)

        model = Model(inputs=x, outputs=net, name='skip_conv_single_output')
        model = compile_model(model, compile_params)
        return model
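
The Lambda(lambda x: x[:, -1, :, :, :]) layer hands the decoder the final input frame as a skip connection alongside the ConvLSTM bottleneck output. A small standalone check of what that slice yields (assumes TensorFlow 2.x; shapes are illustrative):

    import numpy as np
    import tensorflow as tf
    from tensorflow.keras.layers import Lambda

    # (batch, time, height, width, channels)
    seq = np.random.rand(2, 12, 128, 128, 1).astype('float32')
    last_frame = Lambda(lambda x: x[:, -1, :, :, :])(tf.constant(seq))
    print(last_frame.shape)  # (2, 128, 128, 1): only the final timestep survives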
Code example #3
    def _create_model(img_height, img_width, input_timesteps, compile_params):
        # Prepare
        input_shape = (input_timesteps, img_height, img_width, 1)

        # Model architecture
        source = keras.Input(name='seed', shape=input_shape, dtype=tf.float32)
        model = conv_lstm_2D(filters=128,
                             kernel_size=3,
                             strides=1,
                             padding='same')(source)
        model = BatchNormalization()(model)
        model = conv_lstm_2D(filters=128,
                             kernel_size=3,
                             strides=1,
                             padding='same')(model)
        model = BatchNormalization()(model)
        model = conv_lstm_2D(filters=128,
                             kernel_size=3,
                             strides=1,
                             padding='same')(model)
        model = BatchNormalization()(model)
        model = conv_lstm_2D(filters=128,
                             kernel_size=3,
                             strides=1,
                             padding='same')(model)
        model = BatchNormalization()(model)
        # The final ConvLSTM collapses the time dimension
        # (return_sequences=False) before the output projection.
        model = conv_lstm_2D(filters=128,
                             kernel_size=3,
                             strides=1,
                             padding='same',
                             return_sequences=False)(model)
        model = BatchNormalization()(model)
        # Project the features down to a single-channel predicted frame.
        predict_img = conv_2D(filters=1,
                              kernel_size=3,
                              strides=1,
                              padding='same')(model)
        model = keras.Model(inputs=[source], outputs=[predict_img])

        # Compile model
        model = compile_model(model, compile_params)
        return model
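
conv_lstm_2D and conv_2D are project wrappers, not standard Keras layer names. For the stacked calls above to chain, the wrapper presumably defaults to return_sequences=True (the opposite of keras.layers.ConvLSTM2D). A guess at minimal definitions, labeled hypothetical:

    from functools import partial
    from tensorflow.keras.layers import Conv2D, ConvLSTM2D

    # Hypothetical: default to returning full sequences so the layers stack;
    # the explicit return_sequences=False on the last call still overrides it.
    conv_lstm_2D = partial(ConvLSTM2D, return_sequences=True)
    conv_2D = Conv2D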
Code example #4
def trainByCompileParams(opt_type, lr):
    print('Training with {0}, lr = {1}'.format(opt_type, lr))

    class MyGenerator(Sequence):
        """Yields one (input-sequence, next-frame) pair per file in output/."""

        def __init__(self, data_filenames, batch_size):
            self.data_filenames = data_filenames
            self.batch_size = batch_size  # unused; each item is one sequence

        def __len__(self):
            return len(self.data_filenames)

        def __getitem__(self, idx):
            # All frames but the last form the input sequence; the final
            # frame is the prediction target. Shapes become
            # (1, T-1, H, W, 1) and (1, H, W, 1).
            data = restore_data('output/{0}'.format(self.data_filenames[idx]))
            batch_X = np.expand_dims(np.expand_dims(data[:-1, :, :], axis=0),
                                     axis=-1)
            batch_Y = np.expand_dims(np.expand_dims(data[-1, :, :], axis=0),
                                     axis=-1)

            # Min-max scale to [0, 1] using the dataset's fixed value range.
            __max__ = 17.0
            __min__ = -34.0
            __range__ = __max__ - __min__

            X = (batch_X - __min__) / __range__
            Y = (batch_Y - __min__) / __range__

            return (X, Y)

    # 60/30/10 train/validation/test split over the saved sequences.
    listFile = os.listdir('output/')
    nTrain = int(len(listFile) * 0.60)
    nVal = int(len(listFile) * 0.3)
    trainFiles = listFile[:nTrain]
    valFiles = listFile[nTrain:nTrain + nVal]
    testFiles = listFile[nTrain + nVal:]

    trainGenerator = MyGenerator(trainFiles, 1)
    valGenerator = MyGenerator(valFiles, 1)

    input_timesteps = 12
    img_height = 128
    img_width = 128

    # Optimizer: Adam by default, SGD on request.
    opt = optimizers.Adam(lr=lr)
    if opt_type == 'sgd':
        opt = optimizers.SGD(lr=lr)

    input_shape = (input_timesteps, img_height, img_width, 1)
    compile_params = {'optimizer': opt, 'loss': 'mse', 'metrics': [PSNRLoss]}

    # Model architecture
    source = keras.Input(name='seed', shape=input_shape, dtype=tf.float32)
    model = conv_lstm_2D(filters=64, kernel_size=3, strides=1,
                         padding='same')(source)
    model = BatchNormalization()(model)
    model = conv_lstm_2D(filters=64, kernel_size=3, strides=1,
                         padding='same')(model)
    model = BatchNormalization()(model)
    model = conv_lstm_2D(filters=64, kernel_size=3, strides=1,
                         padding='same')(model)
    model = BatchNormalization()(model)
    model = conv_lstm_2D(filters=64, kernel_size=3, strides=1,
                         padding='same')(model)
    model = BatchNormalization()(model)
    model = conv_lstm_2D(filters=64,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         return_sequences=False)(model)
    model = BatchNormalization()(model)

    predict_img = conv_2D(filters=1, kernel_size=3, strides=1,
                          padding='same')(model)
    model = keras.Model(inputs=[source], outputs=[predict_img])

    model = compile_model(model, compile_params)
    model.fit_generator(generator=trainGenerator,
                        steps_per_epoch=nTrain,
                        epochs=60,
                        validation_data=valGenerator,
                        validation_steps=nVal)

    modelFn = 'model_{0}_{1}.h5'.format(opt_type, str(lr).replace('.', ''))

    model.save(modelFn)

    print('Saved trained model to {0}'.format(modelFn))

    del model  # release the model between successive training runs
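
restore_data and PSNRLoss are imported from elsewhere in the project and not shown here. PSNRLoss is most likely the usual Keras peak-signal-to-noise metric for [0, 1]-scaled data; a common formulation (an assumption, not the project's code) plus a hypothetical driver loop over the optimizer/learning-rate grid:

    import tensorflow.keras.backend as K

    def PSNRLoss(y_true, y_pred):
        # PSNR in dB for inputs scaled to [0, 1]; tracked as a metric,
        # so higher is better. Assumed equivalent, not verified.
        return -10.0 * K.log(K.mean(K.square(y_pred - y_true))) / K.log(10.0)

    # Illustrative sweep; the learning-rate values are placeholders.
    for opt_type in ('adam', 'sgd'):
        for lr in (1e-3, 1e-4):
            trainByCompileParams(opt_type, lr)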