Ejemplo n.º 1
0
    def build(self, hp):
        """Build and compile a hyper convolutional LSTM model for tuning.

        Parameters
        --------------------
        hp : kerastuner.HyperParameters
            Container for both a hyperparameter space, and current values.

        Returns
        -----------------
        keras.Sequential
            Compiled ConvLSTM2D stack whose filter count, learning rate
            and loss function are sampled from ``hp``.
        """
        model = Sequential()
        # Adding the first layer.
        # NOTE(review): `n_lat` / `n_lon` are read from module scope —
        # confirm they are defined wherever this class is instantiated.
        model.add(
            ConvLSTM2D(
                filters=hp.Int('filters', min_value=32, max_value=512,
                               step=32),
                kernel_size=(3, 3),
                input_shape=(self.seq_length, n_lat, n_lon,
                             self.NUM_INPUT_VARS),
                #kernel_initializer=self.KERNAL_INIT,
                padding=self.PADDING,
                return_sequences=self.RETURN_SEQUENCE,
                data_format=self.DATA_FORMAT))

        #prev_filter = filters[0]
        for i in range(self.num_hidden_layers):
            # Begin with 3D convolutional LSTM layer.
            # NOTE(review): the hyperparameter name 'filters' is reused, so
            # keras-tuner returns the SAME sampled value for every hidden
            # layer instead of tuning each independently — confirm the
            # sharing is intentional (use f'filters_{i}' otherwise).
            model.add(
                ConvLSTM2D(
                    filters=hp.Int('filters',
                                   min_value=32,
                                   max_value=512,
                                   step=32),
                    kernel_size=(3, 3),
                    #kernel_initializer=self.KERNAL_INIT,
                    padding=self.PADDING,
                    return_sequences=self.RETURN_SEQUENCE,
                    data_format=self.DATA_FORMAT))
        # Adding the last layer: 1x1 ConvLSTM mapping to the output filters.
        model.add(
            ConvLSTM2D(
                filters=self.OUTPUT_FILTER,
                kernel_size=(1, 1),
                #kernel_initializer = self.KERNAL_INIT,
                padding=self.PADDING,
                return_sequences=self.RETURN_SEQUENCE,
                data_format=self.DATA_FORMAT))

        # Learning rate and loss are themselves tunable hyperparameters.
        model.compile(optimizer=optimizers.Adam(
            hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
                      loss=hp.Choice(
                          'loss',
                          values=['mean_squared_error', 'mean_absolute_error'],
                          default='mean_squared_error'),
                      metrics=['mean_squared_error', 'mean_absolute_error'])

        return model
Ejemplo n.º 2
0
 def __init__(self):
     """Build the encoder and decoder sub-networks of the autoencoder."""
     super(Autoencoder, self).__init__()
     # assumes `input_shape` is defined at module scope — TODO confirm
     encoder_layers = [
         Input(shape=input_shape),
         Reshape((1, 30, 32, 32)),
         TimeDistributed(Conv2D(32, (3, 3), padding='SAME')),
         TimeDistributed(Conv2D(128, (3, 3), padding='SAME')),
         LayerNormalization(),
         ConvLSTM2D(64, (3, 3), strides=(1, 1), padding='SAME',
                    return_sequences=True),
         LayerNormalization(),
         ConvLSTM2D(8, (3, 3), strides=(1, 1), padding='SAME',
                    return_sequences=True),
     ]
     decoder_layers = [
         TimeDistributed(Conv2DTranspose(32, (3, 3), padding='SAME')),
         LayerNormalization(),
         TimeDistributed(Conv2DTranspose(32, (3, 3), padding='SAME')),
     ]
     self.encoder = Sequential(encoder_layers, name='Encoder')
     self.decoder = Sequential(decoder_layers, name='Decoder')
Ejemplo n.º 3
0
    def __init__(self, num_classes, samples_per_class, dropout=0.1):
        """Create the ConvLSTM feature extractor and LSTM read-out layers."""
        super(ConvMANN, self).__init__()

        self.num_classes = num_classes
        self.samples_per_class = samples_per_class

        def _conv_stage(filters):
            # Recurrent dropout is deliberately twice the input dropout.
            return ConvLSTM2D(filters,
                              3,
                              activation='relu',
                              dropout=dropout,
                              recurrent_dropout=2 * dropout,
                              return_sequences=True)

        self.conv1 = _conv_stage(16)
        self.pool1 = TimeDistributed(MaxPool2D())

        self.conv2 = _conv_stage(32)
        self.pool2 = TimeDistributed(MaxPool2D())

        self.flatten = TimeDistributed(Flatten())

        self.lstm1 = LSTM(256, return_sequences=True)
        self.lstm2 = LSTM(num_classes, return_sequences=True)
Ejemplo n.º 4
0
def deconv_block_lstm_sd(base, conc_layer, layer, batch_norm, dropout,
                         img_size, dr_rate):
    """Upsample ``layer``, fuse it with ``conc_layer`` through a backward
    ConvLSTM2D, and finish with a conv block (optionally with spatial
    dropout applied in between)."""
    upsampled = Conv2DTranspose(filters=base,
                                kernel_size=(3, 3),
                                strides=(2, 2),
                                padding='same')(layer)

    # Give both tensors an explicit time axis so they can be stacked
    # into a length-2 sequence for the ConvLSTM.
    size = np.int32(img_size)
    seq_skip = Reshape(target_shape=(1, size, size, base))(conc_layer)
    seq_up = Reshape(target_shape=(1, size, size, base))(upsampled)
    stacked = concatenate([seq_skip, seq_up], axis=1)

    # Both branches of the original built an identical ConvLSTM2D, so it
    # is constructed once here; only the dropout layer is conditional.
    fused = ConvLSTM2D(np.int32(base / 2), (3, 3),
                       padding='same',
                       return_sequences=False,
                       go_backwards=True)(stacked)
    if dropout:
        fused = SpatialDropout2D(dr_rate)(fused)
    return conv_block(base, fused, batch_norm)
Ejemplo n.º 5
0
def load_model():
	"""Build and compile the spatiotemporal autoencoder used for
	abnormal event detection in videos.
	"""
	model = Sequential()

	# Spatial encoder: two strided Conv3D layers shrink each frame.
	model.add(Conv3D(filters=128, kernel_size=(11, 11, 1), strides=(4, 4, 1),
	                 padding='valid', input_shape=(227, 227, 10, 1),
	                 activation='tanh'))
	model.add(Conv3D(filters=64, kernel_size=(5, 5, 1), strides=(2, 2, 1),
	                 padding='valid', activation='tanh'))

	# Temporal encoder/decoder: three stacked ConvLSTM2D layers.
	model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), strides=1,
	                     padding='same', dropout=0.4, recurrent_dropout=0.3,
	                     return_sequences=True))
	model.add(ConvLSTM2D(filters=32, kernel_size=(3, 3), strides=1,
	                     padding='same', dropout=0.3, return_sequences=True))
	model.add(ConvLSTM2D(filters=64, kernel_size=(3, 3), strides=1,
	                     return_sequences=True, padding='same', dropout=0.5))

	# Spatial decoder mirrors the encoder with transposed convolutions.
	model.add(Conv3DTranspose(filters=128, kernel_size=(5, 5, 1),
	                          strides=(2, 2, 1), padding='valid',
	                          activation='tanh'))
	model.add(Conv3DTranspose(filters=1, kernel_size=(11, 11, 1),
	                          strides=(4, 4, 1), padding='valid',
	                          activation='tanh'))

	model.compile(optimizer='adam', loss='mean_squared_error',
	              metrics=['accuracy'])

	return model
Ejemplo n.º 6
0
def encode_block_lstm(size, inputs, kernel, stride, activation, kinit, padding, max_pool=True,
                      batch_normalization=False, mask=None):
    """Two ConvLSTM2D stages (each optionally batch-normalized, then
    activated) followed by an optional 3D max-pool.

    Returns ``[final_cell_state, pooled_output_or_None]``.
    """
    # Bias is redundant when batch normalization immediately follows.
    use_bias = not batch_normalization

    def _stage(tensor):
        # can't set initial_state=(state_h, state_c) due to a bug in keras
        out, _h, c = ConvLSTM2D(size, kernel_size=kernel, strides=stride,
                                kernel_initializer=kinit, use_bias=use_bias,
                                padding=padding, return_sequences=True,
                                return_state=True)(tensor, mask=mask)
        if batch_normalization:
            out = BatchNormalization()(out)
        return Activation(activation)(out), c

    x, _ = _stage(inputs)
    x, state_c = _stage(x)

    pooled = MaxPooling3D(pool_size=(2, 2, 2))(x) if max_pool else None
    return [state_c, pooled]
Ejemplo n.º 7
0
def main():
    """Build a 4-layer stacked ConvLSTM2D model over 15-step 40x40 RGB
    sequences and print its summary.

    The final layer uses ``return_sequences=False``, so the model output
    collapses the time axis to a single (40, 40, 70) feature map.
    """
    # `input` shadowed the builtin in the original; renamed.  The original
    # also accumulated `Inputs`/`Outputs` lists that were never used — dead
    # code removed.
    frames = Input(shape=(15, 40, 40, 3))
    x = frames
    # Increasing filter counts per layer; only the last drops the time axis.
    for filters, keep_sequence in ((30, True), (50, True), (60, True),
                                   (70, False)):
        x = ConvLSTM2D(filters=filters,
                       kernel_size=(3, 3),
                       padding='same',
                       return_sequences=keep_sequence,
                       data_format='channels_last')(x)
    model = Model(inputs=frames, outputs=x)
    model.summary()
Ejemplo n.º 8
0
def encode_block_lstm(size,
                      inputs,
                      kernel,
                      stride,
                      activation,
                      kinit,
                      padding,
                      max_pool=True,
                      batch_normalization=False,
                      mask=None):
    """Two stacked ConvLSTM2D layers; the second collapses the time axis.

    Returns ``[features]``, or ``[features, pooled]`` when ``max_pool``
    is True.  Batch normalization, if enabled, is applied only after the
    second layer.
    """
    sequence = ConvLSTM2D(size,
                          kernel_size=kernel,
                          strides=stride,
                          activation=activation,
                          kernel_initializer=kinit,
                          padding=padding,
                          return_sequences=True)(inputs, mask=mask)

    features = ConvLSTM2D(size,
                          kernel_size=kernel,
                          strides=stride,
                          activation=activation,
                          kernel_initializer=kinit,
                          padding=padding,
                          return_sequences=False)(sequence, mask=mask)

    if batch_normalization:
        features = BatchNormalization()(features)

    result = [features]
    if max_pool:
        result.append(MaxPooling2D(pool_size=(2, 2))(features))
    return result
Ejemplo n.º 9
0
def encode_block_lstm(size, inputs, kernel, stride, activation, kinit, padding, max_pool=True,
                      batch_normalization=False, mask=None):
    """Two ConvLSTM2D stages with linear activations, optional batch norm,
    and PReLU non-linearities; returns [final cell state, pooled output or
    None].

    Note: the `activation` parameter is accepted but unused — both layers
    are built with activation='linear' and PReLU is applied externally.
    """
    result = []
    # Bias is redundant when batch normalization immediately follows.
    use_bias = not batch_normalization
    x, state_h, state_c = ConvLSTM2D(size, kernel_size=kernel, strides=stride,
                                     kernel_initializer=kinit, use_bias=use_bias, activation='linear',
                                     padding=padding, return_sequences=True, return_state=True)(inputs, mask=mask)
    x = BatchNormalization()(x) if batch_normalization else x
    x = PReLU()(x) # In theory this should avoid the vanishing gradient situation that is, arguably more accute with RNNs

    x, state_h, state_c = ConvLSTM2D(size, kernel_size=kernel, strides=stride,
                                     kernel_initializer=kinit, use_bias=use_bias, activation='linear',
                                     padding=padding, return_sequences=True, return_state=True)(x,
                                                                                                mask=mask)  # can't set initial_state=(state_h, state_c) due to a bug in keras

    x = BatchNormalization()(x) if batch_normalization else x
    x = PReLU()(x)
    # result.append(x)
    # Only the second layer's cell state is returned, not its output.
    result.append(state_c)

    if max_pool:
        pool1 = MaxPooling3D(pool_size=(2, 2, 2))(x)
        result.append(pool1)
    else:
        # Keep the result a 2-list so callers can unpack unconditionally.
        result.append(None)

    return result
Ejemplo n.º 10
0
def create_model(pixel, filters, channel, hiddenlayers=4):
    """Build and compile a ConvLSTM2D regressor.

    ``hiddenlayers`` counts the recurrent layers between input and the
    final (non-sequence) ConvLSTM; a single-channel Conv2D head produces
    the output.  Compiled with MSE loss, Adam, and an MAE metric.
    """
    model = Sequential()
    model.add(ConvLSTM2D(filters=filters,
                         kernel_size=(3, 3),
                         input_shape=(None, pixel, pixel, channel),
                         padding='same',
                         return_sequences=True))
    for _ in range(hiddenlayers - 1):
        model.add(ConvLSTM2D(filters=filters,
                             kernel_size=(3, 3),
                             padding='same',
                             return_sequences=True))
    # Last recurrent layer collapses the time axis.
    model.add(ConvLSTM2D(filters=filters,
                         kernel_size=(3, 3),
                         padding='same',
                         return_sequences=False))
    model.add(Conv2D(filters=1,
                     kernel_size=(3, 3),
                     activation='elu',
                     padding='same',
                     data_format='channels_last'))
    model.compile(loss='mean_squared_error', optimizer='adam',
                  metrics=['mae'])
    return model
Ejemplo n.º 11
0
 def model_function():
     """Four stacked single-filter ConvLSTM2D layers plus a 1x1 sigmoid
     Conv2D head, compiled with MSE and a gradient-clipped SGD optimizer.
     """
     # assumes `num_steps` and `the_shape` come from enclosing scope —
     # TODO confirm
     new_shape = (num_steps, the_shape[0], the_shape[1], 1)

     model = Sequential()
     model.add(ConvLSTM2D(filters=1, kernel_size=(3, 3),
                          input_shape=new_shape, data_format='channels_last',
                          return_sequences=True, padding='same',
                          activation='relu'))
     # Two wide-kernel middle layers.
     for kernel in ((9, 9), (9, 9)):
         model.add(ConvLSTM2D(filters=1, kernel_size=kernel,
                              data_format='channels_last',
                              return_sequences=True, padding='same',
                              activation='relu'))
     # Final recurrent layer drops the time axis before the conv head.
     model.add(ConvLSTM2D(filters=1, kernel_size=(3, 3),
                          data_format='channels_last',
                          return_sequences=False, padding='same',
                          activation='relu'))
     model.add(Conv2D(filters=1, kernel_size=(1, 1), padding='same',
                      data_format='channels_last', activation='sigmoid'))

     # Clip gradients to keep the recurrent layers stable.
     optimizer = SGD(clipnorm=1.0, clipvalue=0.5)
     model.compile(loss='mean_squared_error', optimizer=optimizer)

     return model
Ejemplo n.º 12
0
    def ano_model(self, input_shape):
        """Define a different model from the main one (another model).

        (You can tune hyperparameters, add hidden layers, change filters or
        kernels, ... anything.)

        Parameters
        ----------
        input_shape : tuple
            Same input shape as the main model.

        Returns
        -------
        model : keras.Sequential
            The new (uncompiled) model, ending in a 101-unit Dense layer.
        """
        model = Sequential()
        #### Question (d): your implementation starts here (don't delete this line)
        model.add(
            ConvLSTM2D(filters=64,
                       kernel_size=5,
                       strides=2,
                       padding="same",
                       input_shape=input_shape,
                       return_sequences=True))
        # Second ConvLSTM collapses the time axis (return_sequences
        # defaults to False).
        model.add(
            ConvLSTM2D(filters=64, kernel_size=5, strides=2, padding="same"))
        model.add(Dropout(0.5))
        model.add(Flatten())
        model.add(Dense(units=256))
        model.add(Dropout(0.5))
        # 101 output units — presumably one per UCF101 class; verify
        # against the dataset used by the caller.
        model.add(Dense(units=101))

        #### Question (d): your implementation ends here (don't delete this line)
        return model
def joint_convLstm(num_filters, kernel_length, input_timesteps, num_links,
                   output_timesteps, quantiles, prob, loss, opt):
    """Encoder/decoder ConvLSTM for multi-step per-link forecasts.

    The encoder ConvLSTM collapses the input sequence; Flatten +
    RepeatVector + Reshape expand the state back to ``output_timesteps``
    before the decoder ConvLSTM.  The time-distributed head predicts
    ``len(quantiles) + 1`` values per step.  Compiled with the supplied
    ``loss`` and ``opt``.
    """
    model = Sequential()
    model.add(BatchNormalization(
        name='batch_norm_0',
        input_shape=(input_timesteps, num_links, 1, 1)))
    model.add(ConvLSTM2D(name='conv_lstm_1',
                         filters=num_filters,
                         kernel_size=(kernel_length, 1),
                         padding='same',
                         return_sequences=False))
    model.add(Dropout(prob, name='dropout_1'))
    model.add(BatchNormalization(name='batch_norm_1'))

    # Re-expand the encoded state to one copy per output timestep.
    model.add(Flatten())
    model.add(RepeatVector(output_timesteps))
    model.add(Reshape((output_timesteps, num_links, 1, num_filters)))

    model.add(ConvLSTM2D(name='conv_lstm_2',
                         filters=num_filters,
                         kernel_size=(kernel_length, 1),
                         padding='same',
                         return_sequences=True))
    model.add(Dropout(prob, name='dropout_2'))

    model.add(TimeDistributed(Dense(units=len(quantiles) + 1, name='dense_1')))
    model.compile(loss=loss, optimizer=opt)
    return model
Ejemplo n.º 14
0
    def _get_model(self):
        """Initialize ``self.model``: two ConvLSTM2D + MaxPool3D stages
        feeding a small dense head with one unit per target level."""
        model = Sequential()
        model.add(ConvLSTM2D(filters=32,
                             kernel_size=(5, 5),
                             input_shape=(self.seq_len,
                                          self.processor.lat_len,
                                          self.processor.lon_len,
                                          self.processor.channel),
                             data_format='channels_last',
                             padding='same',
                             return_sequences=True))
        model.add(MaxPool3D(pool_size=(1, 4, 4),
                            padding='valid',
                            data_format='channels_last'))
        model.add(ConvLSTM2D(filters=16,
                             kernel_size=(3, 3),
                             data_format='channels_last',
                             padding='same',
                             return_sequences=True))
        model.add(MaxPool3D(pool_size=(1, 4, 4),
                            padding='valid',
                            data_format='channels_last'))
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Dropout(0.2))
        model.add(Dense(16))
        model.add(Dropout(0.2))
        model.add(Dense(len(self.processor.target_levels)))

        # NOTE(review): binary_crossentropy is paired with
        # categorical_accuracy here — confirm the label encoding matches.
        model.compile(loss=binary_crossentropy,
                      optimizer=Adadelta(),
                      metrics=[categorical_accuracy])
        self.model = model
Ejemplo n.º 15
0
def create_model(args):
    """Masked two-layer ConvLSTM video classifier.

    ``args`` must provide ``frame_size`` and ``image_size``.  The mask
    input lets the recurrent layers ignore padded frames.  Returns an
    uncompiled two-input Model with a 2-unit sigmoid output.
    """
    frames = Input(name='img',
                   shape=(args.frame_size, args.image_size, args.image_size,
                          3))
    frame_mask = Input(name='mask', shape=(args.frame_size, ))

    x = ConvLSTM2D(filters=20,
                   kernel_size=(3, 3),
                   padding='same',
                   return_sequences=True)(frames, mask=frame_mask)
    x = BatchNormalization()(x)

    # Second ConvLSTM collapses the time axis.
    x = ConvLSTM2D(filters=10, kernel_size=(3, 3),
                   padding='same')(x, mask=frame_mask)
    x = BatchNormalization()(x)

    x = Flatten()(x)
    x = Dense(32, activation='relu')(x)
    output = Dense(2, activation='sigmoid')(x)

    return Model([frames, frame_mask], output)
Ejemplo n.º 16
0
    def __init__(self, num_layers, num_filters=32, *args, **kwargs):
        """Create the stacked ConvLSTM layers, a final non-sequence
        ConvLSTM, the 3-channel sigmoid conv head, and one batch norm per
        recurrent layer."""
        super(DeterministicModel, self).__init__(*args, **kwargs)
        self.num_layers = num_layers
        self.num_filters = num_filters

        def _lstm(in_channels, keep_sequence):
            return ConvLSTM2D(filters=num_filters,
                              kernel_size=(5, 5),
                              input_shape=(None, 224, 224, in_channels),
                              padding='same',
                              return_sequences=keep_sequence)

        # The first layer sees RGB; subsequent layers see num_filters
        # channels.
        self.conv_lstm_layers = [
            _lstm(3 if i == 0 else num_filters, True)
            for i in range(num_layers)
        ]
        self.final_conv_lstm = _lstm(num_filters, False)
        self.final_conv = Conv2D(filters=3,
                                 kernel_size=(3, 3),
                                 padding='same',
                                 activation='sigmoid')

        self.batchnorms = [BatchNormalization() for _ in range(num_layers + 1)]
Ejemplo n.º 17
0
def model_5_ts(
    input_shape: Tuple[int, int, int, int],
    pool_size: Tuple[int, int] = (1, 1)
) -> List:
    """ Constructs the convolutional LSTM layer w/ decoder & encoder.

    :param input_shape: input shape (nb_steps, height, width, nb_channels)
    :param pool_size: pool size for the sampling layers.
    :returns: list of layers for the model, used in the Sequential model
    """
    encoder_ts = [
        TimeDistributed(layer, input_shape=input_shape)
        for layer in encoder_5(None, pool_size=pool_size)
    ]
    decoder_ts = [
        TimeDistributed(layer) for layer in decoder_5(pool_size=pool_size)
    ]
    # Two identical recurrent layers bridge encoder and decoder.
    bottleneck = [
        ConvLSTM2D(filters=32,
                   kernel_size=(3, 3),
                   data_format='channels_last',
                   padding='same',
                   return_sequences=True)
        for _ in range(2)
    ]
    return encoder_ts + bottleneck + decoder_ts
def get_compiled_model():
    """Three-camera sequence classifier: concatenated inputs, a separable
    conv stem, two regularized ConvLSTM stages, and a 3-way softmax.

    Compiled with categorical cross-entropy and momentum SGD.
    """
    # shape=(batch_size, time_steps, channels, row, col)
    video_shape = (6, 3, 480, 640)
    input_flir = Input(shape=video_shape)
    input_bottom = Input(shape=video_shape)
    input_top = Input(shape=video_shape)

    x = Concatenate(axis=-1)([input_flir, input_bottom, input_top])
    x = TimeDistributed(
        SeparableConv2D(8, (4, 4), activation="relu", padding="same"))(x)

    def _reg():
        # Same L1/L2 penalty used on every regularized layer.
        return regularizers.l1_l2(l1=1e-5, l2=1e-4)

    x = ConvLSTM2D(32, (6, 6),
                   padding="same",
                   kernel_regularizer=_reg(),
                   dropout=0.1,
                   recurrent_dropout=0.1,
                   return_sequences=True)(x)
    x = BatchNormalization()(x)
    x = ConvLSTM2D(64, (4, 4),
                   padding="same",
                   kernel_regularizer=_reg(),
                   dropout=0.1,
                   recurrent_dropout=0.1,
                   return_sequences=False)(x)
    x = BatchNormalization()(x)

    pooled = GlobalAveragePooling2D()(x)
    pooled = Dropout(.2)(pooled)
    yh = Dense(3,
               activation="softmax",
               kernel_regularizer=_reg())(pooled)

    model = Model([input_flir, input_bottom, input_top], yh)
    model.compile(loss=categorical_crossentropy,
                  optimizer=SGD(lr=1e-4, momentum=0.9, decay=1e-4),
                  metrics=["accuracy"])
    return model
def BuildModel(input_shape=(227, 227, 10, 1)):
    """Build the spatiotemporal autoencoder and its encoder sub-model.

    Parameters
    ----------
    input_shape : tuple
        (height, width, frames_per_input, channels).

    Returns
    -------
    (model, encoder) : tuple
        ``model`` maps input volumes to their reconstruction; ``encoder``
        exposes the temporal-encoder activations for the same input.

    Raises
    ------
    ValueError
        If ``input_shape`` is not a tuple of exactly 4 values.
    """
    # Check the type BEFORE calling len(): the original evaluated
    # `len(input_shape) != 4` first, so a non-sized argument (e.g. an int)
    # raised TypeError instead of the intended ValueError.  isinstance also
    # replaces the non-idiomatic `type(...) != tuple` comparison.
    if not isinstance(input_shape, tuple) or len(input_shape) != 4:
        raise ValueError(
            'Invalid value given to the argument `input_shape`, it must be a `tuple` containing 4 values in this manner: (height, width, frames_per_input, channels)'
        )
    inputs = Input(shape=input_shape)  # renamed: `input` shadowed a builtin

    # Spatial Encoder: strided Conv3D layers shrink each frame.
    spatial_enc = Conv3D(filters=128,
                         kernel_size=(11, 11, 1),
                         strides=(4, 4, 1),
                         padding='valid',
                         activation='tanh')(inputs)
    spatial_enc = Conv3D(filters=64,
                         kernel_size=(5, 5, 1),
                         strides=(2, 2, 1),
                         padding='valid',
                         activation='tanh')(spatial_enc)

    # Temporal Encoder
    temporal_enc = ConvLSTM2D(filters=64,
                              kernel_size=(3, 3),
                              strides=1,
                              padding='same',
                              dropout=0.4,
                              recurrent_dropout=0.3,
                              return_sequences=True)(spatial_enc)
    temporal_enc = ConvLSTM2D(filters=32,
                              kernel_size=(3, 3),
                              strides=1,
                              padding='same',
                              dropout=0.3,
                              return_sequences=True)(temporal_enc)

    # Temporal Decoder
    temporal_dec = ConvLSTM2D(filters=64,
                              kernel_size=(3, 3),
                              strides=1,
                              return_sequences=True,
                              padding='same',
                              dropout=0.5)(temporal_enc)

    # Spatial Decoder mirrors the encoder with transposed convolutions.
    spatial_dec = Conv3DTranspose(filters=128,
                                  kernel_size=(5, 5, 1),
                                  strides=(2, 2, 1),
                                  padding='valid',
                                  activation='tanh')(temporal_dec)
    spatial_dec = Conv3DTranspose(filters=1,
                                  kernel_size=(11, 11, 1),
                                  strides=(4, 4, 1),
                                  padding='valid',
                                  activation='tanh')(spatial_dec)

    # Model
    model = Model(inputs=inputs, outputs=spatial_dec)
    encoder = Model(inputs=inputs, outputs=temporal_enc)
    return model, encoder
Ejemplo n.º 20
0
def train():
    """Load the cached dataset, fit a 3-stage ConvLSTM classifier on it,
    and save the trained model to disk."""
    # Load dataset; pixel values are scaled to [0, 1].
    datasets_dir = directory.joinpath('datasets')
    x = np.load(datasets_dir.joinpath(filename_x + '.npy')) / 255
    y = np.load(datasets_dir.joinpath(filename_y + '.npy'))

    # Add a trailing colour-channel axis.
    x = np.expand_dims(x, axis=-1)

    input_shape = x.shape[1:]
    output_shape = y.shape[-1]

    # Build the model: three ConvLSTM blocks, then a dense head.
    model = Sequential()
    model.add(ConvLSTM2D(filters=40,
                         kernel_size=(3, 3),
                         input_shape=input_shape,
                         padding='same',
                         return_sequences=True))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 2, 2), padding='same'))

    model.add(ConvLSTM2D(filters=40,
                         kernel_size=(3, 3),
                         padding='same',
                         return_sequences=True))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 2, 2), padding='same'))

    # Final recurrent block collapses the time axis.
    model.add(ConvLSTM2D(filters=40,
                         kernel_size=(3, 3),
                         padding='same',
                         return_sequences=False))
    model.add(BatchNormalization())

    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(output_shape, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['binary_accuracy'])

    # Train, then persist the fitted model.
    model.fit(x, y, epochs=200, batch_size=10, validation_split=0.05)
    model.save(str(directory.joinpath('models').joinpath(filename_model)))
Ejemplo n.º 21
0
def get_model(reload_model=False):
    """Return the conv/ConvLSTM autoencoder.

    Parameters
    ----------
    reload_model : bool
        When False, load the saved model from ``MODEL_PATH``; when True,
        rebuild it, train it on the training set, and save it.
    """
    if not reload_model:
        return load_model(MODEL_PATH,
                          custom_objects={
                              'LayerNormalization':
                              tf.keras.layers.LayerNormalization
                          })

    training_set = np.array(get_training_set())

    seq = Sequential()
    # Spatial encoder.
    seq.add(
        TimeDistributed(Conv2D(128, (11, 11), strides=4, padding="same"),
                        batch_input_shape=(None, 10, 256, 256, 3)))
    seq.add(tf.keras.layers.LayerNormalization())
    seq.add(TimeDistributed(Conv2D(64, (5, 5), strides=2, padding="same")))
    seq.add(tf.keras.layers.LayerNormalization())
    # Temporal bottleneck.
    seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
    seq.add(tf.keras.layers.LayerNormalization())
    seq.add(ConvLSTM2D(32, (3, 3), padding="same", return_sequences=True))
    seq.add(tf.keras.layers.LayerNormalization())
    seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
    seq.add(tf.keras.layers.LayerNormalization())
    # Spatial decoder.
    seq.add(
        TimeDistributed(Conv2DTranspose(64, (5, 5), strides=2,
                                        padding="same")))
    seq.add(tf.keras.layers.LayerNormalization())
    seq.add(
        TimeDistributed(
            Conv2DTranspose(128, (11, 11), strides=4, padding="same")))
    seq.add(tf.keras.layers.LayerNormalization())
    seq.add(
        TimeDistributed(
            Conv2D(1, (11, 11), activation="sigmoid", padding="same")))
    print(seq.summary())

    seq.compile(loss='mse',
                optimizer=tf.keras.optimizers.Adam(lr=1e-4,
                                                   decay=1e-5,
                                                   epsilon=1e-6))
    # Autoencoder: the input serves as its own reconstruction target.
    seq.fit(training_set,
            training_set,
            batch_size=BATCH_SIZE,
            epochs=EPOCHS,
            shuffle=False)
    seq.save(MODEL_PATH)
    return seq
Ejemplo n.º 22
0
def att_multistream():  #get_new_model4
    """Multi-stream ConvLSTM + attention-encoder forecaster.

    One input/ConvLSTM branch per window position; branch outputs are
    concatenated, run through a single-head Encoder, and mapped to one
    linear output per target city.  Returns the uncompiled Model.
    """
    kernel_size = 7
    window_size = 5
    lags_per_input = 2
    number_features = 18
    number_cities = 18
    number_target_cities = 6
    filters_convlstm1 = 4
    filters_convlstm2 = 4
    dense_nodes = 50

    inputs = [
        Input(shape=(lags_per_input, number_features, number_cities, 1),
              name="input" + str(i + 1)) for i in range(window_size)
    ]

    convs = []
    for branch_input in inputs:
        x = ConvLSTM2D(filters_convlstm1, (kernel_size, kernel_size),
                       padding='same',
                       return_sequences=True,
                       activation='relu',
                       data_format="channels_last")(branch_input)
        x = BatchNormalization()(x)
        # Second recurrent layer collapses the time axis.
        x = ConvLSTM2D(filters_convlstm2, (kernel_size, kernel_size),
                       padding='same',
                       return_sequences=False,
                       activation='relu',
                       data_format="channels_last")(x)
        convs.append(BatchNormalization()(x))

    merge = concatenate(convs, axis=-1, name="merge")
    reshaped = Reshape((324, 20))(merge)
    num_heads = 1
    encoder = Encoder(num_layers=1,
                      d_model=20,
                      num_heads=num_heads,
                      dff=64,
                      input_vocab_size=8500,
                      maximum_position_encoding=100)
    encoded = encoder(reshaped)

    flat = BatchNormalization()(Flatten()(encoded))
    hidden = Dense(dense_nodes, activation="linear", name="dense2")(flat)
    output = Dense(number_target_cities, activation="linear",
                   name="dense3")(hidden)
    return Model(inputs=inputs, outputs=output)
Ejemplo n.º 23
0
def get_model():
    """Two-headed ConvLSTM autoencoder: frame prediction + optical flow.

    A TimeDistributed conv encoder downsamples each frame, a ConvLSTM
    bottleneck collapses the time axis, and two transposed-conv decoders
    produce a predicted frame and a 3-channel optical-flow map. The flow
    decoder uses skip connections from the last encoder timestep.
    Depends on module-level ``SEQ_LEN`` and ``DIM``.
    """
    seq_in = Input(shape=(SEQ_LEN, DIM[1], DIM[0], DIM[2]),
                   name='input_seq')

    # --- encoder stage 1 ---
    e1 = TimeDistributed(Conv2D(128, (3, 3), strides=1,
                                padding='same'))(seq_in)
    e1 = TimeDistributed(AveragePooling2D())(e1)
    e1 = BatchNormalization()(e1)
    e1 = LeakyReLU()(e1)

    # --- encoder stage 2 ---
    e2 = TimeDistributed(Conv2D(64, (3, 3), strides=1, padding='same'))(e1)
    e2 = TimeDistributed(AveragePooling2D())(e2)
    e2 = BatchNormalization()(e2)
    e2 = LeakyReLU()(e2)

    # --- recurrent bottleneck; second ConvLSTM drops the time axis ---
    core = ConvLSTM2D(64, (3, 3), padding='same', return_sequences=True)(e2)
    core = BatchNormalization()(core)
    core = ConvLSTM2D(64, (3, 3), padding='same')(core)
    core = BatchNormalization()(core)

    # --- prediction decoder, stage 1 ---
    pred = UpSampling2D()(core)
    pred = Conv2DTranspose(32, (3, 3), strides=1, padding='same')(pred)
    pred = BatchNormalization()(pred)
    pred = LeakyReLU()(pred)

    # --- flow decoder, stage 1: skip from the last encoder-2 frame ---
    skip = Concatenate(axis=-1)([core, e2[:, -1, :, :]])
    flow = UpSampling2D()(skip)
    flow = Conv2DTranspose(32, (3, 3), strides=1, padding='same')(flow)
    flow = BatchNormalization()(flow)
    flow = LeakyReLU()(flow)

    # --- prediction decoder, stage 2 ---
    pred = UpSampling2D()(pred)
    pred = Conv2DTranspose(64, (3, 3), strides=1, padding='same')(pred)
    pred = BatchNormalization()(pred)
    pred = LeakyReLU()(pred)

    # --- flow decoder, stage 2: skip from the last encoder-1 frame ---
    skip = Concatenate(axis=-1)([flow, e1[:, -1, :, :]])
    flow = UpSampling2D()(skip)
    flow = Conv2DTranspose(64, (3, 3), strides=1, padding='same')(flow)
    flow = BatchNormalization()(flow)
    flow = LeakyReLU()(flow)

    # Per-pixel sigmoid heads.
    out_pred = Conv2D(DIM[-1], (1, 1), activation='sigmoid',
                      name='prediction')(pred)
    out_flow = Conv2D(3, (1, 1), activation='sigmoid',
                      name='optical_flow')(flow)

    model = Model(inputs=seq_in,
                  outputs=[out_pred, out_flow],
                  name='autoencoder')
    model.summary()
    # plot_model(model, show_shapes=True, show_layer_names=False)
    return model
Ejemplo n.º 24
0
    def model_function(num_channels=1):
        """Build and compile a 3-layer ConvLSTM nowcasting model.

        Parameters
        ----------
        num_channels : int
            Channels per input frame; also reused as the filter count of
            every ConvLSTM layer. Spatial dimensions come from the
            closure variable ``the_shape`` (assumed (H, W) -- TODO
            confirm against the enclosing scope).

        Returns
        -------
        Sequential
            Model compiled with MSE loss and SGD; maps a variable-length
            frame sequence to a single sigmoid-activated frame.
        """
        new_shape = (None, the_shape[0], the_shape[1], num_channels)
        kern_size = (5, 5)

        model = Sequential()
        model.add(ConvLSTM2D(filters=num_channels, kernel_size=kern_size,
                             data_format='channels_last',
                             recurrent_activation='hard_sigmoid',
                             activation='tanh', padding='same',
                             return_sequences=True,
                             input_shape=new_shape))
        model.add(BatchNormalization())

        model.add(ConvLSTM2D(filters=num_channels, kernel_size=kern_size,
                             data_format='channels_last',
                             recurrent_activation='hard_sigmoid',
                             activation='tanh', padding='same',
                             return_sequences=True))
        model.add(BatchNormalization())

        # Final recurrent layer collapses the time axis.
        model.add(ConvLSTM2D(filters=num_channels, kernel_size=kern_size,
                             data_format='channels_last',
                             recurrent_activation='hard_sigmoid',
                             activation='tanh', padding='same',
                             return_sequences=False))
        # NOTE(review): the original stacked two consecutive
        # BatchNormalization layers here (leftover from a commented-out
        # ConvLSTM block); the redundant second one was removed.
        model.add(BatchNormalization())

        # Collapse to a single-channel frame with a per-pixel sigmoid.
        model.add(Conv2D(filters=1, kernel_size=(1, 1), padding='same',
                         data_format='channels_last', activation='sigmoid'))
        model.compile(loss='mean_squared_error', optimizer='SGD')
        return model
Ejemplo n.º 25
0
    def ModelJustConv(self):
        """Generator model for a Sentinel-2 sequence.

        Two (TimeDistributed Conv2D -> LeakyReLU -> Bidirectional
        ConvLSTM2D) stages -- the second collapses the time axis -- are
        followed by three 1x1 tanh convolutions that reduce the output
        to 3 channels.
        """
        sentinel_in = Input(shape=self.inputShape, name='Sentinel')

        # Sentinel-2 preprocessing: two conv/recurrent stages. Only the
        # first keeps the full sequence (return_sequences=True).
        x = sentinel_in
        for keep_seq, lstm_name in ((True, 'LstmLayer256'),
                                    (False, 'LstmLayer128')):
            x = TimeDistributed(
                Conv2D(filters=64,
                       kernel_size=(3, 3),
                       strides=(1, 1),
                       padding="same"))(x)
            x = LeakyReLU(0.2)(x)
            x = Bidirectional(ConvLSTM2D(filters=8,
                                         kernel_size=(3, 3),
                                         padding='same',
                                         return_sequences=keep_seq),
                              name=lstm_name)(x)

        # 1x1 convolutions progressively narrow the channels to 3.
        for width in (32, 16, 3):
            x = Conv2D(width,
                       kernel_size=(1, 1),
                       strides=(1, 1),
                       activation='tanh')(x)

        return Model(inputs=sentinel_in, outputs=x, name='Generator')
Ejemplo n.º 26
0
def get_model(reload_model=True):
    """Build and train the spatiotemporal autoencoder, or load a saved one.

    Parameters
    ----------
    reload_model : bool
        If False, deserialize the model previously saved at
        ``Config.MODEL_PATH`` and return it without training. If True
        (default), build the model, train it on the training set, save
        it to ``Config.MODEL_PATH``, and return it.

    Returns
    -------
    Sequential
        Conv encoder -> ConvLSTM2D bottleneck -> deconv decoder that
        reconstructs its 10-frame grayscale input (imsize x imsize).
    """
    tensorflow.keras.backend.set_floatx('float32')
    if not reload_model:
        # LayerNormalization is a custom layer, so it must be supplied
        # explicitly for deserialization.
        return load_model(
            Config.MODEL_PATH,
            custom_objects={'LayerNormalization': LayerNormalization})
    training_set = get_training_set()
    training_set = np.array(training_set)
    # Shape: (num_sequences, 10 frames, imsize, imsize, 1 channel).
    training_set = training_set.reshape(-1, 10, imsize, imsize, 1)
    seq = Sequential()
    # Spatial encoder: per-frame strided convs (4x then 2x downsample).
    seq.add(
        TimeDistributed(Conv2D(128, (11, 11), strides=4, padding="same"),
                        batch_input_shape=(None, 10, imsize, imsize, 1)))
    seq.add(LayerNormalization())
    seq.add(TimeDistributed(Conv2D(64, (5, 5), strides=2, padding="same")))
    seq.add(LayerNormalization())
    # Temporal bottleneck: ConvLSTM stack preserving the sequence length.
    seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    seq.add(ConvLSTM2D(32, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    seq.add(ConvLSTM2D(64, (3, 3), padding="same", return_sequences=True))
    seq.add(LayerNormalization())
    # Spatial decoder: mirror of the encoder via transposed convolutions.
    seq.add(
        TimeDistributed(Conv2DTranspose(64, (5, 5), strides=2,
                                        padding="same")))
    seq.add(LayerNormalization())
    seq.add(
        TimeDistributed(
            Conv2DTranspose(128, (11, 11), strides=4, padding="same")))
    seq.add(LayerNormalization())
    seq.add(
        TimeDistributed(
            Conv2D(1, (11, 11), activation="sigmoid", padding="same")))
    print(seq.summary())
    # NOTE(review): `lr`/`decay` are the legacy optimizer argument names;
    # newer TF releases expect `learning_rate` -- confirm the pinned
    # tensorflow version before changing.
    seq.compile(loss='mse', optimizer=Adam(lr=1e-4, decay=1e-5, epsilon=1e-6))
    #seq.compile(loss='mse')
    # Autoencoder training: input reconstructs itself. shuffle=False
    # keeps the sequence order fixed between epochs.
    seq.fit(training_set,
            training_set,
            batch_size=Config.BATCH_SIZE,
            epochs=Config.EPOCHS,
            shuffle=False)
    seq.save(Config.MODEL_PATH)
    return seq
Ejemplo n.º 27
0
def residual_convLSTM2D_block(x, filters, num_class, rd=0.1):
    """Residual block of bidirectional ConvLSTM2D layers along two axes.

    The input is projected to ``num_class`` channels with a 1x1
    convolution, expanded with a trailing singleton axis, and processed
    by two bidirectional ConvLSTM2D branches: one over the original axis
    order and one with axes 1 and 2 transposed. The branch outputs are
    summed, reduced over the channel axis, and added residually to the
    projected input.

    Parameters
    ----------
    x : tensor
        Input feature map; assumed 4-D (batch, H, W, C) -- TODO confirm
        against callers.
    filters : int
        Filter count of each ConvLSTM2D layer.
    num_class : int
        Channels of the 1x1 projection; also the second kernel dimension
        of the ConvLSTM kernels.
    rd : float
        Recurrent dropout rate.

    Returns
    -------
    tensor
        Projected input plus the aggregated recurrent features.
    """
    # 1x1 projection to num_class channels.
    x = Conv2D(num_class,
               kernel_size=(1, 1),
               padding="same",
               strides=1,
               kernel_initializer=he_normal(seed=5),
               bias_initializer='zeros')(x)
    x = LeakyReLU(alpha=0.1)(x)

    # Trailing singleton axis turns the 4-D map into the 5-D sequence
    # layout ConvLSTM2D expects.
    o2 = Lambda(lambda t: t[:, :, :, :, tf.newaxis])(x)
    o3 = Bidirectional(
        ConvLSTM2D(filters=filters,
                   kernel_size=(3, num_class),
                   padding='same',
                   kernel_initializer=he_normal(seed=5),
                   recurrent_initializer=orthogonal(gain=1.0, seed=5),
                   activation='tanh',
                   return_sequences=True,
                   recurrent_dropout=rd))(o2)

    # Same recurrence with axes 1 and 2 swapped, then swapped back, so
    # the block also scans along the other spatial axis.
    o2t = tf.transpose(o2, perm=[0, 2, 1, 3, 4])
    o3t = Bidirectional(
        ConvLSTM2D(filters=filters,
                   kernel_size=(3, num_class),
                   padding='same',
                   kernel_initializer=he_normal(seed=5),
                   recurrent_initializer=orthogonal(gain=1.0, seed=5),
                   activation='tanh',
                   return_sequences=True,
                   recurrent_dropout=rd))(o2t)
    o3t = tf.transpose(o3t, perm=[0, 2, 1, 3, 4])

    # Merge both scan orders and collapse the added channel axis.
    merged = Add()([o3, o3t])
    res = tf.reduce_sum(merged, axis=-1)

    # NOTE(review): the original also built an unused 1x1-conv "shortcut"
    # branch; the residual add below used `x` directly, so that dead
    # branch has been removed. If a projected shortcut was intended,
    # replace `x` with it here.
    return Add()([x, res])
Ejemplo n.º 28
0
def integrated_img(ch_dim):
    """Integrate per-pose rendered features with a ConvLSTM stack.

    The fused ConvLSTM result is tiled back along the pose axis and
    concatenated with the per-pose input features so downstream layers
    can compare each pose against the accumulated representation.
    """
    # 5-D input: (poses, H, W, ch_dim), all spatial dims dynamic.
    feats_in = Input(shape=(None, None, None, ch_dim))

    # Accumulate renderings from different poses.
    fused = ConvLSTM2D(32, kernel_size=(3, 3), padding='same',
                       return_sequences=True)(feats_in)
    fused = ConvLSTM2D(32, kernel_size=(3, 3), padding='same',
                       return_sequences=True)(fused)
    fused = ConvLSTM2D(16, kernel_size=(3, 3),
                       padding='same')(fused)  # pose x H x W x 16

    # Broadcast the fused map along the pose axis so it pairs with every
    # input pose.
    tiled = Lambda(
        lambda t: tf.tile(tf.expand_dims(t[0], axis=1),
                          [1, tf.shape(t[1])[1], 1, 1, 1]))([fused, feats_in])
    paired = Concatenate()([feats_in, tiled])

    return Model(inputs=[feats_in], outputs=[paired])
Ejemplo n.º 29
0
def compile_model(input_timesteps: int = 1,
                  output_timesteps: int = 1,
                  categories: int = 3,
                  loss_function: str = "categorical_crossentropy",
                  initial_learning_rate: float = 0.001,
                  decay_steps: int = 1000,
                  decay_rate: float = 0.95,
                  metrics: list = None,
                  **kwargs):
    """Build and compile a bidirectional ConvLSTM2D sequence classifier.

    A Conv3D stem feeds three bidirectional ConvLSTM2D layers (each
    followed by batch normalization); a final valid-padded softmax
    Conv3D collapses the time axis from ``input_timesteps`` down to
    ``output_timesteps`` while producing ``categories`` class channels.

    Parameters
    ----------
    input_timesteps : int
        Timesteps in the input sequence.
    output_timesteps : int
        Timesteps in the output; the final kernel spans
        ``input_timesteps - output_timesteps + 1`` frames.
    categories : int
        Number of output classes.
    loss_function : str
        Loss passed to ``model.compile``.
    initial_learning_rate, decay_steps, decay_rate :
        ExponentialDecay schedule parameters for the Adam optimizer.
    metrics : list, optional
        Metrics for ``model.compile``; defaults to an empty list. (Was a
        mutable default ``[]`` -- fixed to avoid sharing one list across
        calls.)
    **kwargs
        Ignored; accepted for caller compatibility.

    Returns
    -------
    Sequential, compiled.
    """
    if metrics is None:
        metrics = []
    model = Sequential()
    model.add(Conv3D(filters=16, kernel_size=(3, 3, 3), padding="same"))
    model.add(
        Bidirectional(
            ConvLSTM2D(filters=16,
                       kernel_size=(3, 3),
                       return_sequences=True,
                       padding="same",
                       dropout=0.1)))
    model.add(BatchNormalization())
    model.add(
        Bidirectional(
            ConvLSTM2D(filters=32,
                       kernel_size=(3, 3),
                       return_sequences=True,
                       padding="same",
                       dropout=0.5)))
    model.add(BatchNormalization())
    model.add(
        Bidirectional(
            ConvLSTM2D(filters=32,
                       kernel_size=(3, 3),
                       return_sequences=True,
                       padding="same",
                       dropout=0.5)))
    model.add(BatchNormalization())
    # Valid padding along time shrinks the sequence to output_timesteps.
    model.add(
        Conv3D(filters=categories,
               kernel_size=(input_timesteps - output_timesteps + 1, 1, 1),
               activation="softmax",
               padding="valid"))

    optimizer = Adam(learning_rate=ExponentialDecay(
        initial_learning_rate=initial_learning_rate,
        decay_steps=decay_steps,
        decay_rate=decay_rate))
    model.compile(loss=loss_function, optimizer=optimizer, metrics=metrics)

    return model
Ejemplo n.º 30
0
def construct_model():
    """Build a two-branch regression model and wrap it for 2-GPU training.

    Branch 1: a (seq_length, 5790, 6) input is 1x1-convolved across its
    6 features, reshaped into a pseudo-image, and processed by a stack
    of strided ConvLSTM2D layers. Branch 2: a (seq_length, 6) input goes
    through two small LSTMs. Both branches are flattened, merged through
    dense layers, and regressed to a single value.

    NOTE(review): relies on module-level ``seq_length`` and hard-codes
    ``gpus=2`` in ``multi_gpu_model`` -- the host must expose two GPUs.
    """
    input1 = Input(shape=(seq_length, 5790, 6))
    input2 = Input(shape=(seq_length, 6))

    # Flatten time into the spatial axis: (seq_length*5790, 6).
    re_input1 = Reshape((5790 * seq_length, 6))(input1)

    # Put the 6 features first so channels_first Conv1D mixes them.
    rere_input1 = Permute((2, 1), input_shape=(5790 * seq_length, 6))(re_input1)

    # 1x1 convolutions expand the 6 features to 30 channels.
    conv1 = Conv1D(30, 1, strides=1, padding='valid', activation='relu', data_format="channels_first", name='X1_input')(
        rere_input1)
    conv2 = Conv1D(30, 1, strides=1, padding='valid', activation='relu', data_format="channels_first", name='Conv7')(
        conv1)

    # Auxiliary branch: small LSTM stack over the (seq_length, 6) input.
    LSTM1 = LSTM(4, return_sequences=True)(input2)
    LSTM2 = LSTM(4, return_sequences=False)(LSTM1)

    # Recover the time axis: (30 channels, 5790 positions, seq_length).
    reshape_conv2 = Reshape((30, 5790, seq_length))(conv2)

    # Halve the 5790 axis.
    pool = MaxPooling2D(pool_size=(1, 2), strides=(1, 2), padding='valid', data_format="channels_last")(reshape_conv2)

    # Rearrange to (seq_length, 30, 2895, 1) so ConvLSTM2D can treat
    # seq_length as time and (30, 2895) as the spatial frame.
    reshape1 = Reshape((1, 30, 2895, seq_length))(pool)
    reshape2 = Permute((4, 2, 3, 1), input_shape=(1, 30, 2895, seq_length))(reshape1)

    # Strided ConvLSTM stack; the last layer collapses the time axis.
    convLSTM1 = ConvLSTM2D(filters=10, kernel_size=(3, 3), strides=(3, 3),
                           padding='same', return_sequences=True)(reshape2)
    convLSTM2 = ConvLSTM2D(filters=20, kernel_size=(3, 2), strides=(2, 2),
                           padding='same', return_sequences=True)(convLSTM1)
    convLSTM3 = ConvLSTM2D(filters=40, kernel_size=(3, 1), strides=(2, 2),
                           padding='same', return_sequences=True)(convLSTM2)
    convLSTM4 = ConvLSTM2D(filters=40, kernel_size=(2, 2), strides=(2, 2),
                           padding='same', return_sequences=False)(convLSTM3)

    flat1 = Flatten()(convLSTM4)
    flat2 = Flatten()(LSTM2)

    # Merge the two branches and regress to a single L2-regularized output.
    dense1 = Dense(120)(flat1)
    activation1 = Activation('relu')(dense1)
    merge2 = concatenate([activation1, flat2])
    dense2 = Dense(30)(merge2)
    activation2 = Activation('relu')(dense2)
    output = Dense(1, kernel_regularizer=regularizers.l2(0.000001))(activation2)

    model = Model(inputs=[input1, input2], outputs=[output])
    # NOTE(review): despite the name, `sgd` holds an Adadelta optimizer.
    sgd = optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    model = multi_gpu_model(model, gpus=2)
    model.compile(loss='mean_squared_error', optimizer=sgd)
    print(model.summary())
    return model