Example #1
import os

# Project-local names used below (not shown in this listing): paths, ms
# (movies helpers), show_comparison
def main():

    datetime_folder = input(
        "Enter name (datetime) of folder containing predictions located in resources/prediction (format: 20180910.0817)"
        + os.linesep) + os.sep
    # datetime_folder = '20181025.2126' + os.sep

    model_folder = input(
        "Enter name of subfolder containing prediction for model for specific epoch located in resources/prediction/datetime (format: model.epoch-03-val_loss-0.0025.hdf5)"
        + os.linesep) + os.sep
    # model_folder = 'model.epoch-02-val_loss-0.0008.hdf5' + os.sep

    prediction_folder = os.path.join(paths.PREDICTION, datetime_folder,
                                     model_folder)

    validation_movies_tuple = ms.MoviesTuple(
        path=paths.DATA_FOR_VALIDATION, movies_params=ms.get_movies_params())
    validation_movies_tuple.load_movies()
    validation_movies_tuple.normalize()

    prediction_movies_nn = ms.Movies(path=prediction_folder,
                                     movies_params=ms.get_movies_params())
    prediction_movies_nn.load_movies()
    prediction_movies_nn.normalize()

    prediction_movies_chmu = ms.Movies(path=paths.PREDICTION_CHMU_VALIDATION,
                                       movies_params=ms.get_movies_params())
    prediction_movies_chmu.load_movies()
    prediction_movies_chmu.normalize()
    # TODO: compare the CHMU prediction against validation_movies_tuple

    show_comparison(validation_movies_tuple, prediction_movies_nn,
                    prediction_movies_chmu)
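
The listing does not include show_comparison itself. A hedged sketch of what such a side-by-side viewer might look like, assuming each loaded object exposes its frames as a .movies ndarray (the project's real attribute names may differ):

import matplotlib.pyplot as plt

def show_comparison(validation, prediction_nn, prediction_chmu, n_frames=4):
    # one row per source: ground truth, NN prediction, CHMU prediction
    sources = [("validation", validation), ("NN", prediction_nn),
               ("CHMU", prediction_chmu)]
    fig, axes = plt.subplots(len(sources), n_frames)
    for row, (label, source) in enumerate(sources):
        for col in range(n_frames):
            ax = axes[row, col]
            ax.imshow(source.movies[col], cmap='gray')  # assumed attribute
            ax.axis('off')
            if col == 0:
                ax.set_title(label, loc='left')
    plt.show()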
Example #2
def main():

    # set parameters
    # movies_tuple = ms.MoviesTuple(path=paths.DATA_FOR_TRAINING, movies_params=ms.get_movies_params())
    # movies_tuple.load_movies()
    # movies_tuple.normalize()
    # predict_from_model(movies_tuple)
    # movies_tuple = ms.MoviesTuple(path=paths.DATA_FOR_VALIDATION, movies_params=ms.get_movies_params())
    # movies_tuple.load_movies()
    # movies_tuple.normalize()
    batch_size = 1
    validation_generator = DataGenerator(path=paths.DATA_FOR_TESTING,
                                         params=ms.get_movies_params(),
                                         batch_size=batch_size)

    predict_from_model(validation_generator)
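
Each example feeds data through a DataGenerator whose implementation is not shown in this listing. A minimal sketch of such a generator, assuming it subclasses keras.utils.Sequence and that each sample is a single .npy array of frames (the file layout and input/target split are assumptions):

import glob
import os

import numpy as np
from keras.utils import Sequence

class DataGenerator(Sequence):

    def __init__(self, path, params, batch_size=1):
        self.params = params
        self.batch_size = batch_size
        self.sample_files = sorted(glob.glob(os.path.join(path, "*.npy")))

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.sample_files) / self.batch_size))

    def __getitem__(self, index):
        batch = self.sample_files[index * self.batch_size:
                                  (index + 1) * self.batch_size]
        x, y = [], []
        for f in batch:
            sample = np.load(f)    # assumed shape: (frames, rows, cols, 1)
            x.append(sample[:-1])  # all but the last frame as input
            y.append(sample[-1])   # last frame as the prediction target
        return np.array(x), np.array(y)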
Example #3
from keras.models import load_model

# Project-local names used below: paths, ms, DataGenerator, weighted_mse
def main():
    # datetime_folder = input("Enter name (datetime) of folder containing predictions located in resources/prediction (format: 20180910.0817)" + os.linesep) + os.sep
    # model_folder = input("Enter name of subfolder containing prediction for model for specific epoch located in resources/prediction/datetime (format: model.epoch-03-val_loss-0.0025.hdf5)" + os.linesep) + os.sep
    # prediction_folder = os.path.join(paths.PREDICTION, datetime_folder, model_folder)

    # adjust the paths below to choose which dataset is evaluated
    path_model = '/home/vladka/Desktop/DP/Projects/xhezelov_nowcasting/trunk/resources/models/20181122.1313/model.epoch-100-val_loss-0.0004.hdf5'

    batch_size = 1
    validation_generator = DataGenerator(path=paths.DATA_FOR_TESTING,
                                         params=ms.get_movies_params(),
                                         batch_size=batch_size)
    loaded_model = load_model(path_model,
                              custom_objects={'weighted_mse': weighted_mse})
    test_loss = loaded_model.evaluate_generator(generator=validation_generator)

    print(test_loss)
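
load_model only succeeds here because the custom loss is passed through custom_objects. The examples never show weighted_mse itself; one plausible form, assuming the weights scale with echo intensity so heavy-precipitation pixels count more (the project's actual weighting scheme is not shown):

from keras import backend as K

def weighted_mse(y_true, y_pred):
    weights = 1.0 + 4.0 * y_true  # hypothetical intensity-based weights
    return K.mean(weights * K.square(y_pred - y_true), axis=-1)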
Example #4
import glob
import os

import numpy as np
import tensorflow as tf

# Project-local names used below: paths, ms, get_all_subfolders
    "Enter name of subfolder containing prediction for model for specific epoch located in resources/prediction/datetime (format: model.epoch-03-val_loss-0.0025.hdf5)"
    + os.linesep) + os.sep
#model_folder = 'model.epoch-10-val_loss-0.0008.hdf5' + os.sep

predictionPath = os.path.join(paths.PREDICTION, datetime_folder, model_folder)

inputPath = '..' + os.sep + paths.DATA_FOR_TESTING

save_path = '..' + os.sep + paths.PREDICTION_STRIPS + datetime_folder

if not os.path.exists(save_path):
    os.makedirs(save_path)
if not os.path.exists(save_path + model_folder):
    os.makedirs(save_path + model_folder)

params = ms.get_movies_params()
rows = params.frame_dims.rows
cols = params.frame_dims.cols
# gray separator strips (value 127 on the 0-255 grayscale)
small_padding = np.full((rows, 3), 127.0)
large_padding = np.full((rows, 10), 127.0)

inputFolders = get_all_subfolders(inputPath)
inputFolders.sort()
predictionFolders = get_all_subfolders("../" + predictionPath)
predictionFolders.sort()

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(len(inputFolders)):
    inputImages = glob.glob(inputFolders[i] + "*.png")
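
The loop body is cut off in the listing. A hedged sketch of how the gray paddings defined above might be used to stitch frames into one comparison strip (frame ordering and the position of the wide bar are assumptions):

def compose_strip(frames, small_padding, large_padding):
    # interleave frames with thin gray bars; put the wide bar before the
    # final (predicted) frame so it stands out in the strip
    pieces = []
    for k, frame in enumerate(frames):
        if k > 0:
            pieces.append(large_padding if k == len(frames) - 1
                          else small_padding)
        pieces.append(frame)
    return np.concatenate(pieces, axis=1)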
Example #5
from keras import optimizers
from keras.layers import (Input, ConvLSTM2D, BatchNormalization,
                          TimeDistributed, MaxPooling2D, UpSampling2D,
                          Conv2D, concatenate)
from keras.models import Model

# Project-local names used below: ms (movies helpers), weighted_mse
def get_model():

    movies_params = ms.get_movies_params()
    rows = movies_params.frame_dims.rows
    cols = movies_params.frame_dims.cols
    inputs = Input(shape=(None, rows, cols, 1))
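
    # Hedged sanity check: the encoder below applies three 2x2 poolings, so
    # rows and cols must be divisible by 8 for the decoder's upsampled
    # tensors to line up with their skip connections in concatenate().
    assert rows % 8 == 0 and cols % 8 == 0, "frame dims must be divisible by 8"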

    # ARCHITECTURE UNITOS4
    conv1 = ConvLSTM2D(filters=32,
                       kernel_size=(3, 3),
                       padding='same',
                       strides=1,
                       return_sequences=True)(inputs)
    conv1 = BatchNormalization()(conv1)
    pool1 = TimeDistributed(
        MaxPooling2D(pool_size=(2, 2),
                     strides=None,
                     padding='valid',
                     data_format=None))(conv1)

    conv2 = ConvLSTM2D(filters=64,
                       kernel_size=(3, 3),
                       padding='same',
                       strides=1,
                       return_sequences=True)(pool1)
    conv2 = BatchNormalization()(conv2)
    pool2 = TimeDistributed(
        MaxPooling2D(pool_size=(2, 2),
                     strides=None,
                     padding='valid',
                     data_format=None))(conv2)

    conv3 = ConvLSTM2D(filters=128,
                       kernel_size=(3, 3),
                       padding='same',
                       strides=1,
                       return_sequences=True)(pool2)
    conv3 = BatchNormalization()(conv3)
    pool3 = TimeDistributed(
        MaxPooling2D(pool_size=(2, 2),
                     strides=None,
                     padding='valid',
                     data_format=None))(conv3)

    conv4 = ConvLSTM2D(filters=256,
                       kernel_size=(3, 3),
                       padding='same',
                       strides=1,
                       return_sequences=True)(pool3)
    conv4 = BatchNormalization()(conv4)

    up5 = TimeDistributed(UpSampling2D(size=(2, 2), data_format=None))(conv4)
    conv5 = TimeDistributed(
        Conv2D(filters=128,
               kernel_size=(2, 2),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))(up5)
    conv5 = BatchNormalization()(conv5)

    merge5 = concatenate([conv3, conv5])
    conv6 = ConvLSTM2D(filters=128,
                       kernel_size=(3, 3),
                       padding='same',
                       strides=1,
                       return_sequences=True)(merge5)
    conv6 = BatchNormalization()(conv6)

    up7 = TimeDistributed(UpSampling2D(size=(2, 2), data_format=None))(conv6)
    conv7 = TimeDistributed(
        Conv2D(filters=64,
               kernel_size=(2, 2),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))(up7)
    conv7 = BatchNormalization()(conv7)

    merge7 = concatenate([conv2, conv7])
    conv8 = ConvLSTM2D(filters=64,
                       kernel_size=(3, 3),
                       padding='same',
                       strides=1,
                       return_sequences=True)(merge7)
    conv8 = BatchNormalization()(conv8)

    up9 = TimeDistributed(UpSampling2D(size=(2, 2), data_format=None))(conv8)
    conv9 = TimeDistributed(
        Conv2D(filters=32,
               kernel_size=(2, 2),
               activation='sigmoid',
               padding='same',
               data_format='channels_last'))(up9)
    conv9 = BatchNormalization()(conv9)

    merge9 = concatenate([conv1, conv9])
    conv10 = ConvLSTM2D(filters=32,
                        kernel_size=(3, 3),
                        padding='same',
                        strides=1,
                        return_sequences=False)(merge9)
    conv10 = BatchNormalization()(conv10)

    outputs = Conv2D(filters=1,
                     kernel_size=(3, 3),
                     activation='sigmoid',
                     padding='same',
                     data_format='channels_last')(conv10)

    # ARCHITECTURE UNITOS3
    #
    # conv1 = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', strides=1, return_sequences=True)(inputs)
    # conv1 = BatchNormalization()(conv1)
    # pool1 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))(conv1)
    #
    # conv2 = ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same', strides=1, return_sequences=True)(pool1)
    # conv2 = BatchNormalization()(conv2)
    # pool2 = TimeDistributed(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))(conv2)
    #
    # conv3 = ConvLSTM2D(filters=128, kernel_size=(3, 3), padding='same', strides=1, return_sequences=True)(pool2)
    # conv3 = BatchNormalization()(conv3)
    #
    # up4 = TimeDistributed(UpSampling2D(size=(2, 2), data_format=None))(conv3)
    # conv4 = TimeDistributed(Conv2D(filters=64, kernel_size=(2, 2), activation='sigmoid', padding='same', data_format='channels_last'))(up4)
    # conv4 = BatchNormalization()(conv4)
    #
    # merge4 = concatenate([conv2, conv4])
    # conv5 = ConvLSTM2D(filters=64, kernel_size=(3, 3), padding='same', strides=1, return_sequences=True)(merge4)
    # conv5 = BatchNormalization()(conv5)
    #
    # up6 = TimeDistributed(UpSampling2D(size=(2, 2), data_format=None))(conv5)
    # conv6 = TimeDistributed(Conv2D(filters=32, kernel_size=(2, 2), activation='sigmoid', padding='same', data_format='channels_last'))(up6)
    # conv6 = BatchNormalization()(conv6)
    #
    # merge6 = concatenate([conv1, conv6])
    # conv7 = ConvLSTM2D(filters=32, kernel_size=(3, 3), padding='same', strides=1, return_sequences=False)(merge6)
    # conv7 = BatchNormalization()(conv7)
    #
    # outputs = Conv2D(filters=1, kernel_size=(3, 3), activation='sigmoid', padding='same', data_format='channels_last')(conv7)

    # ARCHITECTURE PRIMITOS
    #
    # layer_1_filters = 16
    # layer_1_kernel_size = (3, 3)
    # layer_1_strides = (1, 1)
    #
    # outputs = ConvLSTM2D(filters=layer_1_filters, kernel_size=layer_1_kernel_size,
    #                      padding='same', strides=layer_1_strides, return_sequences=True)(inputs)
    #
    # print("---------------------------------")
    # print("---------------------------------")
    # print("ConvLSTM2D:")
    # print("layer_1_filters = ", layer_1_filters)
    # print("layer_1_kernel_size = ", layer_1_kernel_size)
    # print("layer_1_strides = ", layer_1_strides)
    #
    # outputs = BatchNormalization()(outputs)
    #
    # layer_2_filters = 32
    # layer_2_kernel_size = (5, 5)
    # layer_2_strides = (1, 1)
    #
    # outputs = ConvLSTM2D(filters=layer_2_filters, kernel_size=layer_2_kernel_size,
    #                      padding='same', strides=layer_2_strides, return_sequences=True)(outputs)
    #
    # print("---------------------------------")
    # print("ConvLSTM2D:")
    # print("layer_2_filters = ", layer_2_filters)
    # print("layer_2_kernel_size = ", layer_2_kernel_size)
    # print("layer_2_strides = ", layer_2_strides)
    #
    # outputs = BatchNormalization()(outputs)
    #
    # layer_3_filters = 64
    # layer_3_kernel_size = (9, 9)
    # layer_3_strides = (1, 1)
    #
    # outputs = ConvLSTM2D(filters=layer_3_filters, kernel_size=layer_3_kernel_size,
    #                      padding='same', strides=layer_3_strides, return_sequences=False)(outputs)
    #
    # print("---------------------------------")
    # print("ConvLSTM2D:")
    # print("layer_3_filters = ", layer_3_filters)
    # print("layer_3_kernel_size = ", layer_3_kernel_size)
    # print("layer_3_strides = ", layer_3_strides)
    #
    # outputs = BatchNormalization()(outputs)
    # #
    # # outputs = ConvLSTM2D(filters=32, kernel_size=(3, 3),
    # #                      padding='same', strides=layer_3_strides, return_sequences=False)(outputs)
    # #
    # # print("---------------------------------")
    # # print("ConvLSTM2D:")
    # # print("layer_3_filters = ", layer_3_filters)
    # # print("layer_3_kernel_size = ", layer_3_kernel_size)
    # # print("layer_3_strides = ", layer_3_strides)
    # #
    # # outputs = BatchNormalization()(outputs)
    #
    # layer_4_filters = 1
    # layer_4_kernel_size = (3, 3)
    # activation = 'sigmoid'
    #
    # outputs = Conv2D(filters=layer_4_filters, kernel_size=layer_4_kernel_size,
    #                  activation=activation,
    #                  padding='same', data_format='channels_last')(outputs)
    #
    # print("---------------------------------")
    # print("Conv3D:")
    # print("layer_4_filters = ", layer_4_filters)
    # print("layer_4_kernel_size = ", layer_4_kernel_size)
    # print("Activation: ", activation)

    model_conv_lstm = Model(inputs, outputs)

    loss = "mean_squared_error"
    # optimizer = 'adadelta'
    # optimizer = 'rmsprop'
    # optimizer = optimizers.RMSprop(lr=0.01, rho=0.9, epsilon=None, decay=0.0)
    optimizer = optimizers.Adam(lr=0.01,
                                beta_1=0.9,
                                beta_2=0.999,
                                epsilon=None,
                                decay=0.0,
                                amsgrad=False)
    metrics = ['accuracy', 'mean_squared_error']  # for reference only; not passed to compile below

    # model_conv_lstm.compile(loss=my_mean_squared_error, optimizer=optimizer, metrics=metrics)
    # model_conv_lstm.compile(loss=[weighted_mse(size=rows, padding=1)], optimizer=optimizer)
    # model_conv_lstm.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[weighted_mse])

    model_conv_lstm.compile(loss=loss, optimizer=optimizer)

    print("---------------------------------")
    print("Loss: ", loss)
    print("Optimizer: ", optimizer)
    print("Metrics: ", metrics)
    print("Padding is 'same' in every layer")
    print("---------------------------------")
    print("---------------------------------")
    """
    Write summary of model
    """
    model_conv_lstm.summary()
    """
    Plot model into graph and save it into paths.MODEL_GRAPH
    """
    # plot_model(model_conv_lstm, to_file=paths.MODEL_GRAPH, show_shapes=True)

    return model_conv_lstm
Example #6
import os

import keras
import tensorflow as tf
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
from keras.models import load_model

# Project-local names used below: paths, redirection, model_convLSTM,
# DataGenerator, ms, weighted_mse
def main():

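    # cap TensorFlow at half the GPU memory so other jobs can share the card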
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    set_session(tf.Session(config=config))

    paths_MODEL_FOLDER = paths.create_folder_with_timestamp(
        paths.MODELS)  # 'resources/models/20181023.2304/'
    paths_MODEL = os.path.join(
        paths_MODEL_FOLDER, "ConvLSTM_model.h5"
    )  # 'resources/models/20181023.2304/ConvLSTM_model.h5'

    paths_LOGS_FOLDER = paths.create_folder_with_timestamp(
        paths.LOGS)  # 'resources/logs_tensorboard/20181023.2304/'
    """ redirect the stdout to file saved in model folder """
    redir = redirection.StdoutRedirection(paths_MODEL_FOLDER)
    redir.redirect_to_file()

    # training_movies_tuple = ms.MoviesTuple(path=paths.DATA_FOR_TRAINING, movies_params=ms.get_movies_params())
    # training_movies_tuple.load_movies_with_weights()
    # training_movies_tuple.normalize()
    # TODO
    # validation_movies_tuple = ms.MoviesTuple(path=paths.DATA_FOR_VALIDATION, movies_params=ms.get_movies_params())
    # validation_movies_tuple.load_movies_with_weights()
    # validation_movies_tuple.normalize()

    conv_lstm_model = model_convLSTM.get_model()

    # TRAIN THE MODEL

    batch_size = 4
    epochs = 1000
    """ period: how often (after how many epochs) save model """
    period = 10
    print("---------------------------------")
    print("Batch size: ", batch_size)
    print("Epochs: ", epochs)
    print("---------------------------------")
    # print("Train on ", training_movies_tuple.movies_params.samples_ratio.n_samples_loaded)
    # print("Validate on ", validation_movies_tuple.movies_params.samples_ratio.n_samples_loaded)
    print("---------------------------------")
    """ callback for training process"""
    # tensorboard_callback = keras.callbacks.TensorBoard(log_dir=paths_LOGS_FOLDER, histogram_freq=0, batch_size=batch_size, write_graph=True,
    #                                                    write_grads=True, write_images=True, update_freq='batch')
    tensorboard_callback = keras.callbacks.TensorBoard(
        log_dir=paths_LOGS_FOLDER,
        histogram_freq=0,
        batch_size=batch_size,
        write_graph=True,
        write_grads=True,
        write_images=True,
        update_freq='epoch')
    """ callback for saving model after 'period' number of epochs"""
    model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
        filepath=paths_MODEL_FOLDER +
        "model.epoch-{epoch:02d}-val_loss-{val_loss:.4f}.hdf5",
        monitor='val_loss',
        verbose=0,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=period)
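    # Keras fills the {epoch:02d} and {val_loss:.4f} placeholders in filepath
    # at save time, producing names like model.epoch-03-val_loss-0.0025.hdf5
    # (the format the earlier examples prompt for).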
    print_tensorboard_log_info()

    redir.redirect_to_stdout()

    # loss_weights = ms.Movies(path=paths.MASKS, movies_params=ms.get_movies_params())
    # loss_weights.load_movies_loss_weights()
    # loss_weights.normalize()

    training_generator = DataGenerator(path=paths.DATA_FOR_TRAINING,
                                       params=ms.get_movies_params(),
                                       batch_size=batch_size)
    validation_generator = DataGenerator(path=paths.DATA_FOR_VALIDATION,
                                         params=ms.get_movies_params(),
                                         batch_size=batch_size)

    path_model = "resources/models/20181113.2325/ConvLSTM_model.h5"
    loaded_model = load_model(path_model,
                              custom_objects={'weighted_mse': weighted_mse})
    K.set_value(loaded_model.optimizer.lr, 0.0001)
    """ set callbacks=[tbCallBack] for saving logs to show learning progress in tensorboard """
    history = conv_lstm_model.fit_generator(
        generator=training_generator,
        epochs=epochs,
        validation_data=validation_generator,
        verbose=1,
        callbacks=[tensorboard_callback, model_checkpoint_callback],
        shuffle=False)


    # visualization.visualize_training_progress(history)

    # SAVE MODEL
    conv_lstm_model.save(paths_MODEL)
    print("Model is saved in ", paths_MODEL)