Example #1
def evaluate_model(train, test, n_input, save_model_path, save_plot_path):
    # fit model
    model = build_model(train, n_input)
    # history is a list of yearly data
    history = [x for x in train]
    # walk-forward validation over period
    predictions = list()
    for i in range(len(test)):
        # predict the period
        yhat_sequence = forecast_multichannel(model, history, n_input)
        # store the predictions
        predictions.append(yhat_sequence)
        # get real observation and add to history for predicting the next period
        history.append(test[i, :])
    # get array of predictions
    prediction = np.array(predictions)
    prediction = np.ravel(prediction)
    # get array of actual values from test set
    actual = test[:, :, 0]
    actual_plot = np.ravel(actual)
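    # replace zero actuals with the mean so MAPE avoids division by zero (the plot keeps the raw values)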
    actual[actual == 0] = np.nanmean(actual)
    actual = np.ravel(actual)
    # calculate and print scores
    rmse, mape = calculate_scores(actual, prediction)
    print('RMSE: %.3f' % rmse)
    print('MAPE: %.3f' % mape)
    # save plot as a PNG file at save_plot_path
    save_plot(actual_plot, prediction, save_plot_path)
    plot_prediction(actual_plot, prediction)
    # save model
    model.save(save_model_path)
Example #2
def evaluate_model(train, test, model):
    n_input = 365
    # fit model
    # model = build_model(train)
    # history is a list of yearly data
    history = [x for x in train]
    # walk-forward validation over each year
    prediction = list()
    for i in range(len(test)):
        # predict the year
        yhat_sequence = forecast(model, history, n_input)
        # store the predictions
        prediction.append(yhat_sequence)
        # get real observation and add to history for predicting the next year
        history.append(test[i, :])
    # get array of predictions
    prediction = np.array(prediction)
    prediction = np.ravel(prediction)
    # get array of actual values from test set
    actual = test[:, :, 0]
    actual[actual == 0] = np.nanmean(actual)
    actual = np.ravel(actual)
    # calculate and print scores
    rmse, mape = calculate_scores(actual, prediction)
    print('RMSE: %.3f' % rmse)
    print('MAPE: %.3f' % mape)
    # plot prediction
    plot_prediction(actual, prediction)
    # clear keras model
    K.clear_session()
    return mape
Example #3
    def process_video(self, loader, save_images=True):

        psnr_all = []

        while not loader.is_done():

            input_sequence, gt, gt_path = loader.get_sequence()
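            # run the generator on the input frames and score it against the ground truth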
            gen = self.g_model(input_sequence)
            psnr_frame = calculate_psnr(gen, gt, val_range=2)
            psnr_all.append(psnr_frame)

            if save_images:
                gen_folder = makedir(join(c.IMG_SAVE_DIR, gt_path.split('/')[-2]))
                gen_path = join(gen_folder, gt_path.split('/')[-1])
                gen_uint8 = float32_to_uint8(gen)
                imsave(gen_path, var2np(gen_uint8))

        psnr_mean = np.mean(psnr_all)

        # Plotting
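        # gt_path from the last loop iteration identifies the video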
        video_name = gt_path.split('/')[-2]
        plot_prediction(video_name, psnr_all)

        return psnr_mean
Example #4
# Load dataset, model and evaluation metric
train_data, test_data, softmax_classifier, accuracy = _initialize(DATA_NAME)
train_x, train_y = train_data
if DATA_NAME == 'Digit':
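    # the Digit data also bundles its mean image, needed later to undo centering for display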
    train_x, mean_img = train_x

num_data, num_features = train_x.shape
num_label = int(train_y.max()) + 1
print('# of Training data : %d \n' % num_data)

# Make model & optimizer
model = softmax_classifier(num_features, num_label)
optim = optimizer(OPTIMIZER, gamma=gamma, epsilon=epsilon)

# TRAIN
loss = model.train(train_x, train_y, num_epochs, batch_size, learning_rate,
                   optim)
print('Training Loss at last epoch: %.4f' % loss)

# EVALUATION
test_x, test_y = test_data
pred = model.eval(test_x)

acc = accuracy(pred, test_y)
print(OPTIMIZER, ' Accuracy on Test Data : %.2f' % acc)

# Visualize predictions for Digit dataset
if show_plot and DATA_NAME == 'Digit':
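    # strip the first column (presumably the bias term) and restore the mean image for display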
    test_x = test_x[:, 1:] + mean_img
    plot_prediction(test_x, pred, test_y)
Example #5
    loss = ff.CrossEntropyLoss()
else:
    raise ValueError('Unknown loss.')

DATASET_SIZE = 1000

## Generate dataset
train_input, train_target, test_input, test_target, test_input_raw = generate_data(DATASET_SIZE, one_hot=ONE_HOT, normalize=True)

## Create model
model = Net(nb_nodes = 25)
print(model)
if args.no_train:
    ## Load best model
    model.load('../model/best-model.pt')
    model.eval()  # Set model to eval mode
    ## Plotting results of the best model
    plot_prediction(test_input, test_input_raw, test_target, model)
    plt.suptitle('Prediction of the best model')
    plt.show()
else:
    print('Using : {}Loss\n'.format(args.loss))
    ## Training model
    model.train_(train_input, train_target, test_input, test_target, epoch=100, eta=1e-1, criterion=loss)
    model.eval()  # Set model to eval mode
    ## Plotting results of the model at the end of training
    plot_results(model.sumloss, model.train_error, model.test_error)
    plot_prediction(test_input, test_input_raw, test_target, model)
    plt.suptitle('Prediction of the trained model')
    plt.show()
Example #6
                                                 look_back,
                                                 look_ahead,
                                                 batch_size=1)

mse_losses = []
mae_losses = []
residuals = []
for data, target in train_loader:
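    # run the model on each batch, collecting losses and residuals for the epoch summary below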
    mse_loss, mae_loss, resid = model.step(data,
                                           target,
                                           predict=True,
                                           scaler=scaler)
    mse_losses.append(mse_loss)
    mae_losses.append(mae_loss)
    residuals.append(resid)

mse_train = np.mean(mse_losses)
mae_train = np.mean(mae_losses)

print('Train (log): RMSE={:.3f}, MAE={:.3f}'.format(np.sqrt(mse_train),
                                                    mae_train))
print('Test:        RMSE={:.3f}, MAE={:.3f}'.format(
    np.sqrt(mean_squared_error(pred_scaled, target_scaled)),
    mean_absolute_error(pred_scaled, target_scaled)))

# plot prediction
residuals_scaled = scaler.inverse_transform(
    np.array(residuals).reshape(-1, 1)).squeeze()
#plot_prediction(target_scaled, pred_scaled, residuals_scaled) does not show the residuals correctly
plot_prediction(target_pred.detach().view(-1, 1).numpy(),
                pred.detach().view(-1, 1).numpy(), residuals)
Example #7
    nima_net.load_state_dict(torch.load(MODEL_LOAD_PATH))
    print(nima_net)

    # -- evaluation --
    predictions, loss = eval(
        model=nima_net,
        dataloaders=nima_dataloaders,
        criterion=criterion,
        device=DEVICE,
    )
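    # prepend the image-id column so predictions can be matched back to their images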
    for phase in TEST_PHASE_LIST:
        predictions[phase].insert(
            loc=0,
            column=AVADataset._label_key_image_id,
            value=ava_datasets[phase].labels[AVADataset._label_key_image_id])

    with open(EVAL_PRED_PATH, "wb") as fw:
        pickle.dump(predictions, fw)
    with open(EVAL_LOSS_PATH, "wb") as fw:
        pickle.dump(loss, fw)

    image = ava_datasets[TEST][0]["image"]
    ground_truth = ava_datasets[TEST][0]["ground_truth"]
    prediction = predictions[TEST].loc[0, AVADataset._label_key_ratings]
    plot_prediction(image=image,
                    ground_truth=ground_truth,
                    prediction=prediction)

    print("predicted average score: {}".format(ave_rating(prediction)))
    print("ground truth score: {}".format(ave_rating(ground_truth)))
Example #8
def model_processing(X_train, y_train, x_dim, batch_size, batches, epochs,
                     model_dir, model_name):

    #build
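    # resolve the model-building function from its name, then build it for the input dimension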
    model = eval(model_name)
    model = model(x_dim)

    # compile
    opt = keras.optimizers.Adam(learning_rate=0.01)
    model.compile(loss='mse', optimizer=opt, metrics=['mean_squared_error'])

    # Create the folder where weights are saved; it is wiped and recreated on every run.
    weights_dir = model_dir + '/weights/'
    if os.path.isdir(weights_dir):
        shutil.rmtree(weights_dir)
    os.makedirs(weights_dir)

    #callbacks
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                  factor=0.5,
                                                  patience=5,
                                                  min_lr=0.001)
    batches_period = 1  # save weights after every batch
    loss_hist = LossHistory()
    weight_saver = WeightsSaver(batches_period, weights_dir)
    callbacks = [reduce_lr, weight_saver, loss_hist]

    # fit
    history_PerEpoch = model.fit(X_train,
                                 y_train,
                                 validation_data=(X_train, y_train),
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 shuffle=False,
                                 batch_size=batch_size)

    # log metrics and evaluation
    history_PerEpoch = set_index_intervals(
        pd.DataFrame(history_PerEpoch.history), batches, epochs)
    history_PerBatch = pd.DataFrame(loss_hist.losses)
    eval_PerEpoch_OnWholeSet, eval_PerBatch_OnWholeSet, eval_PerBatch_OnEachBatch = evaluation(
        X_train, y_train, batch_size, batches, epochs, model, weights_dir)
    mean_loss_per_epoch = mean_loss(eval_PerBatch_OnEachBatch, batches, epochs)

    column_names = [
        'log per epoch (train)', 'log per epoch (val)', 'learning rate',
        'log per batch (train)', 'eval per epoch',
        'eval per batch on whole training set',
        'eval per batch on each corresponding batch',
        'eval per batch on each corresponding batch (mean per epoch)'
    ]
    loss_all = pd.concat([
        history_PerEpoch[['loss', 'val_loss', 'lr']], history_PerBatch,
        eval_PerEpoch_OnWholeSet, eval_PerBatch_OnWholeSet,
        eval_PerBatch_OnEachBatch, mean_loss_per_epoch
    ],
                         axis=1,
                         ignore_index=False)
    loss_all.columns = column_names
    loss_all.to_csv(model_dir + '/loss_values.csv')

    # prediction
    y_pred = model.predict(X_train)

    # plot
    plot_loss(batches, epochs, column_names, loss_all, model_dir, model_name)

    plot_prediction(y_pred, y_train, model_dir, model_name, batches)

    # reset weights
    keras.backend.clear_session()

    return loss_all
Example #9
y_test_predicted = y_test_p

#%%
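# feed each prediction back in as the next encoder input to extend the forecast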
for k in range(1, num_steps_to_predict, target_sequence_length):
    x_encoder_test = y_test_p
    y_test_p = model.predict([x_encoder_test, x_decoder_test])
    y_test_predicted = np.hstack((y_test_predicted, y_test_p))

#%%
indices = np.random.choice(range(x_encoder_test.shape[0]),
                           replace=False,
                           size=10)

for index in indices:
    plot_prediction(x_encoder_test[index, :, :], y_test[index, :, :],
                    y_test_predicted[index, :, :])

#%%
encoder_predict_model = keras.models.Model(encoder_inputs, encoder_states)

decoder_states_inputs = []

# Read the layers backwards to fit the format of initial_state
# For some reason the states of the model are ordered backwards (the state of the first layer is at the end of the list)
# If you were using an LSTM cell instead of a GRU, you would have to append two Input tensors, since the LSTM has two states.
for hidden_neurons in layers[::-1]:
    # One state for GRU
    decoder_states_inputs.append(keras.layers.Input(shape=(hidden_neurons, )))
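    # With an LSTM cell, two state inputs per layer would be needed (hidden and cell state), e.g.:
    # decoder_states_inputs.append(keras.layers.Input(shape=(hidden_neurons, )))
    # decoder_states_inputs.append(keras.layers.Input(shape=(hidden_neurons, )))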

decoder_outputs_and_states = decoder(decoder_inputs,
                                     initial_state=decoder_states_inputs)
# uncomment to see model summary
#model.summary()

Example #10
# Compile the model with the Adam optimizer and mean squared error loss
model.compile(optimizer='adam', loss='mse')
# Checkpoint for saving the weights whenever validation loss improves
chkpnt = ModelCheckpoint(filepath='model.h5',
                         verbose=1,
                         save_best_only=True,
                         monitor='val_loss')
# Stop training when validation loss fails to decrease after 3 epochs
stop = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

#############################################################
#                   Training Process                        #
#############################################################
try:
    # Train the model with a 20% validation split.
    model.fit(X_train, y_train, epochs=epochs, verbose=1,
              batch_size=batch_size, shuffle=True,
              validation_split=0.2, callbacks=[chkpnt, stop])
    # Save model to file model.json
    utils.save_model(model)
    # Plot regression using testing set
    utils.plot_prediction(model, X_test, y_test)

except KeyboardInterrupt:
    # in case of interruption, save the model and plot the regression
    utils.save_model(model)
    utils.plot_prediction(model, X_test, y_test)
Example #11
test_folder = sys.argv[2]
suffix_name = Prediction.suffix_name
test_fileList = []

Prediction.getFileList(test_folder, test_fileList)

testSeries = list(map(load.loadFile, test_fileList))
testSeries, testEffDataLen = Prediction.align(testSeries)

test_input_data = np.asarray(testSeries, dtype=testSeries[0].dtype)

for seq_index in range(len(test_fileList)):
    # Take one sequence from the test set
    # and try decoding it.
    input_seq = test_input_data[seq_index:seq_index + 1]
    decoded_sentence = Prediction.predict(input_seq,
                                          Prediction.encoder_predict_model,
                                          Prediction.decoder_predict_model,
                                          Prediction.num_steps_to_predict)
    print('-')
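    # scale the normalized inputs back to physical units (presumably seconds of day, latitude, longitude)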
    input_seq[:, :, 0] *= 86400
    input_seq[:, :, 1] *= 180
    input_seq[:, :, 2] *= 360

    print(input_seq)
    print('pre:', decoded_sentence)
    class_res = Classification.estimator.predict(decoded_sentence)
    print('classification result:',
          Classification.encoder.inverse_transform(class_res))
    utils.plot_prediction(input_seq[0, :, :], None, decoded_sentence[0, :, :])