Code Example #1
import numpy as np

def model_pred(model_name, test, testTemporal, topo_data):
    # PATH, MODELNAME, BATCHSIZE, TIMESTEP, HEIGHT, WIDTH, MAX_VALUE, LOSS and
    # OPTIMIZER are module-level settings defined outside this snippet.
    # test generator
    test_gene = test_generator(test, testTemporal, topo_data, BATCHSIZE, TIMESTEP)
    test_sep = (test.shape[0] - TIMESTEP) * test.shape[1] // BATCHSIZE

    # get predict
    model = get_model_structure(model_name)
    # model = multi_gpu_model(model, gpus=2)  # gpu parallel
    model.compile(loss=LOSS, optimizer=OPTIMIZER)
    model.load_weights(PATH + '/' + MODELNAME + '.h5')
    predY = model.predict_generator(test_gene, steps=test_sep)

    # ground truth
    testY = get_test_true(test, TIMESTEP, model_name)

    # compute mse
    scaled_testY = np.reshape(testY, ((test.shape[0] - TIMESTEP), HEIGHT, WIDTH))
    scaled_predTestY = np.reshape(predY, ((test.shape[0] - TIMESTEP), HEIGHT, WIDTH))
    print('test scale shape: ', scaled_predTestY.shape)
    scale_MSE = np.mean((scaled_testY - scaled_predTestY) ** 2)
    print("Model scaled MSE", scale_MSE)

    # MSE scales with the square of the data range, so converting the error
    # back to original units multiplies it by MAX_VALUE ** 2
    rescale_MSE = scale_MSE * MAX_VALUE ** 2
    print("Model rescaled MSE", rescale_MSE)

    with open(PATH + '/' + MODELNAME + '_prediction_scores.txt', 'a') as wf:
        wf.write("Keras MSE on testData, %f\n" % scale_MSE)
        wf.write("Rescaled MSE on testData, %f\n" % rescale_MSE)

    np.save(PATH + '/' + MODELNAME + '_prediction.npy', scaled_predTestY * MAX_VALUE)
    np.save(PATH + '/' + MODELNAME + '_groundtruth.npy', scaled_testY * MAX_VALUE)
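A call to this prediction routine might look like the sketch below. The file names, the division by MAX_VALUE and the 'ConvLSTM' structure key are illustrative assumptions; the original module only implies that the inputs arrive pre-scaled, since the MSE is multiplied back by MAX_VALUE ** 2 above.

# Hypothetical invocation -- file names and the structure key are placeholders.
test_data = np.load('test_volume.npy') / MAX_VALUE   # pre-scale to match training
test_temporal = np.load('test_temporal.npy')         # temporal features
topo_data = np.load('topology.npy')                  # static topology input
model_pred('ConvLSTM', test_data, test_temporal, topo_data)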
Code Example #2
import datetime

import numpy as np
from keras.callbacks import (CSVLogger, EarlyStopping, LearningRateScheduler,
                             ModelCheckpoint)  # or tensorflow.keras.callbacks

def model_train(model_name, train_data, valid_data, trainTemporal,
                validTemporal, topo_data):
    # StartTime, LR, EPOCH and the other upper-case names are module-level
    # settings defined outside this snippet.
    # set callbacks
    csv_logger = CSVLogger(PATH + '/' + MODELNAME + '.log')
    checkpointer_path = PATH + '/' + MODELNAME + '.h5'
    checkpointer = ModelCheckpoint(filepath=checkpointer_path,
                                   verbose=1,
                                   save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   patience=10,
                                   verbose=1,
                                   mode='auto')
    LearnRate = LearningRateScheduler(lambda epoch: LR)  # constant learning rate

    # data generator
    train_generator = data_generator(train_data, trainTemporal, topo_data,
                                     BATCHSIZE, TIMESTEP, model_name)
    val_generator = data_generator(valid_data, validTemporal, topo_data,
                                   BATCHSIZE, TIMESTEP, model_name)
    sep = (train_data.shape[0] - TIMESTEP) * train_data.shape[1] // BATCHSIZE
    val_sep = (valid_data.shape[0] -
               TIMESTEP) * valid_data.shape[1] // BATCHSIZE

    # train model
    model = get_model_structure(model_name)
    # model = multi_gpu_model(model, gpus=2)  # gpu parallel
    model.compile(loss=LOSS, optimizer=OPTIMIZER)
    model.fit_generator(
        train_generator,
        steps_per_epoch=sep,
        epochs=EPOCH,
        validation_data=val_generator,
        validation_steps=val_sep,
        callbacks=[csv_logger, checkpointer, LearnRate, early_stopping])

    # compute mse
    val_nolabel_generator = test_generator(valid_data, validTemporal,
                                           topo_data, BATCHSIZE, TIMESTEP)
    val_predY = model.predict_generator(val_nolabel_generator, steps=val_sep)
    valY = get_test_true(valid_data, TIMESTEP, model_name)
    # mse
    scaled_valY = np.reshape(valY,
                             ((valid_data.shape[0] - TIMESTEP), HEIGHT, WIDTH))
    scaled_predValY = np.reshape(
        val_predY, ((valid_data.shape[0] - TIMESTEP), HEIGHT, WIDTH))
    print('val scale shape: ', scaled_predValY.shape)
    val_scale_MSE = np.mean((scaled_valY - scaled_predValY)**2)
    print("Model val scaled MSE", val_scale_MSE)
    # rescale mse
    val_rescale_MSE = val_scale_MSE * MAX_VALUE**2
    print("Model val rescaled MSE", val_rescale_MSE)

    # write record
    with open(PATH + '/' + MODELNAME + '_prediction_scores.txt', 'a') as wf:
        wf.write('train start time: {}\n'.format(StartTime))
        wf.write('train end time:   {}\n'.format(
            datetime.datetime.now().strftime('%Y%m%d_%H%M%S')))
        wf.write("Keras MSE on trainData, %f\n" % val_scale_MSE)
        wf.write("Rescaled MSE on trainData, %f\n" % val_rescale_MSE)
Code Example #3
import numpy as np

def model_pred(model_name, test, testTemporal, topo_data, flow_type):
    # flow_type selects which trained weight file is loaded and labels the
    # scores written at the end (renamed from `type` to avoid shadowing the
    # builtin).
    # test generator
    test_gene = test_generator(test, testTemporal, topo_data, BATCHSIZE,
                               TIMESTEP)
    test_sep = (test.shape[0] - TIMESTEP) * test.shape[1] // BATCHSIZE

    # get predict
    model = get_model_structure(model_name)
    # model = multi_gpu_model(model, gpus=2)  # gpu parallel
    model.compile(loss=LOSS, optimizer=OPTIMIZER)
    model.load_weights(PATH + '/' + MODELNAME + '_' + flow_type + '.h5')
    predY = model.predict_generator(test_gene, steps=test_sep)

    # ground truth
    testY = get_test_true(test, TIMESTEP, model_name)

    # compute mse
    scaled_testY = np.reshape(
        testY, ((test.shape[0] - TIMESTEP), REGION_SIZE, REGION_SIZE))
    scaled_predTestY = np.reshape(
        predY, ((test.shape[0] - TIMESTEP), REGION_SIZE, REGION_SIZE))
    print('test scale shape: ', scaled_predTestY.shape)
    scale_MSE = np.mean((scaled_testY - scaled_predTestY)**2)
    print("Model scaled MSE", scale_MSE)

    retestY, repredTestY = testY * MAX_VALUE, predY * MAX_VALUE
    retestY = np.reshape(
        retestY, ((test.shape[0] - TIMESTEP), REGION_SIZE, REGION_SIZE))
    repredTestY = np.reshape(
        repredTestY, ((test.shape[0] - TIMESTEP), REGION_SIZE, REGION_SIZE))
    print('rescale shape: ', repredTestY.shape)
    rescale_MSE = np.mean((retestY - repredTestY)**2)
    print("Model rescaled MSE", rescale_MSE)

    with open(PATH + '/' + MODELNAME + '_prediction_scores.txt', 'a') as wf:
        wf.write("Keras MSE on flow {} testData, {}\n".format(type, scale_MSE))
        wf.write("Rescaled MSE on flow {} testData, {}\n\n".format(
            type, rescale_MSE))
    return scale_MSE, rescale_MSE
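Since this variant takes a flow label and returns both error figures, a driver loop over the two directions might look like the following; the 'inflow'/'outflow' labels and the use of MODELNAME as the structure key are assumptions, as the original calling code is not shown.

# Hypothetical driver over both flow directions.
results = {}
for flow in ('inflow', 'outflow'):
    results[flow] = model_pred(MODELNAME, test, testTemporal, topo_data, flow)
for flow, (mse_scaled, mse_rescaled) in results.items():
    print('{}: scaled MSE {:.6f}, rescaled MSE {:.6f}'.format(
        flow, mse_scaled, mse_rescaled))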