Example #1
generatordata = utils.batchGenerator(train_img,
                                     train_label_img,
                                     tilesize=img_rows,
                                     batch_size=batch_size,
                                     random=True,
                                     categorical=False,
                                     data_augmentation=True)
generatordata_test = utils.batchGenerator(train_img,
                                          train_label_img,
                                          tilesize=img_rows,
                                          batch_size=batch_size,
                                          random=True,
                                          categorical=False,
                                          data_augmentation=False)
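
# For context, a minimal sketch of the generator contract these calls assume
# (assumption: batchGenerator yields endless (tiles, label_tiles) batches of
# random crops; the real utils implementation is not shown in this snippet):
import numpy as np

def batch_generator_sketch(img, label_img, tilesize, batch_size):
    h, w = img.shape[:2]
    while True:
        # sample random top-left corners and crop matching image/label tiles
        xs = np.random.randint(0, h - tilesize + 1, batch_size)
        ys = np.random.randint(0, w - tilesize + 1, batch_size)
        tiles = np.stack([img[x:x + tilesize, y:y + tilesize] for x, y in zip(xs, ys)])
        labels = np.stack([label_img[x:x + tilesize, y:y + tilesize] for x, y in zip(xs, ys)])
        yield tiles, labels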
# fit the model
weights_path = 'weights_resnet.h5'
utils.fit_model(model,
                generatordata,
                generatordata_test,
                epocs=1000,
                samples_per_epoch=3000,
                nb_val_samples=300,
                early_stopping=True,
                patience=10,
                weightfile=weights_path,
                save_best=True)

model.load_weights(weights_path, by_name=True)
# make a prediction on the test image
utils.predict_image_tile_new(model,
                             validation_img,
                             tilesize=img_rows,
                             outputimg=predicted_img)
print("accuracy : " + str(utils.evaluate(predicted_img, validation_label_img)))
Example #2
generatordata_test = utils.batchGenerator(validation_img,
                                          validation_label_img,
                                          tilesize=img_rows,
                                          batch_size=500,
                                          random=True,
                                          categorical=True,
                                          nb_classes=nb_classes,
                                          data_augmentation=False)
data_test = next(generatordata_test)
# fit the model
weights_path = 'weights_resnet.h5'
history = utils.fit_model(model,
                          generatordata,
                          data_test,
                          epocs=1000,
                          samples_per_epoch=(rows * cols) // batch_size,
                          nb_val_samples=10,
                          early_stopping=True,
                          patience=1,
                          weightfile=weights_path,
                          save_best=True)

print(history.history.keys())
import matplotlib.pyplot as plt

# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
Example #3
eo = ut.read_file("../data/S072R01.edf")
ec = ut.read_file("../data/S072R02.edf")


######## 1.1

fs = 160  # Sampling frequency (Hz), given by the data
resolution = 100  # Model resolution (so that each bin is 1 Hz wide)
freq = 10  # Frequency of interest (Hz)
density = 0.2  # Desired graph density


###PDC

# Fitting PDC models
eo_pdc = ut.fit_model(eo, fs, resolution, "pdc", freq)
ec_pdc = ut.fit_model(ec, fs, resolution, "pdc", freq)

# Adjacency Matrices for 20% density networks
ut.adjacency_matrix(eo_pdc, ut.find_threshold(eo_pdc, density), "eo_pdc_20")
ut.adjacency_matrix(ec_pdc, ut.find_threshold(ec_pdc, density), "ec_pdc_20")
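
# For context, a minimal sketch of the density thresholding used above
# (assumption: find_threshold returns the cutoff that keeps the top
# `density` fraction of off-diagonal connectivity weights; the real
# implementation in ut is not shown in this snippet):
import numpy as np

def find_threshold_sketch(conn, density):
    # collect off-diagonal weights and keep the strongest `density` share
    off_diag = conn[~np.eye(conn.shape[0], dtype=bool)]
    return np.quantile(off_diag, 1.0 - density)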



######## 1.2


# Fitting DTF models
eo_dtf = ut.fit_model(eo, fs, resolution, "dtf", freq)
ec_dtf = ut.fit_model(ec, fs, resolution, "dtf", freq)
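
# The snippet presumably continues with the matching adjacency matrices for
# the 20% density DTF networks, mirroring section 1.1:
# ut.adjacency_matrix(eo_dtf, ut.find_threshold(eo_dtf, density), "eo_dtf_20")
# ut.adjacency_matrix(ec_dtf, ut.find_threshold(ec_dtf, density), "ec_dtf_20")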
Example #4
            print("train with one image")
            generatordata, generatordata_test = utils.get_batch_generator(
                model,
                train_img,
                train_label_img,
                validation_img,
                validation_label_img,
                batch_size,
                augment_data=True,
                nb_classes=nb_classes)
        data_test = next(generatordata_test)
        history = utils.fit_model(model,
                                  generatordata,
                                  data_test,
                                  epocs=epochs,
                                  samples_per_epoch=train_samples,
                                  nb_val_samples=test_samples,
                                  early_stopping=False,
                                  weightfile=weights_path,
                                  save_best=True,
                                  monitor=monitor_metric)

        training_histories[modelname] = history

    print(training_histories)
    historyfile = expe_prefix + 'histories.csv'
    with open(historyfile, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for m, h in training_histories.items():
            for key, value in h.history.items():
                writer.writerow([m + key, value])
Example #5
    # Mean mode medium of data
    utils.print_stats(prices)

    utils.r2_performance_metric_example()

    X_train, X_test, y_train, y_test = train_test_split(features,
                                                        prices,
                                                        test_size=0.2,
                                                        random_state=10)
    print("Training and testing split was successful.")

    utils.visulaize_learning_curves(features, prices)
    utils.visulaize_model_complexity_curves(X_train, y_train)

    # Fit the training data to the model using grid search
    decision_tree_reg = utils.fit_model(np.asarray(X_train),
                                        np.asarray(y_train))

    # Produce the value for 'max_depth'
    print("Parameter 'max_depth' is {} for the optimal model.".format(
        decision_tree_reg.get_params()['max_depth']))

    features_set_to_predict = [
        [5, 17, 15],  # Client 1
        [4, 32, 22],  # Client 2
        [8, 3, 12],   # Client 3
    ]
    utils.predict_house_price(decision_tree_reg, features_set_to_predict)
    # Produce a matrix for client data

    vs.PredictTrials(features, prices, utils.fit_model,
                     features_set_to_predict)
Example #6
generatordata = utils.batchGenerator(train_img,
                                     train_label_img,
                                     tilesize=img_rows,
                                     batch_size=batch_size,
                                     random=True,
                                     categorical=True,
                                     nb_classes=nb_classes)
generatordata_test = utils.batchGenerator(train_img,
                                          train_label_img,
                                          tilesize=img_rows,
                                          batch_size=batch_size,
                                          random=True,
                                          categorical=True,
                                          nb_classes=nb_classes)
# fit the model
utils.fit_model(model,
                generatordata,
                generatordata_test,
                epocs=1000,
                samples_per_epoch=30000,
                nb_val_samples=10,
                early_stopping=True,
                patience=5)
weights_path = 'weights.h5'
model.load_weights(weights_path, by_name=True)
# make a prediction on the test image
utils.predict_image_categorical_tile(
    model,
    validation_img,
    tilesize=img_rows,
    outputimg="prediction_categorical_center.tif")
print("accuracy : " + str(
    utils.evaluate("./prediction_categorical_center.tif",
                   validation_label_img)))
Example #7
Kestimate = 5
param1 = 'C'
param2 = 'gamma'
grid1 = [0.001, 0.01, 0.1, 1, 10, 100]
grid2 = [0.001, 0.01, 0.1, 1, 10, 100]

# Create the hyperparameters grid
parameters = {param1: grid1, param2: grid2}

# Create a synthetic dataset with XOR classes
np.random.seed(0)
X = np.random.randn(instances, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
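
# Note that XOR labels are not linearly separable, so a kernel classifier
# tuned over C and gamma (presumably an RBF-kernel SVM) is the natural
# choice for this grid search.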

# Fit the model
all_mean_fit_time, all_grid_search, test_score, best_parameters = fit_model(
    X, Y, parameters, 'All')
print("All - Best Test Score:", test_score)
print("All - Best Parameters:", best_parameters)

# Create datapoints plots
make_plots(X, Y, all_grid_search, 'All')

# Create scores heatmap
make_score_heatmap(parameters, param1, param2, all_grid_search, 'All')

# Get the reduced dataset
Xred, Yred, rtime = reduce(X, Y, Kneighbors, Kestimate)

# Fit the model on the reduced dataset
reduced_mean_fit_time, reduced_grid_search, test_score, best_parameters = fit_model(
    Xred, Yred, parameters, 'Reduced')
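
# For reference, a minimal sketch of what a fit_model helper with the
# signature used above could look like (assumption: it wraps sklearn's
# GridSearchCV around an RBF-kernel SVC; the actual implementation is not
# shown in this snippet, and the `label` argument is only used for naming
# output artifacts):
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC

def fit_model_sketch(X, Y, parameters, label):
    # hold out a test split, then grid-search hyperparameters on the rest
    X_tr, X_te, y_tr, y_te = train_test_split(X, Y, test_size=0.2, random_state=0)
    grid_search = GridSearchCV(SVC(kernel='rbf'), parameters, cv=5)
    grid_search.fit(X_tr, y_tr)
    mean_fit_time = grid_search.cv_results_['mean_fit_time'].mean()
    # score the best estimator on the held-out split
    return mean_fit_time, grid_search, grid_search.score(X_te, y_te), grid_search.best_params_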