Example No. 1
0
# Train (optimize) the network.
# WARNING: If you have a high max_iter and no GPU, this could take awhile...
# Avoid HDF5 file-lock errors on shared/network filesystems.
os.environ["HDF5_USE_FILE_LOCKING"] = 'FALSE'
# Uncomment to pin caffe to a specific device/mode before training:
# caffe.set_device(0)
# caffe.set_mode_cpu()
E2Nnet_sml.fit(x_train, y_train, x_val,
               y_val)  # If no valid data, could put test data here.

print("mission completed!")

# %%
# Plot the training iterations vs. the training loss, the valid data mean-absolute-difference,
# and the valid data correlation with predicted and true (y_vald) labels.
# Build the path with os.path.join instead of string concatenation.
file_name = os.path.join(os.getcwd(), "models", "plot_metrics.png")
E2Nnet_sml.plot_iter_metrics(True, file_name)

# %%
# Predict labels of test data (here the validation split is reused as test data).
preds = E2Nnet_sml.predict(x_val)
preds = np.reshape(preds, (len(preds), 1))  # column vector, shape (n, 1)

# %%
# Compute the metrics.
E2Nnet_sml.print_results(preds, y_val)
print("predictions raw", preds)
print("y_test", y_val)
# Binarize at the 0.5 threshold: one vectorized comparison replaces the
# original zeros-array + two masked assignments (same values, same float dtype).
preds_trans = (preds >= 0.5).astype(float)
print("predictions", preds_trans)
Example No. 2
0
    ['fc', {'n_filters': 30}],  # Fully connected (n2g) layer with 30 filters.
    ['relu', {'negative_slope': 0.33}],
    ['out', {'n_filters': 1}]]  # Output layer with num_outs nodes as outputs.

# Network architecture: two edge-to-edge layers, an edge-to-node layer,
# dropout, then fully-connected layers down to a single regression output.
e2e_arch = [
    ['e2e',  # e2e layer
     {'n_filters': 32,  # 32 feature maps
      'kernel_h': x_train.shape[2], 'kernel_w': x_train.shape[3]  # Sliding cross filter of size h x 1 by 1 x w
      }
     ],
    ['e2e',  # e2e layer
     {'n_filters': 32,  # 32 feature maps
      'kernel_h': x_train.shape[2], 'kernel_w': x_train.shape[3]  # Sliding cross filter of size h x 1 by 1 x w
      }
     ],
    ['e2n', {'n_filters': 64, 'kernel_h': x_train.shape[2], 'kernel_w': x_train.shape[3]}],  # edge-to-node
    ['dropout', {'dropout_ratio': 0.5}],
    ['relu', {'negative_slope': 0.33}],  # leaky ReLU
    ['fc', {'n_filters': 30}],  # fully connected layer with 30 filters
    ['relu', {'negative_slope': 0.33}],
    ['out', {'n_filters': 1}]  # output layer: one regression target
]

hello_net = BrainNetCNN('e2e2', e2e_arch)  # Create BrainNetCNN model
hello_net.fit(x_train, y_train[:, 0], x_valid, y_valid[:, 0])  # Train (regress only on class 0)
preds = hello_net.predict(x_test)  # Predict labels of test data
# FIX: the correlation print appeared twice (before and after plotting); once is enough.
print("Correlation:", pearsonr(preds, y_test[:, 0])[0])
hello_net.plot_iter_metrics()
Example No. 3
0
    'max_iter'] = 1000  # Train the model for 1000 iterations. (note this should be run for much longer!)
# Evaluate on the validation data every 50 iterations.
E2Nnet_sml.pars['test_interval'] = 50
# Snapshot (save) the model weights every 1000 iterations.
E2Nnet_sml.pars['snapshot'] = 1000

# %%
# Train (optimize) the network.
# WARNING: If you have a high max_iter and no GPU, this could take awhile...
E2Nnet_sml.fit(x_train, y_train, x_valid,
               y_valid)  # If no valid data, could put test data here.

# %%
# Plot training iterations vs. the training loss, the valid-data
# mean-absolute-difference, and the valid-data correlation between
# predicted and true (y_vald) labels.
E2Nnet_sml.plot_iter_metrics()

# %%
# Predict labels for the held-out test data.
preds = E2Nnet_sml.predict(x_test)

# %%
# Report the evaluation metrics.
E2Nnet_sml.print_results(preds, y_test)

# %%
# Persist the trained model to disk.
E2Nnet_sml.save('models/E2Nnet_sml.pkl')

# %%
# Now let's try removing and loading the saved model.