Code Example #1
from ann4brains.nets import BrainNetCNN  # BrainNetCNN lives in ann4brains.nets (the module referenced in these examples).


def e2n_generator(net_name,
                  h,
                  w,
                  n_injuries,
                  max_iter=10000,
                  test_interval=50,
                  snapshot=1000):
    # Specify the architecture using a list of dictionaries.
    e2n_arch = [
        [
            'e2n',  # e2n layer
            {
                'n_filters': 130,  # 130 feature maps
                'kernel_h': h,
                'kernel_w': w  # Cross filter of size h x 1 by 1 x w (non-sliding, only on diagonal)
            }
        ],
        ['dropout', {
            'dropout_ratio': 0.5
        }],  # Dropout with 0.5 dropout rate.
        ['relu', {
            'negative_slope': 0.33
        }],  # Very leaky ReLU.
        ['fc', {
            'n_filters': 30
        }],  # Fully connected/dense (Node-to-Graph when after e2n) layer
        ['relu', {
            'negative_slope': 0.33
        }],  # Very leaky ReLU
        ['out', {
            'n_filters': n_injuries
        }]  # Output layer with n_injuries nodes (e.g., 2 nodes for 2 injury classes).
    ]

    # Create BrainNetCNN model
    E2Nnet_sml = BrainNetCNN(
        net_name,  # Unique model name.
        e2n_arch,  # List of dictionaries specifying the architecture.
        hardware='cpu',  # Or 'gpu'.
        dir_data='./generated_synthetic_data',  # Where to write the data to.
    )
    # Set the hyper-parameters.
    E2Nnet_sml.pars['max_iter'] = max_iter  # Number of training iterations (note: this should be run for much longer!)
    E2Nnet_sml.pars['test_interval'] = test_interval  # How often (in iterations) to check the validation data.
    E2Nnet_sml.pars['snapshot'] = snapshot  # How often (in iterations) to save the model weights.

    return E2Nnet_sml
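
A minimal usage sketch for the generator above. This is an illustration, not part of the original file: the data arrays (x_train, y_train, etc.) and the 90 x 90 connectome dimensions are hypothetical, and only methods that appear elsewhere in these examples (fit, predict) are used.

# Hypothetical data: 4-D connectome arrays of shape (n_samples, 1, h, w),
# with one regression target per injury class in the y arrays.
E2Nnet_sml = e2n_generator('E2Nnet_sml', h=90, w=90, n_injuries=2, max_iter=1000)
E2Nnet_sml.fit(x_train, y_train, x_valid, y_valid)  # Train the network.
preds = E2Nnet_sml.predict(x_test)  # Predict on held-out test data.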
Code Example #2
File: helloworld.py  Project: UsadiqPurdue/DL
    ['fc', {'n_filters': 30}],  # Fully connected (n2g) layer with 30 filters.
    ['relu', {'negative_slope': 0.33}],
    ['out', {'n_filters': 1}]]  # Output layer with a single output node.

e2e_arch = [
    ['e2e',  # e2e layer
     {'n_filters': 32,  # 32 feature maps
      'kernel_h': x_train.shape[2], 'kernel_w': x_train.shape[3]  # Sliding cross filter of size h x 1 by 1 x w
      }
     ],
    ['e2e',  # e2e layer
     {'n_filters': 32,  # 32 feature maps
      'kernel_h': x_train.shape[2], 'kernel_w': x_train.shape[3]  # Sliding cross filter of size h x 1 by 1 x w
      }
     ],
    ['e2n', {'n_filters': 64, 'kernel_h': x_train.shape[2], 'kernel_w': x_train.shape[3]}],
    ['dropout', {'dropout_ratio': 0.5}],
    ['relu', {'negative_slope': 0.33}],
    ['fc', {'n_filters': 30}],
    ['relu', {'negative_slope': 0.33}],
    ['out', {'n_filters': 1}]
]

hello_net = BrainNetCNN('e2e2', e2e_arch)  # Create BrainNetCNN model
hello_net.fit(x_train, y_train[:, 0], x_valid, y_valid[:, 0])  # Train (regress only on class 0)
preds = hello_net.predict(x_test)  # Predict labels of test data
print("Correlation:", pearsonr(preds, y_test[:, 0])[0])
hello_net.plot_iter_metrics()

print("Correlation:", pearsonr(preds, y_test[:, 0])[0])
Code Example #3
    }],  # Very leaky ReLU.
    ['fc', {
        'n_filters': 30
    }],  # Fully connected/dense (Node-to-Graph when after e2n) layer
    ['relu', {
        'negative_slope': 0.33
    }],  # Very leaky ReLU
    ['out', {
        'n_filters': n_injuries
    }]  # Output layer with n_injuries nodes (e.g., 2 nodes for 2 injury classes).
]

# Create BrainNetCNN model
E2Nnet_sml = BrainNetCNN(
    net_name,  # Unique model name.
    e2n_arch,  # List of dictionaries specifying the architecture.
    hardware='cpu',  # Or 'gpu'.
    dir_data='./generated_synthetic_data',  # Where to write the data to.
)
# Set the hyper-parameters.
E2Nnet_sml.pars['max_iter'] = 100  # Train the model for 100 iterations (note: this should be run for much longer!)
E2Nnet_sml.pars['test_interval'] = 50  # Check the validation data every 50 iterations.
E2Nnet_sml.pars['snapshot'] = 20  # Save the model weights every 20 iterations.

# Train (optimize) the network.
# WARNING: If you have a high max_iter and no GPU, this could take a while...
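
The snippet cuts off before the training call itself. Based on the complete version in Code Example #2, the step that follows is most likely:

E2Nnet_sml.fit(x_train, y_train, x_valid, y_valid)  # Data arrays assumed to be defined earlier in the original file.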
Code Example #4
from ann4brains.nets import BrainNetCNN  # Same import as in Code Example #1.


def e2e(net_name,
        h,
        w,
        n_injuries,
        max_iter=10000,
        test_interval=100,
        snapshot=1000):
    # Specify the architecture.
    e2e_arch = [
        [
            'e2e',  # e2e layer 
            {
                'n_filters': 32,  # 32 feature maps 
                'kernel_h': h,
                'kernel_w': w  # Sliding cross filter of size h x 1 by 1 x w
            }
        ],
        ['e2n', {
            'n_filters': 64,
            'kernel_h': h,
            'kernel_w': w
        }],
        ['dropout', {
            'dropout_ratio': 0.5
        }],
        ['relu', {
            'negative_slope': 0.33
        }],
        ['fc', {
            'n_filters': 30
        }],
        ['relu', {
            'negative_slope': 0.33
        }],
        ['out', {
            'n_filters': n_injuries
        }]
    ]

    # Create BrainNetCNN model
    E2Enet_sml = BrainNetCNN(
        net_name,
        e2e_arch,
        hardware='cpu',  # Or 'gpu'.
        dir_data='./generated_synthetic_data',  # Where to write the data to.
    )

    # Overwrite default parameters.
    # ann4brains.nets.get_default_hyper_params() shows the hyper-parameters that can be overwritten.
    #E2Enet_sml.pars['max_iter'] = 100000 # Train the model for 100K iterations.
    #E2Enet_sml.pars['test_interval'] = 500 # Check the valid data every 500 iterations.
    #E2Enet_sml.pars['snapshot'] = 10000 # Save the model weights every 10000 iterations.

    # NOTE: training with the commented-out parameters above takes a while (roughly 2 hours on a GPU).
    # For simple, fast experiments to start with, use these settings instead.
    E2Enet_sml.pars['max_iter'] = max_iter  # Number of training iterations (note: this should be run for much longer!)
    E2Enet_sml.pars['test_interval'] = test_interval  # How often (in iterations) to check the validation data.
    E2Enet_sml.pars['snapshot'] = snapshot  # How often (in iterations) to save the model weights.

    return E2Enet_sml
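
A short usage sketch for this constructor. The get_default_hyper_params() call mirrors the comment inside the function above (its exact signature is an assumption based on that comment); the data arrays and dimensions are illustrative.

from ann4brains.nets import get_default_hyper_params  # Assumed from the comment in the function above.

print(get_default_hyper_params())  # Inspect which hyper-parameters can be overwritten.
E2Enet_sml = e2e('E2Enet_sml', h=90, w=90, n_injuries=2, max_iter=1000, test_interval=50)
E2Enet_sml.fit(x_train, y_train, x_valid, y_valid)  # Train; the x/y arrays are assumed to exist.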
Code Example #5
File: helloworld.py  Project: oadenekan123/ann4norm
    [
        'e2n',
        {
            'n_filters': 16,  # e2n layer with 16 filters.
            'kernel_h': x_train.shape[2],
            'kernel_w': x_train.shape[3]
        }
    ],  # Same dimensions as spatial inputs.
    ['dropout', {
        'dropout_ratio': 0.5
    }],  # Dropout at 0.5
    ['relu', {
        'negative_slope': 0.33
    }],  # For leaky-ReLU
    ['fc', {
        'n_filters': 30
    }],  # Fully connected (n2g) layer with 30 filters.
    ['relu', {
        'negative_slope': 0.33
    }],
    ['out', {
        'n_filters': 1
    }]  # Output layer with a single output node.
]

hello_net = BrainNetCNN('hello_world', hello_arch)  # Create BrainNetCNN model
hello_net.fit(x_train, y_train[:, 0], x_valid,
              y_valid[:, 0])  # Train (regress only on class 0)
preds = hello_net.predict(x_test)  # Predict labels of test data
print("Correlation:", pearsonr(preds, y_test[:, 0])[0])
Code Example #6
        'negative_slope': 0.33
    }],
    ['out', {
        'n_filters': 1
    }]
]

exp_arch = [
    ['fc', {'n_filters': 2}],
    ['relu', {'negative_slope': 0.33}],
    ['out', {'n_filters': 1}]
]

siam_A = BrainNetCNN('ann_siam', exp_arch)  # Create BrainNetCNN model

siam_A.pars['max_iter'] = epochs * num_batch  # Run for the correct number of iterations.
siam_A.pars['snapshot'] = epochs * num_batch
siam_A.pars['train_batch_size'] = batch_size
siam_A.pars['test_batch_size'] = batch_size

ann_train = siam_A.fit_siamese(x_train, y_train, x_valid, y_valid)

print("Finished executing ANN")

## SIAMESE GCN
## Start by implementing the siamese structure for the GCN.