max_iter=20)
# --- Example 1: deep clustering with a BrainNetCNN backbone ----------------
# Build a BrainNetCNN over 90x90 connectivity matrices and wrap it in a
# DeepClusterClassifier whose pseudo labels come from k-means.
# NOTE(review): BrainNetCNN, pynet, DeepClusterClassifier, Board, kmeans,
# train_loader, N_CLUSTERS, update_pseudo_labels and update_board are
# defined elsewhere in the file -- not visible in this chunk.
net = BrainNetCNN(
    input_shape=(90, 90),          # square connectivity matrix input
    in_channels=1,
    num_classes=N_CLUSTERS,        # one output class per k-means cluster
    nb_e2e=32,                     # presumably edge-to-edge layer width (BrainNetCNN E2E)
    nb_e2n=64,                     # presumably edge-to-node layer width (E2N)
    nb_n2g=30,                     # presumably node-to-graph layer width (N2G)
    dropout=0,                     # dropout disabled for this example
    leaky_alpha=0.1,               # negative slope of the leaky activations
    twice_e2e=False,
    dense_sml=False)
net_params = pynet.NetParameters(
    network=net,
    clustering=kmeans,             # clustering model that produces pseudo labels
    data_loader=train_loader,
    n_batchs=10,
    pca_dim=6,                     # presumably PCA dimension applied before clustering
    assignment_logfile=None,       # no on-disk log of cluster assignments
    use_cuda=False)                # CPU only in this example
model = DeepClusterClassifier(
    net_params,
    optimizer_name="SGD",
    learning_rate=0.001,
    momentum=0.9,
    weight_decay=10**-5,
    # loss=my_loss)
    loss_name="CrossEntropyLoss")  # loss selected by name, not by object
# Live monitoring board -- assumes a visdom-style server on localhost:8097.
model.board = Board(port=8097, host="http://localhost", env="deepcluster")
# Recompute pseudo labels before each epoch; push metrics after each epoch.
model.add_observer("before_epoch", update_pseudo_labels)
model.add_observer("after_epoch", update_board)
Example #2
0
    plt.title(y_train[idx])

# --- Example 2: graph regression with BrainNetCNN --------------------------
# Wrap the numpy splits in a DataManager, instantiate the registered
# "BrainNetCNNGraph" interface, and monitor training on a board.
# NOTE(review): DataManager, pynet, Board, the x_*/y_* arrays and
# update_board are defined elsewhere in the file.
manager = DataManager.from_numpy(train_inputs=x_train,
                                 train_labels=y_train,
                                 validation_inputs=x_valid,
                                 validation_labels=y_valid,
                                 test_inputs=x_test,
                                 test_labels=y_test,
                                 batch_size=128,
                                 continuous_labels=True)  # regression-style targets
interfaces = pynet.get_interfaces()["graph"]
net_params = pynet.NetParameters(input_shape=(90, 90),
                                 in_channels=1,
                                 num_classes=2,
                                 nb_e2e=32,
                                 nb_e2n=64,
                                 nb_n2g=30,
                                 dropout=0.5,     # dropout enabled here (cf. 0 in example 1)
                                 leaky_alpha=0.1,
                                 twice_e2e=False,
                                 dense_sml=True)
# NOTE(review): my_loss is constructed but never passed to the model -- the
# model below selects its loss by name ("MSELoss") instead. Possibly dead code.
my_loss = pynet.get_tools()["losses"]["MSELoss"]()
model = interfaces["BrainNetCNNGraph"](net_params,
                                       optimizer_name="Adam",
                                       learning_rate=0.01,
                                       weight_decay=0.0005,
                                       loss_name="MSELoss")
# Live monitoring board -- assumes a visdom-style server on localhost:8097.
model.board = Board(port=8097, host="http://localhost", env="main")
model.add_observer("after_epoch", update_board)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=model.optimizer,
                                           mode="min",
                                           factor=0.1,
Example #3
0
                                                     size=len(k_indices))
# --- Example 3: spherical U-Net segmentation -------------------------------
# Broadcast the labels across all samples, then train a SphericalUNetEncoder
# on icosahedral surface data.
# NOTE(review): np, N_SAMPLES, data, labels, DataManager, BATCH_SIZE, pynet,
# ICO_ORDER, N_CLASSES, os, OUTDIR, SphericalUNetEncoder, Board and
# update_board are defined elsewhere in the file.
labels = np.ones((N_SAMPLES, 1)) * labels  # broadcast: one label row per sample
print("dataset: x {0} - y {1}".format(data.shape, labels.shape))

# Create data manager
# NOTE(review): train and test splits reuse the SAME arrays, so the "test"
# score is really a training-set score -- acceptable only for a demo.
manager = DataManager.from_numpy(train_inputs=data,
                                 train_labels=labels,
                                 test_inputs=data,
                                 test_labels=labels,
                                 batch_size=BATCH_SIZE)

# Create model
net_params = pynet.NetParameters(in_order=ICO_ORDER,  # presumably icosahedron subdivision order
                                 in_channels=2,
                                 out_channels=N_CLASSES,
                                 depth=3,             # presumably U-Net encoder/decoder depth
                                 start_filts=32,
                                 conv_mode="1ring",   # presumably 1-ring neighborhood convolutions
                                 up_mode="transpose",
                                 cachedir=os.path.join(OUTDIR, "cache"))  # on-disk cache location
model = SphericalUNetEncoder(net_params,
                             optimizer_name="SGD",
                             learning_rate=0.1,
                             momentum=0.99,
                             weight_decay=10**-4,
                             loss_name="CrossEntropyLoss",
                             use_cuda=True)  # requires a CUDA device -- TODO confirm availability
# Live monitoring board -- assumes a visdom-style server on localhost:8097.
model.board = Board(port=8097, host="http://localhost", env="spherical_unet")
model.add_observer("after_epoch", update_board)

# Train model
test_history, train_history = model.training(manager=manager,