Example #1
    original = validation_dataset.inputs[corresponding_index, :1, :, :, 64]
    # Add a channel axis so the slices can be displayed as an image grid.
    moving = np.expand_dims(moving, axis=1)
    reference = np.expand_dims(reference, axis=1)
    original = np.expand_dims(original, axis=1)
    # Rescale each volume independently to the [0, 255] display range.
    moving = (moving / moving.max()) * 255
    reference = (reference / reference.max()) * 255
    original = (original / original.max()) * 255
    return np.concatenate((moving, original, reference), axis=0)


net.board = Board(port=8097,
                  host="http://localhost",
                  env=base_network,
                  display_pred=True,
                  prepare_pred=prepare_pred)
net.add_observer("after_epoch", update_board)

scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=net.optimizer,
                                           mode="min",
                                           factor=0.5,
                                           patience=4,
                                           verbose=True,
                                           min_lr=1e-7)
train_history, valid_history = net.training(
    manager=manager,
    nb_epochs=(1 if "CI_MODE" in os.environ else 150000),
    checkpointdir=None,  # outdir,
    fold_index=0,
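# The call above is cut off in this snippet. Judging from the similar call in
# Example #3 below, the ReduceLROnPlateau scheduler defined here is presumably
# passed in through a `scheduler` argument; a sketch of the complete call under
# that assumption:
#
#     train_history, valid_history = net.training(
#         manager=manager,
#         nb_epochs=(1 if "CI_MODE" in os.environ else 150000),
#         checkpointdir=None,
#         fold_index=0,
#         scheduler=scheduler,
#         with_validation=True)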
Example #2
    img = (img / img.max()) * 255
    board.viewer.images(
        img,
        opts={
            "title": "sampling",
            "caption": "sampling"},
        win="sampling")    

model = VAE(input_dim=(28 * 28), hidden_dim=128, latent_dim=20)
interface = DeepLearningInterface(
    model=model,
    optimizer_name="Adam",
    learning_rate=0.001,
    loss=DecodeLoss())
interface.board = Board(
    port=8097, host="http://localhost", env="vae", display_pred=True,
    prepare_pred=prepare_pred)
interface.add_observer("after_epoch", update_board)
interface.add_observer("after_epoch", sampling)
test_history, train_history = interface.training(
    manager=manager,
    nb_epochs=10,
    checkpointdir=None,
    fold_index=0,
    with_validation=True)


#############################################################################
# Conclusion
# ----------
#
Example #3
                                 in_channels=1,
                                 num_classes=2,
                                 nb_e2e=32,
                                 nb_e2n=64,
                                 nb_n2g=30,
                                 dropout=0.5,
                                 leaky_alpha=0.1,
                                 twice_e2e=False,
                                 dense_sml=True)
my_loss = pynet.get_tools()["losses"]["MSELoss"]()
model = interfaces["BrainNetCNNGraph"](net_params,
                                       optimizer_name="Adam",
                                       learning_rate=0.01,
                                       weight_decay=0.0005,
                                       loss_name="MSELoss")
model.board = Board(port=8097, host="http://localhost", env="main")
model.add_observer("after_epoch", update_board)
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer=model.optimizer,
                                           mode="min",
                                           factor=0.1,
                                           patience=5,
                                           verbose=True,
                                           eps=1e-8)
test_history, train_history = model.training(manager=manager,
                                             nb_epochs=15,
                                             checkpointdir=None,
                                             fold_index=0,
                                             scheduler=scheduler,
                                             with_validation=True)
y_pred, X, y_true, loss, values = model.testing(manager=manager,
                                                with_logit=True,
Example #4
    pretrained = None
params = NetParameters(input_shape=(61, 73, 61),
                       cardinality=1,
                       layers=[3, 4, 6, 3],
                       n_channels_in=1,
                       decode=True)
interface = ResAENetEncoder(params,
                            optimizer_name="Adam",
                            learning_rate=0.001,
                            loss_name="MSELoss",
                            pretrained=pretrained,
                            use_cuda=True)

# Train model
if pretrained is None:
    interface.board = Board(port=8097, host="http://localhost", env="resnet")
    interface.add_observer("after_epoch", update_board)
    test_history, train_history = interface.training(
        manager=manager,
        nb_epochs=100,
        checkpointdir=os.path.join(WORKDIR, "checkpoint_" + name),
        fold_index=0,
        with_validation=False)


def dummy_loss(*args, **kwargs):
    """ Placeholder loss: a real objective is not needed here, presumably
    because the interface is only used below to extract latent parameters.
    """
    return -1


# Get latent parameters
if not os.path.isfile(PREDFILE):
Example #5
                                start_filts=64,
                                with_logit=False).to(device)
encoder = BGDiscriminator(in_shape=in_shape,
                          in_channels=channels,
                          out_channels=latent_dim,
                          start_filts=64,
                          with_logit=False).to(device)
g_optimizer = torch.optim.Adam(generator.parameters(), lr=0.0002)
cd_optimizer = torch.optim.Adam(code_discriminator.parameters(), lr=0.0002)
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=0.0002)
e_optimizer = torch.optim.Adam(encoder.parameters(), lr=0.0002)
real_y = Variable(
    torch.ones((batch_size, channels)).to(device, non_blocking=True))
fake_y = Variable(
    torch.zeros((batch_size, channels)).to(device, non_blocking=True))
board = Board(port=8097, host="http://localhost", env="vae")
outdir = "/tmp/vae-gan/checkpoint"
os.makedirs(outdir, exist_ok=True)

g_iter = 1
d_iter = 1
cd_iter = 1
total_iter = 200000
train_loader = manager.get_dataloader(train=True,
                                      validation=False,
                                      fold_index=0).train
loader = infinite_train_generartor(train_loader)

for iteration in range(total_iter):
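    # The loop body is elided in this snippet. Presumably each iteration pulls
    # a batch from the infinite generator, e.g. `dataitem = next(loader)`, and
    # then alternates the generator, discriminator and code-discriminator
    # updates (g_optimizer, d_optimizer, cd_optimizer) according to g_iter,
    # d_iter and cd_iter, using real_y / fake_y as the adversarial targets.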
Example #6
        "target": small_image, "cost": "corratio", "interp": "trilinear", "dof": 6}),
    "biasfield": (biasfield, {"nb_iterations": 3}),
    "downsample": (downsample, {"scale": 2}),
    "padd": (padd, {"shape": [256, 256, 256], "fill_value": 0}),
}

#############################################################################
# Test preprocessings
# --------------------
#
# We now apply the preprocessing steps on the loaded image. Results are
# directly displayed in your browser at http://localhost:8097.
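#
# Note that the Board only pushes data to an already running Visdom server; if
# nothing is listening on port 8097 yet, one can typically be started first
# with ``python -m visdom.server -port 8097`` (assuming the standard visdom
# package is installed).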

from pynet.plotting import Board

board = Board(port=8097, host="http://localhost", env="data-preprocessing")
for key, (fct, kwargs) in processes.items():
    print("Processing {0}...".format(key))
    if key in ("reorient2std", "biasfield", "register"):
        normalized = fct(small_image, **kwargs).get_data()
    else:
        normalized = fct(small_image.get_data(), **kwargs)
    if key in ("padd", "downsample", "register"):
        images = np.expand_dims(rescale(normalized, dynamic=(0, 255)), axis=0)
    else:
        images = np.asarray([rescale(small_image.get_data(), dynamic=(0, 255)),
                             rescale(normalized, dynamic=(0, 255))])
    images = images[..., images.shape[-1] // 2]
    images = np.expand_dims(images, axis=1)
    board.viewer.images(
        images, opts={"title": key, "caption": key}, win=key)
Example #7
    network=net,
    clustering=kmeans,
    data_loader=train_loader,
    n_batchs=10,
    pca_dim=6,
    assignment_logfile=None,
    use_cuda=False)
model = DeepClusterClassifier(
    net_params,
    optimizer_name="SGD",
    learning_rate=0.001,
    momentum=0.9,
    weight_decay=10**-5,
    # loss=my_loss)
    loss_name="CrossEntropyLoss")
model.board = Board(port=8097, host="http://localhost", env="deepcluster")
model.add_observer("before_epoch", update_pseudo_labels)
model.add_observer("after_epoch", update_board)


# Train model
test_history, train_history = model.training(
    manager=manager,
    nb_epochs=N_EPOCHS,
    checkpointdir=None,
    fold_index=0,
    scheduler=None,
    with_validation=False)


# Test model
Example #8
                               optimizer_name="Adam",
                               learning_rate=0.00001,
                               loss=DecodeLoss(rec_loss="mse"),
                               use_cuda=False)
    name = MODEL
else:
    params = NetParameters(input_dim=iterator.shape[-1])
    interface = STAAENetEncoder(params,
                                optimizer_name="Adam",
                                learning_rate=0.001,
                                loss_name="MSELoss",
                                use_cuda=True)
    name = MODEL

# Train model
interface.board = Board(port=8097, host="http://localhost", env="dvae")
interface.add_observer("after_epoch", update_board)
test_history, train_history = interface.training(manager=manager,
                                                 nb_epochs=50,
                                                 checkpointdir=os.path.join(
                                                     WORKDIR,
                                                     "checkpoint_" + name),
                                                 fold_index=0,
                                                 with_validation=False)

# Create test data
manager = DataManager.from_numpy(test_inputs=iterator,
                                 batch_size=BATCH_SIZE,
                                 add_input=True)
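# `add_input=True` presumably makes each input double as its own reconstruction
# target, which is what the autoencoder decode/MSE losses above expect.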

Example #9
    "add_offset": (add_offset, {
        "factor": (0.05, 0.1)
    }),
    "compose_transforms": (compose_transforms, {}),
}

#############################################################################
# Test transformations
# --------------------
#
# We now apply the transformations on the loaded image. Results are
# directly displayed in your browser at http://localhost:8097.

from pynet.plotting import Board

board = Board(port=8097, host="http://localhost", env="data-augmentation")
for cnt in range(10):
    print("Iteration: ", cnt)
    for key, (fct, kwargs) in transforms.items():
        images = np.asarray([image, np.clip(fct(image, **kwargs), 0, 255)])
        images = images[..., images.shape[-1] // 2]
        images = np.expand_dims(images, axis=1)
        board.viewer.images(images,
                            opts={
                                "title": key,
                                "caption": key
                            },
                            win=key)
    time.sleep(1)

#############################################################################
Example #10
net_params = pynet.NetParameters(in_order=ICO_ORDER,
                                 in_channels=2,
                                 out_channels=N_CLASSES,
                                 depth=3,
                                 start_filts=32,
                                 conv_mode="1ring",
                                 up_mode="transpose",
                                 cachedir=os.path.join(OUTDIR, "cache"))
model = SphericalUNetEncoder(net_params,
                             optimizer_name="SGD",
                             learning_rate=0.1,
                             momentum=0.99,
                             weight_decay=10**-4,
                             loss_name="CrossEntropyLoss",
                             use_cuda=True)
model.board = Board(port=8097, host="http://localhost", env="spherical_unet")
model.add_observer("after_epoch", update_board)

# Train model
test_history, train_history = model.training(manager=manager,
                                             nb_epochs=N_EPOCHS,
                                             checkpointdir=None,
                                             fold_index=0,
                                             scheduler=None,
                                             with_validation=False)

# Test model
y_pred, X, y_true, loss, values = model.testing(manager=manager,
                                                with_logit=True,
                                                predict=True)
print(y_pred.shape, X.shape, y_true.shape)
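# Sketch (not part of the original snippet): if y_pred still holds per-class
# scores with the class axis in position 1, a simple vertex-wise accuracy could
# be computed as:
#     accuracy = (y_pred.argmax(axis=1) == y_true).mean()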