Example #1
def breast_cancer(x_train, y_train, x_val, y_val, params):
    print("Iteration parameters: ", params)

    def weights_init_uniform_rule(m):
        # Initialize each Linear layer with weights drawn uniformly from
        # [-1/sqrt(n), 1/sqrt(n)], where n is the layer's input size,
        # and zero the biases.
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            n = m.in_features
            y = 1.0 / np.sqrt(n)
            m.weight.data.uniform_(-y, y)
            m.bias.data.fill_(0)

    manager = DataManager.from_numpy(train_inputs=x_train,
                                     train_labels=y_train,
                                     batch_size=params["batch_size"],
                                     validation_inputs=x_val,
                                     validation_labels=y_val)
    net = BreastCancerNet(n_feature=x_train.shape[1],
                          first_neuron=params["first_neuron"],
                          second_neuron=params["second_neuron"],
                          dropout=params["dropout"])
    net.apply(weights_init_uniform_rule)
    net.init_history()
    model = DeepLearningInterface(model=net,
                                  optimizer_name=params["optimizer_name"],
                                  learning_rate=params["learning_rate"],
                                  loss_name=params["loss_name"],
                                  metrics=["accuracy"])
    model.add_observer("after_epoch", update_talos_history)
    model.training(manager=manager,
                   nb_epochs=params["epochs"],
                   checkpointdir=None,
                   fold_index=0,
                   with_validation=True)
    return net, net.parameters()
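
The function above follows the Talos model-function contract: it takes train/validation arrays plus a params dict and returns the network and its parameters. A minimal sketch of how such a function is typically handed to a Talos scan; the grid below is illustrative, not taken from the original example:

import talos

# Illustrative grid; keys must match the params lookups in breast_cancer().
p = {"batch_size": [16, 32],
     "first_neuron": [32, 64],
     "second_neuron": [16, 32],
     "dropout": [0.25, 0.5],
     "optimizer_name": ["Adam"],
     "learning_rate": [1e-3, 1e-4],
     "loss_name": ["BCEWithLogitsLoss"],
     "epochs": [30]}

scan = talos.Scan(x=x_train, y=y_train, x_val=x_val, y_val=y_val,
                  params=p, model=breast_cancer,
                  experiment_name="breast_cancer")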
Example #2
masker = MultiNiftiMasker(mask_img=mask_img, standardize=True)
masker.fit()
if not os.path.isfile(DATAFILE):
    y = np.concatenate(masker.transform(func_filenames), axis=0)
    print(y.shape)
    np.save(DATAFILE, y)
else:
    y = np.load(DATAFILE)
iterator = masker.inverse_transform(y).get_fdata()
# Reorder the 4D volume from (X, Y, Z, T) to (T, X, Y, Z), then add a
# channel axis so samples have shape (1, X, Y, Z).
iterator = iterator.transpose((3, 0, 1, 2))
iterator = np.expand_dims(iterator, axis=1)
print(iterator.shape)

# Data iterator
manager = DataManager.from_numpy(train_inputs=iterator,
                                 batch_size=BATCH_SIZE,
                                 add_input=True)

# Create model
name = "ResAENet"
model_weights = os.path.join(WORKDIR, "checkpoint_" + name,
                             "model_0_epoch_{0}.pth".format(EPOCH))
if os.path.isfile(model_weights):
    pretrained = model_weights
else:
    pretrained = None
params = NetParameters(input_shape=(61, 73, 61),
                       cardinality=1,
                       layers=[3, 4, 6, 3],
                       n_channels_in=1,
                       decode=True)
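
If a checkpoint was found above, it can be inspected with plain torch before being passed on as pretrained weights. A minimal sketch; the internal layout of pynet checkpoint files is assumed here, not documented in this snippet:

import torch

if pretrained is not None:
    # Load on CPU so inspection works without a GPU.
    state = torch.load(pretrained, map_location="cpu")
    # Checkpoints are commonly a bare state dict or a dict wrapping one;
    # listing the top-level keys reveals which layout this file uses.
    print(list(state.keys()) if isinstance(state, dict) else type(state))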
Example #3
# Show example noisy training data with the signatures applied.
# The subtle differences are not obvious to the human eye, but the cross
# row and column above perturbed the matrices below with the y weights.
# Show in the title how heavily each signature is weighted.
plt.figure(figsize=(16, 4))
for idx in range(3):
    plt.subplot(1, 3, idx + 1)
    plt.imshow(np.squeeze(x_train[idx]), interpolation="None")
    plt.colorbar()
    plt.title(y_train[idx])
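
For intuition, here is a hypothetical toy generator for such signature-perturbed matrices; it is not the code that produced x_train/y_train above, just an illustration of the perturbation described in the comment:

import numpy as np

rng = np.random.default_rng(0)
n_samples, size, sig_row, sig_col = 3, 30, 10, 20
toy_y = rng.uniform(0, 1, size=(n_samples, 2))           # signature weights
toy_x = rng.normal(0, 1, size=(n_samples, size, size))   # background noise
for i, (w_row, w_col) in enumerate(toy_y):
    toy_x[i, sig_row, :] += w_row   # row signature scaled by its weight
    toy_x[i, :, sig_col] += w_col   # column signature scaled by its weight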

manager = DataManager.from_numpy(train_inputs=x_train,
                                 train_labels=y_train,
                                 validation_inputs=x_valid,
                                 validation_labels=y_valid,
                                 test_inputs=x_test,
                                 test_labels=y_test,
                                 batch_size=128,
                                 continuous_labels=True)
interfaces = pynet.get_interfaces()["graph"]
net_params = pynet.NetParameters(input_shape=(90, 90),
                                 in_channels=1,
                                 num_classes=2,
                                 nb_e2e=32,
                                 nb_e2n=64,
                                 nb_n2g=30,
                                 dropout=0.5,
                                 leaky_alpha=0.1,
                                 twice_e2e=False,
                                 dense_sml=True)
my_loss = pynet.get_tools()["losses"]["MSELoss"]()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
losses = pynet.get_tools(tool_name="losses")
setup_logging(level="info")

Example #4

#############################################################################
# Kang dataset
# ------------
#
# Fetch & load the Kang dataset:

data, trainset, testset, membership_mask = fetch_kang(datasetdir=datasetdir,
                                                      random_state=0)
gtpath = os.path.join(datasetdir, "kang_recons.h5ad")
manager = DataManager.from_numpy(train_inputs=trainset,
                                 validation_inputs=testset,
                                 test_inputs=data.X,
                                 batch_size=batch_size,
                                 sampler="random",
                                 add_input=True)

#############################################################################
# Training
# --------
#
# Create/train the model:

if checkpointdir is not None:
    weights_filename = os.path.join(
        checkpointdir, "model_0_epoch_{0}.pth".format(nb_epochs - 1))
params = NetParameters(membership_mask=membership_mask,
                       latent_dim=latent_dim,
                       hidden_layers=[12])
# (the remaining NetParameters arguments are truncated in the original
# snippet)
Example #5
    nb_features = 50
    toy_data = {}
    for name, nb_samples in (("train", 1000), ("test", 2)):
        x1 = torch.randn(nb_samples, nb_features)
        x2 = torch.randn(nb_samples, nb_features) + 1.5
        x = torch.cat([x1, x2], dim=0)
        y1 = torch.zeros(nb_samples, 1)
        y2 = torch.ones(nb_samples, 1)
        y = torch.cat([y1, y2], dim=0)
        toy_data[name] = (x, y)
        if name == "train":
            plt.figure()
            plt.scatter(x1[:, 0], x1[:, 1], color="b")
            plt.scatter(x2[:, 0], x2[:, 1], color="r")
    manager = DataManager.from_numpy(train_inputs=toy_data["train"][0],
                                     train_labels=toy_data["train"][1],
                                     batch_size=50,
                                     test_inputs=toy_data["test"][0],
                                     test_labels=toy_data["test"][1])


class DenseFeedForwardNet(nn.Module):
    def __init__(self, nb_features):
        """ Initialize the instance.

        Parameters
        ----------
        nb_features: int
            the size of the feature vector.
        """
        super(DenseFeedForwardNet, self).__init__()
        self.layers = nn.Sequential(
            # (layer definitions truncated in the original snippet)
        )

Example #6

data = np.concatenate(data, axis=0)
labels = np.asarray(labels)
print("dataset: x {0} - y {1}".format(data.shape, labels.shape))


# Create data manager
manager = DataManager.from_numpy(
    train_inputs=data, train_labels=np.zeros(labels.shape),
    batch_size=BATCH_SIZE)


class FKmeans(object):
    def __init__(self, n_clusters):
        self.n_clusters = n_clusters

    def fit(self, data):
        n_data, d = data.shape
        self.clus = faiss.Kmeans(d, self.n_clusters)
        self.clus.seed = np.random.randint(1234)
        self.clus.niter = 20
        self.clus.max_points_per_centroid = 10000000
        self.clus.train(data)
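
A hypothetical usage of the FKmeans wrapper above, assuming faiss is installed; it reuses the index that faiss.Kmeans builds during training to get cluster assignments:

# faiss expects contiguous float32 data, flattened to (n_samples, n_features).
features = np.ascontiguousarray(data.reshape(len(data), -1).astype("float32"))
kmeans = FKmeans(n_clusters=10)
kmeans.fit(features)
# Assign each sample to its nearest trained centroid.
_, assignments = kmeans.clus.index.search(features, 1)
print(assignments.ravel()[:10])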
Example #7
tri_texture = np.asarray(
    [np.round(np.mean(labels[tri])) for tri in ico_triangles])
plot_trisurf(fig, ax, ico_vertices, ico_triangles, tri_texture)
data = np.zeros((N_SAMPLES, N_CLASSES, len(labels)), dtype=float)
for klass in (0, 1):
    k_indices = np.argwhere(labels == klass).squeeze()
    for loc, scale in SAMPLES[klass]:
        data[:, klass, k_indices] = np.random.normal(loc=loc,
                                                     scale=scale,
                                                     size=len(k_indices))
labels = np.ones((N_SAMPLES, 1)) * labels
print("dataset: x {0} - y {1}".format(data.shape, labels.shape))

# Create data manager
manager = DataManager.from_numpy(train_inputs=data,
                                 train_labels=labels,
                                 test_inputs=data,
                                 test_labels=labels,
                                 batch_size=BATCH_SIZE)

# Create model
net_params = pynet.NetParameters(in_order=ICO_ORDER,
                                 in_channels=2,
                                 out_channels=N_CLASSES,
                                 depth=3,
                                 start_filts=32,
                                 conv_mode="1ring",
                                 up_mode="transpose",
                                 cachedir=os.path.join(OUTDIR, "cache"))
model = SphericalUNetEncoder(net_params,
                             optimizer_name="SGD",
                             learning_rate=0.1)
# (the remaining SphericalUNetEncoder arguments are truncated in the
# original snippet)

Example #8

ds_train = SyntheticDataset(n_samples=n_samples,
                            lat_dim=true_lat_dims,
                            n_feats=n_feats,
                            n_classes=n_classes,
                            train=True,
                            snr=snr)
ds_val = SyntheticDataset(n_samples=n_samples,
                          lat_dim=true_lat_dims,
                          n_feats=n_feats,
                          n_classes=n_classes,
                          train=False,
                          snr=snr)
image_datasets = {"train": ds_train, "val": ds_val}
manager = DataManager.from_numpy(train_inputs=ds_train.data,
                                 train_outputs=None,
                                 train_labels=ds_train.labels,
                                 validation_inputs=ds_val.data,
                                 validation_outputs=None,
                                 validation_labels=ds_val.labels,
                                 batch_size=batch_size,
                                 sampler="random",
                                 add_input=True)
print("- datasets:", image_datasets)
print("- shapes:", ds_train.data.shape, ds_val.data.shape)

# Display generated data
method = manifold.TSNE(n_components=2, init="pca", random_state=0)
y_train = method.fit_transform(ds_train.data)
y_val = method.fit_transform(ds_val.data)
fig, axs = plt.subplots(nrows=3, ncols=2)
for cnt, (name, y, labels) in enumerate(
    (("train", y_train, ds_train.labels), ("val", y_val, ds_val.labels))):
    colors = labels.astype(float)
    # (the plotting body is truncated in the original snippet)