Example 1
    def train_epoch(X, y, LR):

        loss = 0
        # number of full minibatches (integer division so range() gets an int)
        batches = len(X) // batch_size
        for i in range(batches):
            # slice the minibatch and move it from NCHW to NHWC for augmentation
            train_x_ = X[i * batch_size:(i + 1) * batch_size].transpose(
                [0, 2, 3, 1])
            train_x_ = augment_images(train_x_,
                                      rotation_range=30,
                                      height_shift_range=0.1,
                                      width_shift_range=0.1,
                                      shear_range=0.1,
                                      zoom_range=(1, 1))
            # back to NCHW, then adapt the batch to the BinarEye input format
            train_x_ = adapt_to_binareye(train_x_.transpose([0, 3, 1, 2]),
                                         filters=64)
            #new_loss, new_err, new_1w0, new_0w1, new_precision,new_recall = train_fn(train_x_,np.subtract(np.multiply(train_y[i*batch_size:(i+1)*batch_size],2),1),LR)
            loss += train_fn(train_x_, y[i * batch_size:(i + 1) * batch_size],
                             LR)

        loss /= batches

        return loss
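The keyword arguments in the augment_images call above match the parameters of Keras' ImageDataGenerator. As a hedged sketch of what such a helper could look like (an assumption, not the repository's actual implementation; the input batch is assumed to be NHWC):

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

def augment_images_sketch(batch,
                          rotation_range=30,
                          height_shift_range=0.1,
                          width_shift_range=0.1,
                          shear_range=0.1,
                          zoom_range=(1, 1)):
    """Apply an independent random transform to every NHWC image in batch."""
    datagen = ImageDataGenerator(rotation_range=rotation_range,
                                 height_shift_range=height_shift_range,
                                 width_shift_range=width_shift_range,
                                 shear_range=shear_range,
                                 zoom_range=zoom_range)
    return np.stack([datagen.random_transform(img) for img in batch])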
Example 2
    def train_epoch(train_x, train_y, train_loss_array, train_err_array,
                    train_1w0_array, train_0w1_array, train_precision_array,
                    train_recall_array, LR, LR_decay, directory):

        loss = 0
        batch = 0
        nr_batches = train_y.shape[0] // batch_size
        minibatch_start_time = time.time()
        np.save('./train_val_data/batches_' + run_name + '.npy', nr_batches)
        #for x_batch, y_batch in datagen_tr.flow_from_directory(directory,target_size=(32,32),batch_size=batch_size,class_mode='categorical'):
        #for x_batch, y_batch in datagen_tr.flow(train_x,train_y,batch_size=batch_size):
        for i in range(nr_batches):
            train_x_ = augment_images(train_x[i * batch_size:(i + 1) *
                                              batch_size],
                                      rotation_range=30,
                                      height_shift_range=0.1,
                                      width_shift_range=0.1,
                                      shear_range=0.1,
                                      zoom_range=(1, 1))
            train_x_ = adapt_to_binareye(train_x_.transpose([0, 3, 1, 2]),
                                         filters=num_filters)
            # map the {0, 1} labels to {-1, +1} (i.e. 2 * y - 1) before training
            new_loss, new_err, new_1w0, new_0w1, new_precision, new_recall = train_fn(
                train_x_,
                np.subtract(
                    np.multiply(train_y[i * batch_size:(i + 1) * batch_size],
                                2), 1), LR)
            loss += new_loss
            train_loss_array = np.append(train_loss_array, new_loss)
            np.save('./train_val_data/train_loss_' + run_name + '.npy',
                    train_loss_array)
            train_err_array = np.append(train_err_array, new_err)
            np.save('./train_val_data/train_err_' + run_name + '.npy',
                    train_err_array)
            train_1w0_array = np.append(train_1w0_array, new_1w0)
            np.save('./train_val_data/train_1w0_' + run_name + '.npy',
                    train_1w0_array)
            train_0w1_array = np.append(train_0w1_array, new_0w1)
            np.save('./train_val_data/train_0w1_' + run_name + '.npy',
                    train_0w1_array)
            train_precision_array = np.append(train_precision_array,
                                              new_precision)
            np.save('./train_val_data/train_precision_' + run_name + '.npy',
                    train_precision_array)
            train_recall_array = np.append(train_recall_array, new_recall)
            np.save('./train_val_data/train_recall_' + run_name + '.npy',
                    train_recall_array)
            if batch % 10 == 0:
                minibatch_duration = time.time() - minibatch_start_time
                print(
                    'batch {:4d} / {} | loss: {:4f} | error: {} | precision: {} | recall: {} | LR = {} | time = {}'
                    .format(batch, nr_batches, new_loss + 0, new_err,
                            new_precision, new_recall, LR, minibatch_duration))
                minibatch_start_time = time.time()
            batch += 1
            LR *= LR_decay
            if batch >= nr_batches:
                break

        loss /= nr_batches

        return loss, LR, train_loss_array, train_err_array, train_1w0_array, train_0w1_array, train_precision_array, train_recall_array
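The inline np.subtract(np.multiply(..., 2), 1) expression remaps the {0, 1} targets to {-1, +1} before they reach train_fn. A minimal, self-contained illustration of that remapping (the target values below are made up):

import numpy as np

# Two one-hot {0, 1} targets; illustrative values only.
train_y_batch = np.array([[0, 1], [1, 0]], dtype=np.float32)

# Same expression as in the snippet above, i.e. 2 * y - 1.
targets_pm1 = np.subtract(np.multiply(train_y_batch, 2), 1)
print(targets_pm1)  # [[-1.  1.]
                    #  [ 1. -1.]]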
Example 3
def get_random_chunks(image,
                      labels,
                      out_dir,
                      shape=(10, 256, 256),
                      n=25,
                      min_affinity=300,
                      channels=('z-1', 'y-1', 'x-1', 'centreness'),
                      scale=(4, 1, 1),
                      log=True):
    '''
    Obtain random chunks of data from whole ground truth volumes.

    Parameters
    ----------
    image: array-like
        Same shape as labels.
    labels: array-like
        Same shape as image.
    shape: tuple of int
        Shape of the chunks to obtain.
    n: int
        Number of random chunks to obtain.
    min_affinity: int
        Minimum cut-off for the summed affinities of a chunk.
        As affinities belong to {0, 1}, this is effectively
        the number of voxels that border labels.
    
    Returns
    -------
    xs: list of torch.Tensor
        List of images for training 
    ys: list of torch.Tensor
        List of affinities for training
    ids: list of str
        ID strings by which each image and label are named.
        Eventually used for correctly labeling network output
 
    '''
    im = np.array(image)
    l = np.array(labels)  # ground-truth label volume
    assert len(im.shape) == len(shape)
    a = get_training_labels(l, channels=channels, scale=scale)
    xs = []
    ys = []
    labs = []
    i = 0
    df = {'z_start': [], 'y_start': [], 'x_start': []}
    while i < n:
        dim_randints = []
        for j, dim in enumerate(shape):
            max_ = im.shape[j] - dim - 1
            ri = np.random.randint(0, max_)
            dim_randints.append(ri)
        # Get the network output: affinities
        s_ = [slice(None, None)]  # keep the full affinity channel axis
        for j in range(len(shape)):
            s_.append(slice(dim_randints[j], dim_randints[j] + shape[j]))
        s_ = tuple(s_)
        y = a[s_]
        # accept the chunk only if it contains enough border voxels
        if y.sum() > min_affinity * len(channels):
            # add coords to output df
            for j in range(len(shape)):
                _add_to_dataframe(j, dim_randints[j], df)
            # Get the network input: image
            s_ = [
                slice(dim_randints[j], dim_randints[j] + shape[j])
                for j in range(len(shape))
            ]
            s_ = tuple(s_)
            x = im[s_]
            x = normalise_data(x)
            # get the GT labels so that a quantitative comparison can later
            #   be made with the final segmentation
            lab = l[s_]  # l is the original label volume
            # data augmentation for better generalisation
            x, y, lab = augment_images(x, y, lab)
            # add the affinities and image chunk to the training data
            y = torch.from_numpy(y.copy())
            ys.append(y)
            x = torch.from_numpy(x.copy())
            xs.append(x)
            labs.append(lab)
            # chunk accepted; count it towards the requested n
            i += 1
    print(LINE)
    s = f'Obtained {n} {shape} chunks of training data'
    print(s)
    if log:
        write_log(LINE, out_dir)
        write_log(s, out_dir)
    log_dir = log_dir_or_None(log, out_dir)
    print_labels_info(channels, out_dir=log_dir)
    ids = save_random_chunks(xs, ys, labs, out_dir)
    now = datetime.now()
    d = now.strftime("%y%m%d_%H%M%S")
    df['data_ids'] = ids
    df = pd.DataFrame(df)
    df.to_csv(os.path.join(out_dir, 'start_coords' + d + '.csv'))
    return xs, ys, ids
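Stripped of the affinity bookkeeping, the sampling step above reduces to drawing a random start per axis and slicing a fixed-shape window out of the volume. A self-contained sketch with placeholder shapes (not taken from the original data):

import numpy as np

# Placeholder volume and chunk shape; the real data comes from image/labels.
volume = np.random.rand(50, 512, 512)
chunk_shape = (10, 256, 256)

# One random start per axis, leaving room for the full window.
starts = [np.random.randint(0, volume.shape[j] - chunk_shape[j] - 1)
          for j in range(len(chunk_shape))]
window = tuple(slice(s, s + d) for s, d in zip(starts, chunk_shape))
chunk = volume[window]
assert chunk.shape == chunk_shape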
Example 4
    if not isdir(OUTPATH):
        print(f"Creating new folder {OUTPATH}.")
        mkdir(OUTPATH)
    if not os.listdir(OUTPATH):

        if TRAIN_ON_ROBIN:
            files_robin = aug.loadAllFiles(ROBINPATH)

        if TRAIN_ON_COMPLEX:
            files_complex = aug.loadAllFiles(COMPLEXPATH)

        if TRAIN_ON_ROBIN:
            print(f"Preparing the ROBIN files...")
            aug.augment_images(images=files_robin,
                               outpath=OUTPATH,
                               filename='robin_data',
                               img_size=(IMG_SIZE[1], IMG_SIZE[0]),
                               saveiter=SAVE_N_AUG_IMAGES_PER_NPY)

        if TRAIN_ON_COMPLEX:
            print(f"Preparing the COMPLEX files...")
            aug.augment_images(images=files_complex,
                               outpath=OUTPATH,
                               filename='complex_data',
                               img_size=(IMG_SIZE[1], IMG_SIZE[0]),
                               saveiter=SAVE_N_AUG_IMAGES_PER_NPY)

    # Train the GAN
    gan = GAN()
    gan.train(epochs=EPOCHS, sample_interval=SAMPLE_INTERVAL)
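The snippet relies on module-level names (aug, GAN, and the upper-case settings) defined elsewhere in the script. The block below only sketches the kind of configuration those names suggest; every value and the aug import path are placeholders, not taken from the original project:

import os
from os import mkdir
from os.path import isdir
import augmentation as aug  # hypothetical module providing loadAllFiles / augment_images

OUTPATH = './augmented_data'        # placeholder paths
ROBINPATH = './robin_images'
COMPLEXPATH = './complex_images'
TRAIN_ON_ROBIN = True
TRAIN_ON_COMPLEX = False
IMG_SIZE = (128, 256)               # placeholder; the calls above pass (IMG_SIZE[1], IMG_SIZE[0])
SAVE_N_AUG_IMAGES_PER_NPY = 1000    # placeholder chunking for the saved .npy files
EPOCHS = 30000                      # placeholder GAN training settings
SAMPLE_INTERVAL = 200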