Example #1
# Assumed imports for this snippet; get_dataset and segment_output_image are
# local helpers from the same module (see Example #3 below).
import os
import dask.array as da
import napari
import pandas as pd
from skimage.metrics import variation_of_information
def segment_from_directory(
        directory,
        suffix,
        affinities_channels,
        centroids_channel,
        thresholding_channel,
        scale=(4, 1, 1),
        w_scale=None,
        compactness=0.,
        display=True,
        validation=False,
        **kwargs
    ):
    images, _, output, GT = get_dataset(directory, 
                                        GT=True, 
                                        validation=validation)
    images = da.squeeze(images)
    print(output.shape)
    segmentations = []
    masks = []
    scores = {'GT | Output' : [], 'Output | GT' : []}
    for i in range(output.shape[0]):
        gt = GT[i].compute()
        seg, _, mask = segment_output_image(
                output[i],
                affinities_channels,
                centroids_channel,
                thresholding_channel,
                scale=w_scale,
                compactness=compactness)
        vi = variation_of_information(gt, seg)
        scores['GT | Output'].append(vi[0])
        scores['Output | GT'].append(vi[1])
        seg = da.from_array(seg)
        segmentations.append(seg)
        masks.append(mask)
    segmentations = da.stack(segmentations)
    masks = da.stack(masks)
    # Save the VI data
    scores = pd.DataFrame(scores)
    if validation:
        s = 'validation_VI.csv'
    else:
        s = '_VI.csv'
    s_path = os.path.join(directory, suffix + s)
    scores.to_csv(s_path)
    gt_o = scores['GT | Output'].mean()
    o_gt = scores['Output | GT'].mean()
    print(f'Conditional entropy H(GT|Output): {gt_o}')
    print(f'Conditional entropy H(Output|GT): {o_gt}')
    if display:
        # Now Display
        z_affs = output[:, affinities_channels[0], ...]
        y_affs = output[:, affinities_channels[1], ...]
        x_affs = output[:, affinities_channels[2], ...]
        c = output[:, thresholding_channel, ...]
        cl = output[:, centroids_channel, ...]
        v_scale = [1] * len(images.shape)
        v_scale[-3:] = scale
        print(images.shape, v_scale, z_affs.shape, masks.shape)
        v = napari.Viewer()
        v.add_image(images, name='Input images', blending='additive', visible=True, scale=v_scale)
        v.add_image(c, name='Thresholding channel', blending='additive', visible=False, scale=v_scale)
        v.add_image(cl, name='Centroids channel', blending='additive', visible=False, scale=v_scale)
        v.add_image(z_affs, name='z affinities', blending='additive', visible=False, scale=v_scale, 
                    colormap='bop purple')
        v.add_image(y_affs, name='y affinities', blending='additive', visible=False, scale=v_scale, 
                    colormap='bop orange')
        v.add_image(x_affs, name='x affinities', blending='additive', visible=False, scale=v_scale, 
                    colormap='bop blue')
        v.add_labels(masks, name='Masks', blending='additive', visible=False, scale=v_scale)
        v.add_labels(GT, name='Ground truth', blending='additive', visible=False, scale=v_scale)
        v.add_labels(segmentations, name='Segmentations', blending='additive', visible=True, 
                     scale=v_scale)
        napari.run()
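
A hedged usage sketch for segment_from_directory (the directory path, suffix, and channel indices below are illustrative placeholders that mirror the channel layout used in the __main__ block of Example #3, not values from the original project):

# Illustrative only: train_dir and suffix are placeholders; the channel indices
# assume the 10-channel output layout (z/y/x affinities, centreness, centreness-log)
# used elsewhere in this module.
train_dir = '/path/to/training_run'
segment_from_directory(
    train_dir,
    suffix='run-01',
    affinities_channels=(0, 2, 5),   # z-1, y-1, x-1 affinity channels
    centroids_channel=9,             # centreness-log
    thresholding_channel=8,          # centreness
    scale=(4, 1, 1),
    w_scale=(4, 1, 1),
    display=False,
    validation=True)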
Example #2
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
import helpers
import rpn

args = helpers.handle_args()
if args.handle_gpu:
    helpers.handle_gpu_compatibility()

batch_size = 8
epochs = 50
load_weights = False
hyper_params = helpers.get_hyper_params()

VOC_train_data, VOC_info = helpers.get_dataset("voc/2007", "train+validation")
VOC_val_data, _ = helpers.get_dataset("voc/2007", "test")
VOC_train_total_items = helpers.get_total_item_size(VOC_info, "train+validation")
VOC_val_total_items = helpers.get_total_item_size(VOC_info, "test")
step_size_train = helpers.get_step_size(VOC_train_total_items, batch_size)
step_size_val = helpers.get_step_size(VOC_val_total_items, batch_size)
labels = helpers.get_labels(VOC_info)
# We add 1 class for background
hyper_params["total_labels"] = len(labels) + 1
# If you want to use a different dataset and don't know its max height and width values,
# you can use the calculate_max_height_width method in helpers
max_height, max_width = helpers.VOC["max_height"], helpers.VOC["max_width"]
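# Hypothetical sketch (the call signature is assumed, not taken from the project):
# for a custom dataset, the helper mentioned above could replace the VOC constants, e.g.
# max_height, max_width = helpers.calculate_max_height_width(VOC_train_data)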
VOC_train_data = VOC_train_data.map(lambda x : helpers.preprocessing(x, max_height, max_width))
VOC_val_data = VOC_val_data.map(lambda x : helpers.preprocessing(x, max_height, max_width))

padded_shapes, padding_values = helpers.get_padded_batch_params()
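
The training script is cut off after get_padded_batch_params; a hedged continuation, following the same padded_batch pattern used in the inference script later in this listing (Example #4), would be:

# Sketch only: batch both splits with the padding parameters obtained above,
# mirroring the padded_batch call in the inference example below.
VOC_train_data = VOC_train_data.padded_batch(batch_size,
                                             padded_shapes=padded_shapes,
                                             padding_values=padding_values)
VOC_val_data = VOC_val_data.padded_batch(batch_size,
                                         padded_shapes=padded_shapes,
                                         padding_values=padding_values)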
Example #3
        try:
            if new[c[-3], c[-2], c[-1]] == 1:
                new_cent.append(c)
        except IndexError:
            pass
    #print('min: ', np.min(a_s), ' max: ', np.max(a_s))
    return new, np.array(new_cent)



if __name__ == '__main__':
    import os
    data_dir = '/Users/amcg0011/Data/pia-tracking/cang_training'
    train_dir = os.path.join(data_dir, '210416_161026_EWBCE_2F_z-1_z-2_y-1_y-2_y-3_x-1_x-2_x-3_c_cl')
    channels = ('z-1', 'z-2','y-1', 'y-2', 'y-3', 'x-1', 'x-2', 'x-3', 'centreness', 'centreness-log')
    images, labs, output = get_dataset(train_dir)
    #o88 = output[88]
    aff_chans = (0, 2, 5)
    cent_chan = 9
    mask_chan = 8
    #seg88, s88 = segment_output_image(o88, aff_chans, cent_chan, mask_chan) #, scale=(4, 1, 1))
    #seg88s, s88s = segment_output_image(o88, aff_chans, cent_chan, mask_chan, scale=(4, 1, 1))
    #seg88c, s88c = segment_output_image(o88, aff_chans, cent_chan, mask_chan, compactness=0.5) #, scale=(4, 1, 1))
    #i88 = images[88]
    #l88 = labs[88]
    #v = napari.view_image(i88, name='image', scale=(4, 1, 1), blending='additive')
    #v.add_labels(l88, name='labels', scale=(4, 1, 1), visible=False)
    #v.add_image(o88[aff_chans[0]], name='z affinities', 
              #  colormap='bop purple', scale=(4, 1, 1), 
               # visible=False, blending='additive')
    #v.add_image(o88[aff_chans[1]], name='y affinities', 
Example #4
from tensorflow.keras.models import Sequential
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import helpers
import rpn
import faster_rcnn

args = helpers.handle_args()
if args.handle_gpu:
    helpers.handle_gpu_compatibility()

mode = "inference"
batch_size = 1
hyper_params = helpers.get_hyper_params()

VOC_test_data, VOC_info = helpers.get_dataset("voc/2007", "test")
labels = helpers.get_labels(VOC_info)
# We add 1 class for background
hyper_params["total_labels"] = len(labels) + 1
# If you want to use a different dataset and don't know its max height and width values,
# you can use the calculate_max_height_width method in helpers
max_height, max_width = helpers.VOC["max_height"], helpers.VOC["max_width"]
VOC_test_data = VOC_test_data.map(
    lambda x: helpers.preprocessing(x, max_height, max_width))

padded_shapes, padding_values = helpers.get_padded_batch_params()
VOC_test_data = VOC_test_data.padded_batch(batch_size,
                                           padded_shapes=padded_shapes,
                                           padding_values=padding_values)

base_model = VGG16(include_top=False)
Example #5
    dataset_name = "mnist"
    model_name = AVAILABLE_MODELS[args.model]
    loss_fn = AVAILABLE_LOSS_FNS[args.loss]
    batch_size = config["batch_size"][dataset_name.lower()]
    train_subset_size = config["train_subset_size"][model_name.lower()]
    epochs = args.epochs if args.epochs else config["epochs"]
    previous_model = None
    if args.prev:
        if loss_fn not in args.prev:
            raise RuntimeError("Use same loss function")
        if not os.path.exists(args.prev):
            raise FileNotFoundError("Couldn't find {}".format(args.prev))
        previous_model = torch.load(args.prev)
    num_params = config["num_params"][model_name]
    num_models = None
    if args.num:
        num_models = args.num
    model = get_model_by_name(model_name)
    dataset = get_dataset(dataset_name, train_subset_size, batch_size)

    training = Training(model,
                        dataset,
                        num_params,
                        epochs,
                        prev_model=previous_model,
                        num_models=num_models,
                        loss_fn=loss_fn)
    training.start()
    training.save()
Example #6
def get_submission(X_train,
                   X_valid,
                   y_train,
                   y_valid,
                   X_test,
                   weights=None,
                   model=MLPRegressor,
                   max_epoch=200,
                   base_lr=0.1,
                   momentum=0.9,
                   weight_decay=0.0001,
                   batch_size=128,
                   train_params={},
                   plot=True,
                   zero_predict=False,
                   test_along=False,
                   optimizer='sgd',
                   hyper={},
                   save=False,
                   load=False,
                   mdl_name='mlp.pt',
                   all_train=False):
    if all_train:
        if X_valid is not None:
            X_train = pd.concat([X_train, X_valid])
            y_train = pd.concat([y_train, y_valid])
            X_valid = None
            y_valid = None
        # test_along = False

        # train_set, valid_set, X_test_np, X_train_np, y_train_np, X_valid_np, y_valid_np, _ = get_dataset(
        #     X_train.values, y_train.values, X_test.values, valid_size=0.2
        # )
        train_set, valid_set, X_test, X_train, y_train, X_valid, y_valid, _ = get_dataset(
            X_train, y_train, X_test, valid_size=0.2)
    else:
        # train_set, valid_set, X_test_np, X_train_np, y_train_np, X_valid_np, y_valid_np, _ = get_dataset(
        #     X_train.values, y_train.values, X_test.values, X_valid.values, y_valid.values
        # )
        train_set, valid_set, X_test, X_train, y_train, X_valid, y_valid, _ = get_dataset(
            X_train, y_train, X_test, X_valid, y_valid)

    PATH = './saved_models'
    if not os.path.isdir(PATH): os.makedirs(PATH)

    start_time = time.time()
    end_time = start_time
    if load:
        trainer = Trainer(torch.load(os.path.join(PATH, mdl_name)),
                          train_set=train_set,
                          loss_fn=F.l1_loss,
                          hyper=hyper,
                          valid_set=valid_set,
                          weights=weights,
                          batch_size=batch_size,
                          epochs=max_epoch,
                          optimizer=optimizer)

    else:
        trainer = Trainer(model(**train_params),
                          train_set=train_set,
                          loss_fn=F.l1_loss,
                          hyper=hyper,
                          valid_set=valid_set,
                          weights=weights,
                          batch_size=batch_size,
                          epochs=max_epoch,
                          optimizer=optimizer)

        valid_hist = []
        for epochs in range(max_epoch):
            trainer.train_epoch()

            temp_lr = trainer.optimizer.param_groups[0]['lr']

            if test_along:
                temp_valid = trainer.loss_epoch()
                valid_hist.append(temp_valid)
                print(
                    'Epoch {:3}: Training MAE={:8.2f}, Valid MAE={:8.2f}, lr={}'
                    .format(epochs, trainer.eval(load='train'), temp_valid,
                            temp_lr))
            else:
                print('Epoch {:3}: Training MAE={:8.2f}, lr={}'.format(
                    epochs, trainer.eval(load='train'), temp_lr))
        end_time = time.time()

        if plot:
            t_step = np.arange(0, max_epoch, 1)
            train_hist = trainer.evaluator_loss.hist
            fig_path = 'figures'
            if not os.path.isdir(fig_path): os.makedirs(fig_path)
            plt.figure()
            plt.plot(t_step, train_hist, 'r', ls='-', label='training MAE')
            if test_along:
                plt.plot(t_step,
                         valid_hist,
                         'b',
                         ls='--',
                         label='validation MAE')
            plt.legend(loc='best')
            plt.xlabel('steps')
            plt.title('Training and Validation MAE')
            plt.grid()
            plt.savefig(os.path.join(fig_path, 'training_plot.png'))
            plt.close()

        if save:
            torch.save(trainer.model, os.path.join(PATH, mdl_name))

    # train_loss = trainer.loss_epoch(load='train')
    # valid_loss = trainer.loss_epoch(load='valid')

    train_pred = trainer.predict(torch.FloatTensor(
        X_train.values)).cpu().data.numpy()
    train_loss = mean_absolute_error(y_train.values, train_pred)

    if X_valid is not None:
        valid_pred = trainer.predict(torch.FloatTensor(
            X_valid.values)).cpu().data.numpy()
        valid_loss = mean_absolute_error(y_valid.values, valid_pred)
    else:
        valid_pred = None
        valid_loss = 0

    test_pred = trainer.predict(torch.FloatTensor(
        X_test.values)).cpu().data.numpy()

    # print('>>> original valid loss: {}'.format(valid_loss))
    # valid_pred[valid_pred<=50] = 0
    # valid_pred[np.absolute(valid_pred-100)<50] = 100
    # new_valid_loss = mean_absolute_error(y_valid.values, valid_pred)
    # print('>>> New valid loss: {}'.format(new_valid_loss))

    if zero_predict:
        # zero_predictor = load_obj('xgb_class')
        # valid_pred_zeros = zero_predictor.predict(X_valid.values)

        zero_predictor = torch.load(os.path.join(PATH, 'mlp_class.pt'))
        zero_predictor.eval()  # evaluation mode
        inp = torch.FloatTensor(X_valid.values)
        if torch.cuda.is_available():
            inp = inp.cuda()
        with torch.no_grad():
            # run the classifier without tracking gradients; argsort(...)[:, -1]
            # picks the highest-scoring class for each sample
            valid_pred_zeros = zero_predictor(inp).cpu().data.numpy().argsort(
                axis=1)[:, -1]

        # train_pred_zeros = zero_predictor.predict(X_train.values)
        # train_output[train_pred_zeros == 0] = 0
        print('>>> original valid loss: {}'.format(valid_loss))
        valid_pred[valid_pred_zeros == 0, :] = 0

        new_valid_loss = mean_absolute_error(y_valid.values, valid_pred)
        print('>>> New valid loss: {}'.format(new_valid_loss))

    state_dict = trainer.model.state_dict()
    if torch.cuda.device_count() > 1:
        input_weights = state_dict['module.regressor.fc0.weight'].cpu().numpy()
    else:
        input_weights = state_dict['regressor.fc0.weight'].cpu().numpy()
    # assume std deviation of each feature is 1
    avg_w = np.mean(np.abs(input_weights), axis=0)
    feature_importances = avg_w

    feature_names = X_train.columns.values
    sorted_idx = np.argsort(feature_importances * -1)  # descending order

    summary = '====== MLPRegressor Training Summary ======\n'
    summary += '>>> epochs={}, lr={}, momentum={}, weight_decay={}\n'.format(
        max_epoch, base_lr, momentum, weight_decay)
    summary += '>>> schedule={}\n'.format(
        hyper['lr_schedule'] if 'lr_schedule' in hyper else 'None')
    summary += '>>> hidden={}, optimizer="{}", batch_size={}\n'.format(
        train_params['num_neuron'], optimizer, batch_size)
    for idx in sorted_idx:
        summary += '[{:<30s}] {:<10.4f}\n'.format(feature_names[idx],
                                                  feature_importances[idx])
    summary += '>>> training_time={:10.2f}min\n'.format(
        (end_time - start_time) / 60)
    summary += '>>> Final MAE: {:10.4f}(Training), {:10.4f}(Validation)\n'.format(
        train_loss, valid_loss)

    # Generate submission
    submission = pd.DataFrame(data=test_pred,
                              index=X_test.index,
                              columns=['Next_Premium'])

    submission_train = pd.DataFrame(data=train_pred,
                                    index=X_train.index,
                                    columns=['Next_Premium'])

    submission_valid = pd.DataFrame(
        data=valid_pred, index=X_valid.index,
        columns=['Next_Premium']) if valid_pred is not None else None

    return {
        'model': trainer,
        'submission': submission,
        'submission_train': submission_train,
        'submission_valid': submission_valid,
        'valid_loss': valid_loss,
        'summary': summary
    }
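
A hedged example of calling this get_submission (the input DataFrames and the contents of train_params are assumptions for illustration; only num_neuron and lr_schedule are actually read by the function):

# Illustrative call only: X_train, X_valid, y_train, y_valid and X_test are assumed to be
# pandas objects prepared elsewhere, and MLPRegressor is the model class used by default.
results = get_submission(X_train, X_valid, y_train, y_valid, X_test,
                         model=MLPRegressor,
                         max_epoch=100,
                         batch_size=128,
                         train_params={'num_neuron': 64},   # assumed constructor kwarg
                         optimizer='sgd',
                         test_along=True,
                         plot=False,
                         save=True,
                         mdl_name='mlp.pt')
print(results['summary'])
results['submission'].to_csv('submission.csv')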
Example #7
def get_submission(X_train,
                   X_valid,
                   y_train,
                   y_valid,
                   X_test,
                   model=ConvNet1D,
                   max_epoch=200,
                   base_lr=0.1,
                   momentum=0.9,
                   weight_decay=0.0001,
                   batch_size=128,
                   train_params={},
                   plot=True,
                   test_along=False,
                   optimizer='sgd',
                   hyper={},
                   save=False,
                   load=False,
                   mdl_name='cnn.pt'):
    train_set, valid_set, X_test_np, X_train_np, X_valid_np, _ = get_dataset(
        X_train.values, y_train.values, X_test.values, X_valid.values,
        y_valid.values)

    PATH = './saved_model'
    if not os.path.isdir(PATH): os.makedirs(PATH)

    start_time = time.time()
    end_time = start_time
    if load:
        trainer = Trainer(torch.load(os.path.join(PATH, mdl_name)),
                          train_set=train_set,
                          loss_fn=F.l1_loss,
                          hyper=hyper,
                          valid_set=valid_set,
                          batch_size=batch_size,
                          epochs=max_epoch,
                          optimizer=optimizer)
    else:
        trainer = Trainer(model(**train_params),
                          train_set=train_set,
                          loss_fn=F.l1_loss,
                          hyper=hyper,
                          valid_set=valid_set,
                          batch_size=batch_size,
                          epochs=max_epoch,
                          optimizer=optimizer)

        valid_hist = []
        for epochs in range(max_epoch):
            trainer.train_epoch()

            temp_lr = trainer.optimizer.param_groups[0]['lr']

            if test_along:
                temp_valid = trainer.loss_epoch()
                valid_hist.append(temp_valid)
                print(
                    'Epoch {:3}: Training MAE={:8.2f}, Valid MAE={:8.2f}, lr={}'
                    .format(epochs, trainer.eval(), temp_valid, temp_lr))
            else:
                print('Epoch {:3}: Training MAE={:8.2f}, lr={}'.format(
                    epochs, trainer.eval(), temp_lr))
        end_time = time.time()

        if plot:
            t_step = np.arange(0, max_epoch, 1)
            train_hist = trainer.evaluator.hist
            fig_path = 'figures'
            if not os.path.isdir(fig_path): os.makedirs(fig_path)
            plt.figure()
            plt.plot(t_step, train_hist, 'r', ls='-', label='training MAE')
            if test_along:
                plt.plot(t_step,
                         valid_hist,
                         'b',
                         ls='--',
                         label='validation MAE')
            plt.legend(loc='best')
            plt.xlabel('steps')
            plt.title('Training and Validation MAE')
            plt.grid()
            plt.savefig(os.path.join(fig_path, 'training_plot.png'))
            plt.close()

        if save:
            torch.save(trainer.model, os.path.join(PATH, mdl_name))

    train_loss = trainer.loss_epoch(load='train')
    valid_loss = trainer.loss_epoch(load='valid')

    state_dict = trainer.model.state_dict()
    if torch.cuda.device_count() > 1:
        input_weights = state_dict['module.regressor.fc0.weight'].cpu().numpy()
    else:
        input_weights = state_dict['regressor.fc0.weight'].cpu().numpy()
    # assume std deviation of each feature is 1
    avg_w = np.mean(np.abs(input_weights), axis=0)
    feature_importances = avg_w

    feature_names = X_train.columns.values
    sorted_idx = np.argsort(feature_importances * -1)  # descending order

    summary = '====== ConvNet1D Training Summary ======\n'
    summary += '>>> epochs={}, lr={}, momentum={}, weight_decay={}\n'.format(
        max_epoch, base_lr, momentum, weight_decay)
    summary += '>>> schedule={}\n'.format(hyper.get('lr_schedule', 'None'))
    summary += '>>> hidden={}, optimizer="{}", batch_size={}\n'.format(
        train_params['num_neuron'], optimizer, batch_size)
    for idx in sorted_idx:
        summary += '[{:<25s}] {:<10.4f}\n'.format(feature_names[idx],
                                                  feature_importances[idx])
    summary += '>>> training_time={:10.2f}min\n'.format(
        (end_time - start_time) / 60)
    summary += '>>> Final MAE: {:10.4f}(Training), {:10.4f}(Validation)\n'.format(
        train_loss, valid_loss)

    # Generate submission
    test_output = trainer.predict(
        torch.FloatTensor(X_test_np)).cpu().data.numpy()
    submission = pd.DataFrame(data=test_output,
                              index=X_test.index,
                              columns=['Next_Premium'])

    train_output = trainer.predict(
        torch.FloatTensor(X_train_np)).cpu().data.numpy()
    submission_train = pd.DataFrame(data=train_output,
                                    index=X_train.index,
                                    columns=['Next_Premium'])

    valid_output = trainer.predict(
        torch.FloatTensor(X_valid_np)).cpu().data.numpy()
    submission_valid = pd.DataFrame(data=valid_output,
                                    index=X_valid.index,
                                    columns=['Next_Premium'])

    return {
        'model': trainer,
        'submission': submission,
        'submission_train': submission_train,
        'submission_valid': submission_valid,
        'valid_loss': valid_loss,
        'summary': summary
    }