Example #1
# Imports assumed by this and the later transform() examples
# (data, nn, tta, and util are project-local modules):
import time

import numpy as np
import yaml  # used by Example #7

import data
import nn
import tta
import util


def transform(cnf, n_iter, skip, test, train, weights_from, test_dir):

    config = util.load_module(cnf).config

    runs = {}
    if train:
        runs["train"] = config.get("train_dir")
    if test or test_dir:
        runs["test"] = test_dir or config.get("test_dir")

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=config.cnf["sigma"], **config.cnf["aug_params"]
        )
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0, **data.no_augmentation_params
        )

    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()

        files = data.get_image_files(directory)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            X = net.transform(files, transform=tf, color_vec=color_vec)
            if Xs is None:
                Xs = X
                Xs2 = X ** 2
            else:
                Xs += X
                Xs2 += X ** 2

            print("took {:6.1f} seconds".format(time.time() - tic))
            if i % 5 == 0 or n_iter < 5:
                std = np.sqrt((Xs2 - Xs ** 2 / i) / (i - 1))
                config.save_features(Xs / i, i, skip=skip, test=(run == "test"))
                config.save_std(std, i, skip=skip, test=(run == "test"))
                print("saved {} iterations".format(i))
Example #2
# Note: the keyword defaults below refer to module-level variables with the
# same names (this snippet was lifted from a script/notebook context).
def transform(cnf=cnf,
              n_iter=n_iter,
              skip=skip,
              test=test,
              train=train,
              weights_from=weights_from,
              test_dir=test_dir):

    config = util.load_module(cnf).config

    config.cnf['batch_size_train'] = 128
    config.cnf['batch_size_test'] = 128

    runs = {}
    if train:
        runs['train'] = config.get('train_dir')
    if test or test_dir:
        runs['test'] = test_dir or config.get('test_dir')

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter,
            skip=skip,
            color_sigma=config.cnf['sigma'],
            **config.cnf['aug_params'])
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0, **data.no_augmentation_params)

    ret_val = []
    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()
        files = data.get_image_files(directory)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            # Only the first 1000 files; one feature matrix is appended per pass.
            X = net.transform(files[:1000], transform=tf, color_vec=color_vec)
            ret_val.append(X)
    return ret_val, net
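Unlike Example #1, this variant keeps every per-pass feature matrix instead
of accumulating running sums, and returns the list together with the network.
A hypothetical usage sketch (assuming the module-level defaults mentioned
above are set, and that only the test run is active so all matrices share
one shape):

import numpy as np

feature_list, net = transform(test=True, train=False)
mean_features = np.mean(np.stack(feature_list), axis=0)  # average over TTA passes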
Example #3
def estimate_scale(img):
    return np.maximum(img.shape[0], img.shape[1]) / 85.0

scale_factors = [estimate_scale, 5.0] # combine size-based rescaling + fixed rescaling

# augmentation_transforms_test = []
# for flip in [True, False]:
#     for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
#         for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
#             tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
#             augmentation_transforms_test.append(tf)
augmentation_transforms_test = tta.build_quasirandom_transforms(70, **{
    'zoom_range': (1 / 1.4, 1.4),
    'rotation_range': (0, 360),
    'shear_range': (-10, 10),
    'translation_range': (-8, 8),
    'do_flip': True,
    'allow_stretch': 1.2,
})



data_loader = load.ZmuvMultiscaleDataLoader(
    scale_factors=scale_factors,
    num_chunks_train=num_chunks_train,
    patch_sizes=patch_sizes,
    chunk_size=chunk_size,
    augmentation_params=augmentation_params,
    augmentation_transforms_test=augmentation_transforms_test,
    validation_split_path=validation_split_path)


# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
# MaxPool2DLayer = nn.layers.cuda_convnet.MaxPool2DCCLayer

Conv2DLayer = tmp_dnn.Conv2DDNNLayer
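The scale_factors list above mixes a callable (estimate_scale) with a fixed
constant. A sketch of how such a mixed list could be resolved per image (an
assumption about the loader's behavior, not its actual code):

import numpy as np

def estimate_scale(img):
    return max(img.shape[0], img.shape[1]) / 85.0

def resolve_scale_factors(scale_factors, img):
    # Callables are evaluated on the image; plain numbers pass through.
    return [f(img) if callable(f) else float(f) for f in scale_factors]

img = np.zeros((425, 300, 3))
print(resolve_scale_factors([estimate_scale, 5.0], img))  # [5.0, 5.0]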
Example #4
def transform(cnf, n_iter, skip, test, train, weights_from, test_dir):

    config = util.load_module(cnf).config

    config.cnf['batch_size_train'] = 128
    config.cnf['batch_size_test'] = 128

    runs = {}
    if train:
        runs['train'] = config.get('train_dir')
    if test or test_dir:
        runs['test'] = test_dir or config.get('test_dir')

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter,
            skip=skip,
            color_sigma=config.cnf['sigma'],
            **config.cnf['aug_params'])
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0, **data.no_augmentation_params)

    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()

        files = data.get_image_files(directory)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            X = net.transform(files, transform=tf, color_vec=color_vec)
            if Xs is None:
                Xs = X
                Xs2 = X**2
            else:
                Xs += X
                Xs2 += X**2

            print('took {:6.1f} seconds'.format(time.time() - tic))
            if i % 10 == 0 or n_iter < 5:
                std = np.sqrt((Xs2 - Xs**2 / i) / (i - 1))
                config.save_features(Xs / i, i, skip=skip, test=(run == 'test'))
                config.save_std(std, i, skip=skip, test=(run == 'test'))
                print('saved {} iterations'.format(i))
Example #5
def estimate_scale(img):
    return np.maximum(img.shape[0], img.shape[1]) / 85.0


# augmentation_transforms_test = []
# for flip in [True, False]:
#     for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
#         for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
#             tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
#             augmentation_transforms_test.append(tf)
augmentation_transforms_test = tta.build_quasirandom_transforms(
    70, **{
        'zoom_range': (1 / 1.4, 1.4),
        'rotation_range': (0, 360),
        'shear_range': (-10, 10),
        'translation_range': (-8, 8),
        'do_flip': True,
        'allow_stretch': 1.2,
    })

data_loader = load.ZmuvRescaledDataLoader(
    estimate_scale=estimate_scale,
    num_chunks_train=num_chunks_train,
    patch_size=patch_size,
    chunk_size=chunk_size,
    augmentation_params=augmentation_params,
    augmentation_transforms_test=augmentation_transforms_test,
    validation_split_path=validation_split_path)

# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
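build_quasirandom_transforms draws the 70 test-time augmentation parameter
sets from a low-discrepancy sequence, so they cover the ranges more evenly
than independent uniform draws would. A rough illustration of the idea with
a 2-D Halton sequence mapped onto the zoom and rotation ranges (the actual
tta implementation may differ):

def halton(index, base):
    # Van der Corput radical inverse of index in the given base.
    result, f = 0.0, 1.0
    while index > 0:
        f /= base
        result += f * (index % base)
        index //= base
    return result

zoom_lo, zoom_hi = 1 / 1.4, 1.4
for i in range(1, 6):
    zoom = zoom_lo + halton(i, 2) * (zoom_hi - zoom_lo)  # base-2 dimension
    rotation = halton(i, 3) * 360.0                      # base-3 dimension
    print("zoom={:.3f} rotation={:.1f}".format(zoom, rotation))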
Example #6
from __future__ import division
import time

import click
import numpy as np

import nn
import data_orig
import tta
import utils

cnf = 'configs/c_512_5x5_32.py'
config = utils.load_module(cnf).config
config.cnf['batch_size_train'] = 128

runs = {}
runs['train'] = config.get('train_dir')

net = nn.create_net(config)

weights_from = 'weights/c_512_5x5_32/weights_final.pkl'
net.load_params_from(weights_from)

tfs, color_vecs = tta.build_quasirandom_transforms(
    1, skip=0, color_sigma=0.0, **data_orig.no_augmentation_params)
for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):
    pass  # a single identity transform; see the continuation sketched below
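With the single identity transform built above, the same net.transform call
used in the earlier examples yields one deterministic feature matrix. A
hypothetical continuation (assuming data_orig exposes the same
get_image_files helper as the data module used elsewhere):

files = data_orig.get_image_files(runs['train'])
X = net.transform(files, transform=tfs[0], color_vec=color_vecs[0])
print(X.shape)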



Example #7
def transform(cnf, exp_run_folder, n_iter, skip, test, train, weights_from, test_dir, fold):

    config = util.load_module(cnf).config
    config.cnf['fold'] = fold  # used to change the directories for weights_best, weights_epoch and weights_final
    config.cnf['exp_run_folder'] = exp_run_folder

    runs = {}
    if train:
        runs['train'] = config.get('train_dir')
    if test or test_dir:
        runs['test'] = test_dir or config.get('test_dir')

    with open('folds/' + data.settings['protocol'] + '.yml') as f:
        folds = yaml.safe_load(f)
    f0, f1 = fold.split('x')
    train_list = folds['Fold_' + f0][int(f1) - 1]
    test_list = folds['Fold_' + f0][0 if f1 == '2' else 1]

    net = nn.create_net(config)

    if weights_from is None:
        net.load_params_from(config.weights_file)
        print("loaded weights from {}".format(config.weights_file))
    else:
        weights_from = str(weights_from)
        net.load_params_from(weights_from)
        print("loaded weights from {}".format(weights_from))

    if n_iter > 1:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=config.cnf['sigma'],
            **config.cnf['aug_params'])
    else:
        tfs, color_vecs = tta.build_quasirandom_transforms(
            n_iter, skip=skip, color_sigma=0.0,
            **data.no_augmentation_params)

    for run, directory in sorted(runs.items(), reverse=True):

        print("extracting features for files in {}".format(directory))
        tic = time.time()

        if run == 'train':
            files = data.get_image_files(directory, train_list)
        else:
            files = data.get_image_files(directory, test_list)

        Xs, Xs2 = None, None

        for i, (tf, color_vec) in enumerate(zip(tfs, color_vecs), start=1):

            print("{} transform iter {}".format(run, i))

            X = net.transform(files, transform=tf, color_vec=color_vec)
            if Xs is None:
                Xs = X
                Xs2 = X**2
            else:
                Xs += X
                Xs2 += X**2

            print('took {:6.1f} seconds'.format(time.time() - tic))
            if i % 5 == 0 or n_iter < 5:
                std = np.sqrt((Xs2 - Xs**2 / i) / (i - 1))
                config.save_features_fold(Xs / i, i, skip=skip, fold=fold,
                                          test=(run == 'test'))
                #config.save_std_fold(std, i, skip=skip, fold=fold,
                #               test=True if run == 'test' else False)
                print('saved {} iterations'.format(i))
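The fold argument such as '1x2' selects Fold_1 and uses its second partition
for training, with the other partition held out for testing. A hypothetical
fold-file layout implied by the parsing above (the real protocol file may
differ):

folds = {
    'Fold_1': [['img_001', 'img_002'],   # partition 1 (f1 == '1')
               ['img_003', 'img_004']],  # partition 2 (f1 == '2')
}
f0, f1 = '1x2'.split('x')
train_list = folds['Fold_' + f0][int(f1) - 1]           # ['img_003', 'img_004']
test_list = folds['Fold_' + f0][0 if f1 == '2' else 1]  # ['img_001', 'img_002']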
Example #8
scale_factors = [estimate_scale, 2.0, 5.0]  # combine size-based rescaling (estimate_scale, as defined in Examples #3 and #5) with fixed rescalings


# augmentation_transforms_test = []
# for flip in [True, False]:
#     for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
#         for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
#             tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
#             augmentation_transforms_test.append(tf)
augmentation_transforms_test = tta.build_quasirandom_transforms(
    70,
    **{
        "zoom_range": (1 / 1.4, 1.4),
        "rotation_range": (0, 360),
        "shear_range": (-10, 10),
        "translation_range": (-8, 8),
        "do_flip": True,
        "allow_stretch": 1.2,
    }
)


data_loader = load.ZmuvMultiscaleDataLoader(
    scale_factors=scale_factors,
    num_chunks_train=num_chunks_train,
    patch_sizes=patch_sizes,
    chunk_size=chunk_size,
    augmentation_params=augmentation_params,
    augmentation_transforms_test=augmentation_transforms_test,
)