Example #1
import importlib

def get_network():
    network_type = get_params().network_type
    if network_type not in get_modules():
        raise Exception('Network type {} not found. Available types are: {}'.format(network_type, get_modules()))

    # Dynamically import the sibling module named after the network type
    module = importlib.import_module('.%s' % network_type, __package__)
    return getattr(module, 'network')
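A minimal usage sketch of the dynamic import above; the call signature of the module-level `network` object is an assumption, not shown in the snippet:

# Hypothetical usage: resolve the architecture named in the parameters
network = get_network()
model_output = network(input_tensor)  # assumed signature; depends on each module's `network`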
Example #2
def get_dataset(raw=False, is_test=False):
    """Get the dataset class according to the neptune parameters."""
    params = get_params()
    loader = DatasetLoader(path=params.path,
                           img_type=params.img_type,
                           remove_bad=params.remove_bad_id,
                           resize=params.resize,
                           split=params.validation_split,
                           load_test_set=is_test)
    if raw:
        return loader
    return loader.get_dataset()
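A short usage sketch of the two modes; what `loader.get_dataset()` yields is not shown in the snippet, so treat the second call as an assumption:

# raw=True hands back the DatasetLoader itself, e.g. to reach its dataframes
loader = get_dataset(raw=True)
# The default call returns whatever DatasetLoader.get_dataset() produces
dataset = get_dataset()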
Example #3
def get_augmenter():
    """Load an augmenter class for training / inference.

    Returns
    -------
    AugmentImages
        A class that will apply the desired pre-processing / augmentations to images.
        Main methods are
            .apply(img, mask) # Mask is optional
            .apply_preprocess(img) # If preprocessing is needed, it's done here. This should
                                     also be done for the test set.

    """
    # Instantiate
    augmenter = AugmentImages()

    # Fetch the requested augmentations; their parameter names start with the aug_ prefix
    params = get_params(as_dict=True)
    for param in params:
        if param.startswith('aug_'):

            # If the augmentation is disabled, don't go further
            if not params[param]:
                continue

            # Check if this augmentation exists and add it
            aug_name = param.replace('aug_', '')
            if aug_name in aug.pre_process:
                augmenter.add_pre_process(aug.pre_process[aug_name])
            elif aug_name in aug.image:
                augmenter.add_images_only(aug.image[aug_name])
            elif aug_name in aug.mask:
                augmenter.add_masks_only(aug.mask[aug_name])
            elif aug_name in aug.image_and_masks:
                augmenter.add_both(aug.image_and_masks[aug_name])

            print("Applied %s" % param)

    # Apply normalization method
    augmenter.set_normalization_method(aug.get_norm_method(params['network_type']))

    return augmenter
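Per the docstring, the returned object exposes .apply and .apply_preprocess; a minimal sketch, where the img and mask arrays are assumptions:

# Hypothetical usage following the docstring above
augmenter = get_augmenter()
img_aug, mask_aug = augmenter.apply(img, mask)  # training-time augmentation, mask optional
img_ready = augmenter.apply_preprocess(img)     # preprocessing only; also run on the test set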
Example #4
"""Contains all estimator methods."""
import os
import tensorflow as tf
from src.trainer.model import _model_fn
from src.input_pipe import get_input_fn
from src.lib.neptune import get_params, NeptuneCollector
from src.lib.tf_ops import EarlyStopping, Logger
from src.input_pipe.image_converter import get_augmenter

params = get_params()

# Set up the logger class and send it relevant data for other modules to use
train_logger = Logger()
train_logger.log_learning_rate(params.learning_rate)


def get_vgg16_variable_change():
    """VGG 16 variable conversions."""

    stack_1 = {
        'vgg_16/conv1/conv1_1/kernel': 'vgg_16/conv1/conv1_1/weights',
        'vgg_16/conv1/conv1_1/bias': 'vgg_16/conv1/conv1_1/biases',
        'vgg_16/conv1/conv1_2/kernel': 'vgg_16/conv1/conv1_2/weights',
        'vgg_16/conv1/conv1_2/bias': 'vgg_16/conv1/conv1_2/biases'
    }

    stack_2 = {
        'vgg_16/conv2/conv2_1/kernel': 'vgg_16/conv2/conv2_1/weights',
        'vgg_16/conv2/conv2_1/bias': 'vgg_16/conv2/conv2_1/biases',
        'vgg_16/conv2/conv2_2/kernel': 'vgg_16/conv2/conv2_2/weights',
        'vgg_16/conv2/conv2_2/bias': 'vgg_16/conv2/conv2_2/biases'
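A mapping like this is typically consumed by tf.train.init_from_checkpoint to restore slim-style checkpoint weights into Keras-style variables; a hedged sketch, assuming the function above returns the merged dict, and noting that init_from_checkpoint expects checkpoint names as keys:

# Hypothetical restore: init_from_checkpoint wants {checkpoint_name: graph_name},
# so invert the mapping returned above. The checkpoint path is a placeholder.
assignment_map = {ckpt: graph for graph, ckpt in get_vgg16_variable_change().items()}
tf.train.init_from_checkpoint('/path/to/vgg_16.ckpt', assignment_map)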
Example #5
"""Contains the class to load the dataset."""
from PIL import Image
import pandas as pd
from sklearn.model_selection import train_test_split
import os
import numpy as np
from src.lib.neptune import get_params
from src.input_pipe.data import BAD_IDS

params = get_params()


class DatasetLoader:
    """Dataset loader."""

    def __init__(self, path, img_type='images', remove_bad=False, resize=128,
                 split=0.1, post_df_methods=None, load_test_set=False):
        """Initialize the loader class.

        Parameters
        ----------
        path : str
            Path to the downloaded kaggle dataset
        resize : int, optional
            Size to which the images should be resized; None keeps the
            original size. Default is 128
        split : float, optional
            Validation ratio, default is 0.1
        post_df_methods : list of callable, optional
            Methods applied in order after the dataframes have been loaded.
            The dataframes have the following columns
                images: Numpy arrays with the loaded images in the specified size, normalized
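A hedged sketch of a post_df_methods hook, assuming each method receives and returns one of the loaded dataframes; the 'masks' column is an assumption, only 'images' is documented above:

# Hypothetical post-load hook: keep only rows with a non-empty mask
def drop_empty_masks(df):
    return df[df['masks'].map(lambda m: m.sum() > 0)]

loader = DatasetLoader(path=params.path, post_df_methods=[drop_empty_masks])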
Example #6
def get_input_fn(is_test=False):
    """Create a generator for datasets.

    This method will create input_fn that can be used to feed Tensorflow with
    images for training and inference. It uses the parameters specified in the
    ./config/neptune.yaml file. At test time, the train and validation
    input functions are not created, and vice versa.

    Returns
    -------
    dict
        A dictionary which contains input_fn for the required task.
        keys:
            'train', 'valid', 'test'
    """
    # Experiment parameters
    params = get_params()

    # Get the augmenter that applies the bootstrapping methods
    augmenter = get_augmenter()

    # Get generator methods
    generators, img_shape, mask_shape = create_generator(augmenter,
                                                         is_test=is_test)

    def create_dataset(generator,
                       augmenter=augmenter,
                       params=params,
                       is_train=True):
        with tf.variable_scope('feeder'):
            types = (tf.uint8, tf.uint8)
            shapes = (tf.TensorShape(img_shape), tf.TensorShape(mask_shape))

            # Initialize dataset
            dataset = tf.data.Dataset.from_generator(generator, types, shapes)

            # Form a batch
            dataset = dataset.batch(params.batch_size)

            # After batch has been collected, apply transformations
            if is_train:
                dataset = dataset.map(
                    augmenter.apply,
                    num_parallel_calls=params.num_parallel_calls)
            else:
                # The normalization is lightweight, so no extra threads are allocated for it.
                dataset = dataset.map(augmenter.apply_normalization_tfunc)

            # Prefetch batches
            dataset = dataset.prefetch(params.prefetch_batches)

            # Make into an iterator
            iterator = dataset.make_one_shot_iterator()
            image, mask = iterator.get_next()

            # Set the shape manually: OpenCV runs in the background and returns an unknown shape
            shape = [-1] + mask_shape
            mask = tf.reshape(mask, shape)

            # The augmenter may add channels during normalization; reflect that here
            shape = [-1] + img_shape
            shape[-1] = augmenter.output_image_channels
            image = tf.reshape(image, shape)

            # Return features and labels as dicts
            features = {'img': image}
            labels = {'mask': mask}

            return features, labels

    input_fn = {}

    # If testing, then simply return a numpy generator
    if is_test:
        input_fn['test'] = generators
    else:

        def train_input_fn():
            return create_dataset(generators[0], is_train=True)

        def eval_input_fn():
            return create_dataset(generators[1], is_train=False)

        input_fn['train'] = train_input_fn
        input_fn['valid'] = eval_input_fn

    return input_fn
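A hedged sketch of wiring the returned input functions into an estimator, reusing the names imported in Example #4 (_model_fn, get_params); the step count is a placeholder:

# Hypothetical training loop around the input_fn dict
estimator = tf.estimator.Estimator(model_fn=_model_fn, params=params)
input_fn = get_input_fn()
estimator.train(input_fn=input_fn['train'], steps=1000)
metrics = estimator.evaluate(input_fn=input_fn['valid'])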
Example #7
def __init__(self):
    """Initialize class."""
    self._logger = Logger()
    self._logging = logging.getLogger('tensorflow')
    self._params = get_params()