Example #1
def _parse_dense_layer(config: dict) -> tf.layers.Dense:
    """
    Function to build dense layer with specific config.

    Parameters
    ----------
    config: dict holding 'units' key.
    
    Optional Keys: 'activation', 'kernel_initializer', 'bias_initializer', 'name', 'trainable'

    Returns
    -------
    layer: tf.layers.Dense with specified configuration.
    """
    activation = cutil.safe_get('activation', config)
    kernel_initializer = config.get('kernel_initializer',
                                    tf.initializers.lecun_uniform())
    bias_initializer = config.get('bias_initializer', tf.ones_initializer())
    name = cutil.safe_get('name', config)
    trainable = cutil.safe_get('trainable', config)

    layer = tf.layers.Dense(config['units'],
                            activation=activation,
                            kernel_initializer=kernel_initializer,
                            bias_initializer=bias_initializer,
                            name=name,
                            trainable=trainable)
    return layer
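A minimal usage sketch (assuming TensorFlow 1.x and that `cutil.safe_get(key, config)` returns `config[key]` or `None`; the config values here are hypothetical):

import tensorflow as tf

# Only 'units' is required; the remaining keys fall back to defaults.
dense = _parse_dense_layer({'units': 128, 'activation': tf.nn.relu, 'name': 'fc1'})
inputs = tf.placeholder(tf.float32, shape=[None, 64])  # example rank-2 input
outputs = dense(inputs)  # shape: [None, 128]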
Example #2
def _parse_activation(config: dict):
    """
    Parse activation function and return callable with specified name.

    Parameters
    ----------
    config: dict with key 'function'.
    Optional Keys: 'name'

    Returns
    -------
    lambda x: function(x, name=name)
    """
    name = cutil.safe_get('name', config)
    function = cutil.safe_get('function', config)
    return lambda x: function(x, name=name)
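A usage sketch, assuming `config['function']` is a TensorFlow op that accepts a `name` argument, such as `tf.nn.relu` (the config values are hypothetical):

import tensorflow as tf

act = _parse_activation({'function': tf.nn.relu, 'name': 'relu1'})
x = tf.placeholder(tf.float32, shape=[None, 128])
y = act(x)  # equivalent to tf.nn.relu(x, name='relu1')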
Example #3
def _parse_batchnorm_layer(config: dict) -> tf.layers.BatchNormalization:
    """
    Function to create batch normalization layer on specified axis.

    Parameters
    ----------
    config: dict with key 'axis'.

    Optional Keys: 'name'

    Returns
    -------
    layer: tf.layers.BatchNormalization(axis=axis,name=name)
    """
    axis = cutil.safe_get('axis', config)
    name = cutil.safe_get('name', config)
    return tf.layers.BatchNormalization(axis=axis, name=name)
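A usage sketch for channels-last inputs, where the feature axis is -1 (hypothetical config):

import tensorflow as tf

bn = _parse_batchnorm_layer({'axis': -1, 'name': 'bn1'})
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 16])
y = bn(x, training=True)  # use batch statistics during training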
Example #4
def _parse_avgunpool_layer(config: dict):
    """
    Function to create an average unpooling layer with a given factor.
    This is a custom implementation.

    Parameters
    ----------
    config: dict holding key 'factor'.
    
    Optional Keys: 'name'

    Returns
    -------
    lambda x: avg_unpool2d(x, factor, name=name) callable which performs the desired operation.
    """
    name = cutil.safe_get('name', config)
    factor = cutil.safe_get('factor', config)
    return lambda x: avg_unpool2d(x, factor, name=name)
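A usage sketch, assuming `avg_unpool2d` is the project's custom op that upsamples the spatial dimensions by `factor` (config values are hypothetical):

import tensorflow as tf

unpool = _parse_avgunpool_layer({'factor': 2, 'name': 'avg_unpool1'})
x = tf.placeholder(tf.float32, shape=[None, 16, 16, 8])
y = unpool(x)  # e.g. (N, H, W, C) -> (N, 2*H, 2*W, C)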
Example #5
def construct_train_fn(config, operations=None):
    """
    Function to construct the training function based on the config.

    Parameters
    ----------
    config: dict holding model configuration.

    operations: optional list of additional preprocessing callables applied to each record.

    Returns
    -------
    train_fn: callable which is passed to estimator.train function.
    This function prepares the dataset and returns it in a format which is suitable for the estimator API.
    """

    # Avoid the shared mutable default; copy so the caller's list is untouched.
    operations = [] if operations is None else list(operations)

    cfg_train_ds = cutil.safe_get('training', config)

    # Create decode operation
    decode_op = construct_decode_op(config['features'])

    # Create unzip operation
    unzip_op = construct_unzip_op()

    operations.insert(0, decode_op)
    if 'operations' in cfg_train_ds:
        for op in cfg_train_ds['operations']:
            operations.append(cutil.get_function(op['module'], op['name']))

    operations.append(unzip_op)
    preprocess = cutil.concatenate_functions(operations)

    def train_fn():
        """
        Function which is passed to .train(...) call of an estimator object.

        Returns
        -------
        dataset: tf.data.Dataset object with elements ({'f0': v0, ... 'fx': vx}, label).
        """
        # Load the dataset
        dataset = tf.data.TFRecordDataset(cfg_train_ds['filename'])

        # Apply possible preprocessing, batch and prefetch the dataset.
        dataset = dataset.map(preprocess, num_parallel_calls=os.cpu_count())

        # Estimate how many elements fit into half of total RAM.
        sample = tf.data.experimental.get_single_element(dataset.take(1))
        element_size = get_deep_size(sample)
        buffer_size = tf.constant(
            int((virtual_memory().total / 2) / element_size), tf.int64)

        # Shuffle the dataset, falling back to the memory-based estimate
        # when no explicit 'shuffle_size' is configured.
        dataset = dataset.shuffle(config.get('shuffle_size', buffer_size))

        dataset = dataset.batch(config['batch'])
        dataset = dataset.prefetch(buffer_size=1)
        return dataset.repeat()

    return train_fn
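A sketch of wiring the returned `train_fn` into an estimator; the config fragment below is hypothetical and only mirrors the keys read above ('features', 'training', 'batch', 'shuffle_size'):

config = {
    'features': feature_spec,  # passed to construct_decode_op
    'training': {'filename': 'train.tfrecord'},
    'batch': 32,
    'shuffle_size': 10000,
}
train_fn = construct_train_fn(config)
estimator.train(input_fn=train_fn, steps=1000)  # estimator: a tf.estimator.Estimator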
Example #6
def _parse_maxpool_layer(config: dict) -> tf.layers.MaxPooling2D:
    """
    Function to build MaxPooling2D layer with specific config.

    Parameters
    ----------
    config: dict holding 'pool_size' and 'strides' key.
    
    Optional Keys: 'name'

    Returns
    -------
    layer: tf.layers.MaxPooling2D with specified configuration.
    """
    # Retrieve attributes from config
    pool_size = cutil.safe_get('pool_size', config)
    strides = cutil.safe_get('strides', config)
    name = cutil.safe_get('name', config)

    return tf.layers.MaxPooling2D(pool_size, strides, name=name)
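A usage sketch (hypothetical config; halves the spatial dimensions of a 4-D channels-last input):

import tensorflow as tf

pool = _parse_maxpool_layer({'pool_size': 2, 'strides': 2, 'name': 'pool1'})
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 16])
y = pool(x)  # shape: [None, 16, 16, 16]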
Example #7
def _parse_maxunpool_layer(config: dict):
    """
    Function to create max_unpool2d layer.
    This is a custom implementation.

    Parameters
    ----------
    config: dict.

    Optional Keys: 'name'

    Returns
    -------
    lambda x: max_unpool2d(x, name=name)
    """
    name = cutil.safe_get('name', config)
    return lambda x: max_unpool2d(x, name=name)
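A usage sketch, assuming `max_unpool2d` is the project's custom unpooling op (the config is hypothetical):

unpool = _parse_maxunpool_layer({'name': 'max_unpool1'})
y = unpool(x)  # x: a 4-D feature map tensor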
Example #8
def _parse_conv_layer(config: dict):
    """
    Function to build convolutional 2d layer with specific config.
    Pass 'transpose': True in config to create deconvolution layer.

    Parameters
    ----------
    config: dict holding 'filters', 'strides' and 'kernel_size' keys.

    Optional Keys: 'activation', 'kernel_initializer', 'bias_initializer', 'name', 'trainable', 'transpose', 'padding'

    Returns
    -------
    layer: tf.layers.Conv2D or tf.layers.Conv2DTranspose with specified configuration.
    """
    filters = config['filters']
    strides = cutil.safe_get('strides', config)
    kernel_size = cutil.safe_get('kernel_size', config)
    name = cutil.safe_get('name', config)
    activation = cutil.safe_get('activation', config)
    kernel_initializer = config.get('kernel_initializer',
                                    tf.initializers.lecun_uniform())
    bias_initializer = config.get('bias_initializer', tf.ones_initializer())
    trainable = cutil.safe_get('trainable', config)
    transpose = cutil.safe_get('transpose', config)
    padding = config.get('padding', 'same')

    if transpose:
        layer = tf.layers.Conv2DTranspose(
            filters,
            kernel_size,
            strides,
            padding=padding,
            name=name,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            activation=activation,
            trainable=trainable)
    else:
        layer = tf.layers.Conv2D(filters,
                                 kernel_size,
                                 strides,
                                 padding=padding,
                                 name=name,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 activation=activation,
                                 trainable=trainable)
    return layer
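A usage sketch exercising both branches (hypothetical configs; filter counts, kernel sizes and strides are chosen arbitrarily):

import tensorflow as tf

conv = _parse_conv_layer({'filters': 64, 'kernel_size': 3, 'strides': 1})
deconv = _parse_conv_layer({'filters': 64, 'kernel_size': 3, 'strides': 2,
                            'transpose': True})
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y = deconv(conv(x))  # deconv upsamples the spatial dimensions by stride 2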
Example #9
def main(argv):
    parser = argparse.ArgumentParser(
        description='Compute latent code for image patch by model inference.')
    parser.add_argument('export_dir',
                        type=str,
                        help='Path to saved model to use for inference.')

    args = parser.parse_args()

    # Load config files, separated in this example.
    dataset_config_file = os.path.join(git_root, 'examples', 'dataset',
                                       'dataset.json')
    model_config_file = os.path.join(git_root, 'examples', 'dataset',
                                     'model.json')

    cfg_datasets = ctfm.parse_json(dataset_config_file)['datasets']
    cfg_model = ctfm.parse_json(model_config_file)['model']

    cfg_train_ds = cutil.safe_get('training', cfg_datasets)

    model_dir = args.export_dir

    params_dict = {
        'config': cfg_model,
        'model_dir': model_dir,
    }

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        model_dir=model_dir,
                                        params=params_dict,
                                        config=tf.estimator.RunConfig(
                                            model_dir=model_dir,
                                            save_summary_steps=100,
                                            log_step_count_steps=100))

    classifier.train(input_fn=ctfd.construct_train_fn(cfg_datasets),
                     steps=cfg_train_ds['steps'])
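The script would typically be run through the usual `__main__` guard, which this snippet does not show (assumed here):

if __name__ == '__main__':
    import sys
    main(sys.argv)  # e.g.: python <script>.py /path/to/saved_model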