Example #1
    def __init__(self,
                 name=None,
                 units=1,
                 tensor=None,
                 dtype=None):

        if not dtype:
            dtype = K.floatx()
        elif dtype != K.floatx():
            K.set_floatx(dtype)

        if units < 1:
            raise ValueError(
                'Expected at least one unit; got `units`={:d}'.format(units)
            )

        layer = InputLayer(
            batch_input_shape=(None, units),
            input_tensor=tensor,
            name=name,
            dtype=dtype
        )

        super(RadialBasisBase, self).__init__(
            layers=to_list(layer),
            inputs=to_list(layer.input),
            outputs=to_list(layer.output),
        )
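
The dtype handling at the top of this constructor is a common Keras idiom: fall back to the backend's global float type, or switch the global default when the caller requests a different one. A standalone sketch of the idiom (independent of this class):

from tensorflow.keras import backend as K

print(K.floatx())          # 'float32' unless configured otherwise
K.set_floatx('float64')    # newly created layers now default to float64
assert K.floatx() == 'float64'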
Example #2
def train(model_func, params):
    image_width = params['image_width']
    image_height = params['image_height']
    image_format = params['image_format']
    distort_color = params['distort_color']
    momentum = params['momentum']
    loss_scale = params['loss_scale']
    data_dir = params['data_dir']
    data_idx_dir = params['data_idx_dir']
    batch_size = params['batch_size']
    num_iter = params['num_iter']
    iter_unit = params['iter_unit']
    log_dir = params['log_dir']
    export_dir = params['export_dir']
    tensorboard_dir = params['tensorboard_dir']
    display_every = params['display_every']
    precision = params['precision']
    dali_mode = params['dali_mode']
    use_xla = params['use_xla']

    if data_dir is not None:
        file_format = os.path.join(data_dir, '%s-*')
        train_files = sorted(tf.io.gfile.glob(file_format % 'train'))
        valid_files = sorted(tf.io.gfile.glob(file_format % 'validation'))
        num_train_samples = common.get_num_records(train_files)
        num_valid_samples = common.get_num_records(valid_files)
    else:
        num_train_samples = 1281982
        num_valid_samples = 5000

    train_idx_files = None
    valid_idx_files = None
    if data_idx_dir is not None:
        file_format = os.path.join(data_idx_dir, '%s-*')
        train_idx_files = sorted(tf.io.gfile.glob(file_format % 'train'))
        valid_idx_files = sorted(tf.io.gfile.glob(file_format % 'validation'))

    if iter_unit.lower() == 'epoch':
        num_epochs = num_iter
        nstep_per_epoch = num_train_samples // (batch_size * hvd.size())
        nstep_per_valid = num_valid_samples // (batch_size * hvd.size())
    else:
        assert iter_unit.lower() == 'batch'
        num_epochs = 1
        nstep_per_epoch = min(num_iter,
                              num_train_samples // (batch_size * hvd.size()))
        nstep_per_valid = min(10,
                              num_valid_samples // (batch_size * hvd.size()))

    initial_epoch = 0
    if log_dir:
        # We save checkpoints only when using real data.
        assert data_dir, "--data_dir cannot be empty when using --log_dir"
        assert os.path.exists(log_dir)
        ckpt_format = log_dir + "/model-{epoch:02d}-{val_top1:.2f}.hdf5"
        # Looks for the most recent checkpoint and sets the initial epoch from it.
        for filename in os.listdir(log_dir):
            if filename.startswith('model-'):
                initial_epoch = max(int(re.findall(r'\d+', filename)[0]),
                                    initial_epoch)

    if tensorboard_dir:
        assert os.path.exists(tensorboard_dir)

    if export_dir:
        assert os.path.exists(export_dir)
        save_format = export_dir + "/saved_model_rn50.h5"

    if use_xla:
        tf.config.optimizer.set_jit(True)

    # Horovod: pin GPU to be used to process local rank (one GPU per process)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    if gpus:
        tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()],
                                                   'GPU')

    if precision == 'fp16':
        policy = keras.mixed_precision.experimental.Policy(
            'mixed_float16', loss_scale)
        keras.mixed_precision.experimental.set_policy(policy)

    lr_schedule = common.create_piecewise_constant_decay_with_warmup(
        batch_size=batch_size * hvd.size(),
        epoch_size=num_train_samples,
        warmup_epochs=common.LR_SCHEDULE[0][1],
        boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]),
        multipliers=list(p[0] for p in common.LR_SCHEDULE),
        compute_lr_on_cpu=True)
    opt = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=momentum)
    # Horovod: add Horovod DistributedOptimizer. We use a modified version to
    # support the custom learning rate schedule.
    opt = hvd_patch.DistributedOptimizer(opt)

    backend.set_image_data_format(image_format)
    dtype = 'float16' if precision == 'fp16' else 'float32'
    backend.set_floatx(dtype)
    model = model_func(num_classes=image_processing.NUM_CLASSES)
    loss_func = 'sparse_categorical_crossentropy'

    top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5, name='top5')
    top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1, name='top1')

    # Horovod: specify `experimental_run_tf_function=False` to ensure TensorFlow
    # uses hvd.DistributedOptimizer() to compute gradients. However, this option
    # disables the overlapping of data loading and compute, and hurts
    # performance if the model is not under a distribution strategy scope.
    model.compile(optimizer=opt,
                  loss=loss_func,
                  metrics=[top1, top5],
                  experimental_run_tf_function=False)

    training_hooks = []
    training_hooks.append(hvd.callbacks.BroadcastGlobalVariablesCallback(0))
    training_hooks.append(_ProfileKerasFitCallback(batch_size, display_every))

    if log_dir and hvd.rank() == 0:
        ckpt_callback = keras.callbacks.ModelCheckpoint(
            ckpt_format,
            monitor='val_top1',
            verbose=1,
            save_best_only=False,
            save_weights_only=False,
            save_freq='epoch')
        training_hooks.append(ckpt_callback)

    if tensorboard_dir and hvd.rank() == 0:
        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=tensorboard_dir)
        training_hooks.append(tensorboard_callback)

    if data_dir is not None:
        num_preproc_threads = params['dali_threads'] if dali_mode else 10
        train_input = image_processing.image_set(
            train_files,
            batch_size,
            image_height,
            image_width,
            training=True,
            distort_color=distort_color,
            deterministic=False,
            num_threads=num_preproc_threads,
            use_dali=dali_mode,
            idx_filenames=train_idx_files)

        valid_input = image_processing.image_set(
            valid_files,
            batch_size,
            image_height,
            image_width,
            training=False,
            distort_color=False,
            deterministic=False,
            num_threads=num_preproc_threads,
            use_dali=dali_mode,
            idx_filenames=valid_idx_files)
        if dali_mode:
            train_input = train_input.get_device_dataset()
            valid_input = valid_input.get_device_dataset()
        valid_params = {
            'validation_data': valid_input,
            'validation_steps': nstep_per_valid,
            'validation_freq': 1
        }
    else:
        train_input = image_processing.fake_image_set(batch_size, image_height,
                                                      image_width)
        valid_params = {}

    try:
        verbose = 2 if hvd.rank() == 0 else 0
        model.fit(train_input,
                  epochs=num_epochs,
                  callbacks=training_hooks,
                  steps_per_epoch=nstep_per_epoch,
                  verbose=verbose,
                  initial_epoch=initial_epoch,
                  **valid_params)
    except KeyboardInterrupt:
        print("Keyboard interrupt")

    if export_dir and hvd.rank() == 0:
        model.save(save_format)
        print(f"The model is saved to {save_format}")
Example #3
def retinanet_mask_3D(inputs,
                      num_classes,
                      retinanet_model=None,
                      anchor_params=None,
                      nms=True,
                      class_specific_filter=True,
                      crop_size=(14, 14),
                      mask_size=(28, 28, 28),
                      name='retinanet-mask-3D',
                      roi_submodels=None,
                      mask_dtype=K.floatx(),
                      **kwargs):
    """Construct a RetinaNet mask model on top of a retinanet bbox model.
    Uses the retinanet bbox model and appends layers to compute masks.
    Args:
        inputs: List of tensorflow.keras.layers.Input.
            The first input is the image, the second input the blob of masks.
        num_classes: Integer, number of classes to classify.
        retinanet_model: deepcell.model_zoo.retinanet.retinanet model,
            returning regression and classification values.
        anchor_params: Struct containing anchor parameters.
        nms: Boolean, whether to use NMS.
        class_specific_filter: Boolean, use class specific filtering.
        crop_size: 2-tuple, x-y size of the ROI crops used to build the
            default roi_submodels.
        mask_size: 3-tuple, size of the predicted 3D masks used to build the
            default roi_submodels.
        roi_submodels: Submodels for processing ROIs.
        mask_dtype: Data type of the masks, can be different from the main one.
        name: Name of the model.
        **kwargs: Additional kwargs to pass to the retinanet bbox model.
    Returns:
        A Model whose input is `inputs` and whose outputs are the outputs of
        each submodel for each pyramid level, plus the detections. The order
        is as defined in submodels.
        ```
        [
            regression, classification, other[0], other[1], ...,
            boxes_masks, boxes, scores, labels, masks, other[0], other[1], ...
        ]
        ```
    """
    if anchor_params is None:
        anchor_params = AnchorParameters.default

    if roi_submodels is None:
        retinanet_dtype = K.floatx()
        K.set_floatx(mask_dtype)
        roi_submodels = default_roi_submodels(num_classes, crop_size,
                                              mask_size, mask_dtype,
                                              retinanet_dtype)
        K.set_floatx(retinanet_dtype)

    image = inputs
    image_shape = Shape()(image)

    if retinanet_model is None:
        retinanet_model = retinanet(inputs=image,
                                    num_classes=num_classes,
                                    num_anchors=anchor_params.num_anchors(),
                                    **kwargs)

    # parse outputs
    regression = retinanet_model.outputs[0]
    classification = retinanet_model.outputs[1]
    other = retinanet_model.outputs[2:]
    features = [
        retinanet_model.get_layer(name).output
        for name in ['P3', 'P4', 'P5', 'P6', 'P7']
    ]

    # build boxes
    anchors = __build_anchors(anchor_params, features)
    boxes = RegressBoxes(name='boxes')([anchors, regression])
    boxes = ClipBoxes(name='clipped_boxes')([image, boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    detections = FilterDetections(
        nms=nms,
        class_specific_filter=class_specific_filter,
        max_detections=100,
        name='filtered_detections')([boxes, classification] + other)

    # split up in known outputs and "other"
    boxes = detections[0]
    scores = detections[1]

    # get the region of interest features
    roi_input = [image_shape, boxes, classification] + features
    rois = RoiAlign(crop_size=crop_size)(roi_input)

    # execute maskrcnn submodels
    maskrcnn_outputs = [submodel(rois) for _, submodel in roi_submodels]

    # concatenate boxes for loss computation
    trainable_outputs = [
        ConcatenateBoxes(name=name)([boxes, output])
        for (name, _), output in zip(roi_submodels, maskrcnn_outputs)
    ]

    # reconstruct the new output
    outputs = [regression, classification] + other + trainable_outputs + \
        detections + maskrcnn_outputs

    return Model(inputs=inputs, outputs=outputs, name=name)
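
A minimal usage sketch under stated assumptions: a single-channel 3D image input, with any backbone arguments required by the underlying `retinanet` call forwarded through `**kwargs` (a real call may need more than shown):

from tensorflow.keras.layers import Input

inputs = Input(shape=(None, None, None, 1))  # hypothetical (frames, y, x, channels)
model = retinanet_mask_3D(inputs, num_classes=2)
regression, classification = model.outputs[:2]  # outputs follow the documented order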
Example #4
def retinanet_mask(inputs,
                   backbone_dict,
                   num_classes,
                   frames_per_batch=1,
                   backbone_levels=['C3', 'C4', 'C5'],
                   pyramid_levels=['P3', 'P4', 'P5', 'P6', 'P7'],
                   retinanet_model=None,
                   anchor_params=None,
                   nms=True,
                   panoptic=False,
                   class_specific_filter=True,
                   crop_size=(14, 14),
                   mask_size=(28, 28),
                   name='retinanet-mask',
                   roi_submodels=None,
                   max_detections=100,
                   score_threshold=0.05,
                   nms_threshold=0.5,
                   mask_dtype=K.floatx(),
                   **kwargs):
    """Construct a RetinaNet mask model on top of a retinanet bbox model.
    Uses the retinanet bbox model and appends layers to compute masks.

    Args:
        inputs (tensor): List of tensorflow.keras.layers.Input.
            The first input is the image, the second input the blob of masks.
        backbone_dict (dict): A dictionary with the backbone layers.
        num_classes (int): Integer, number of classes to classify.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        backbone_levels (list): The backbone levels used to create the
            feature pyramid. Defaults to ['C3', 'C4', 'C5'].
        pyramid_levels (list): The pyramid levels to attach regression and
            classification heads to. Defaults to ['P3', 'P4', 'P5', 'P6', 'P7'].
        retinanet_model (tensorflow.keras.Model): RetinaNet model that predicts
            regression and classification values.
        anchor_params (AnchorParameters): Struct containing anchor parameters.
        nms (bool): Whether to use non-maximum suppression
            for the filtering step.
        panoptic (bool): Flag for adding the semantic head for panoptic
            segmentation tasks. Defaults to False.
        class_specific_filter (bool): Use class specific filtering.
        crop_size (tuple): 2-length tuple for the x-y size of the crops.
            Used to create default roi_submodels.
        mask_size (tuple): 2-length tuple for the x-y size of the masks.
            Used to create default roi_submodels.
        name (str): Name of the model.
        roi_submodels (list): Submodels for processing ROIs.
        max_detections (int): The maximum number of detections allowed.
        score_threshold (float): Minimum score for the FilterDetections layer.
        nms_threshold (float): Minimum NMS threshold for the
            FilterDetections layer.
        mask_dtype (str): Dtype to use for mask tensors.
        kwargs (dict): Additional kwargs to pass to the retinanet bbox model.

    Returns:
        tensorflow.keras.Model: Model whose input is ``inputs`` and whose
            outputs are the outputs of each submodel for each pyramid level,
            plus the detections. The order is as defined in submodels.

            ```
            [
                regression, classification, other[0], ...,
                boxes_masks, boxes, scores, labels, masks, other[0], ...
            ]
            ```

    """
    if anchor_params is None:
        anchor_params = AnchorParameters.default

    if roi_submodels is None:
        retinanet_dtype = K.floatx()
        K.set_floatx(mask_dtype)
        roi_submodels = default_roi_submodels(num_classes, crop_size,
                                              mask_size, frames_per_batch,
                                              mask_dtype, retinanet_dtype)
        K.set_floatx(retinanet_dtype)

    image = inputs
    image_shape = Shape()(image)

    if retinanet_model is None:
        retinanet_model = retinanet(inputs=image,
                                    backbone_dict=backbone_dict,
                                    num_classes=num_classes,
                                    backbone_levels=backbone_levels,
                                    pyramid_levels=pyramid_levels,
                                    panoptic=panoptic,
                                    num_anchors=anchor_params.num_anchors(),
                                    frames_per_batch=frames_per_batch,
                                    **kwargs)

    # parse outputs
    regression = retinanet_model.outputs[0]
    classification = retinanet_model.outputs[1]

    if panoptic:
        # Determine the number of semantic heads
        n_semantic_heads = len([
            1 for layer in retinanet_model.layers if 'semantic' in layer.name
        ])

        # The panoptic output should not be sent to filter detections
        other = retinanet_model.outputs[2:-n_semantic_heads]
        semantic = retinanet_model.outputs[-n_semantic_heads:]
    else:
        other = retinanet_model.outputs[2:]

    features = [
        retinanet_model.get_layer(name).output for name in pyramid_levels
    ]

    # build boxes
    anchors = __build_anchors(anchor_params,
                              features,
                              frames_per_batch=frames_per_batch)
    boxes = RegressBoxes(name='boxes')([anchors, regression])
    boxes = ClipBoxes(name='clipped_boxes')([image, boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    detections = FilterDetections(
        nms=nms,
        nms_threshold=nms_threshold,
        score_threshold=score_threshold,
        class_specific_filter=class_specific_filter,
        max_detections=max_detections,
        name='filtered_detections')([boxes, classification] + other)

    # split up in known outputs and "other"
    boxes = detections[0]
    scores = detections[1]

    # get the region of interest features
    #
    # roi_input = [image_shape, boxes, classification] + features
    # rois = _RoiAlign(crop_size=crop_size)(roi_input)

    fpn = features[0]
    fpn = UpsampleLike()([fpn, image])
    rois = RoiAlign(crop_size=crop_size)([boxes, fpn])

    # execute maskrcnn submodels
    maskrcnn_outputs = [submodel(rois) for _, submodel in roi_submodels]

    # concatenate boxes for loss computation
    trainable_outputs = [
        ConcatenateBoxes(name=name)([boxes, output])
        for (name, _), output in zip(roi_submodels, maskrcnn_outputs)
    ]

    # reconstruct the new output
    outputs = [regression, classification] + other + trainable_outputs + \
        detections + maskrcnn_outputs

    if panoptic:
        outputs += list(semantic)

    model = Model(inputs=inputs, outputs=outputs, name=name)
    model.backbone_levels = backbone_levels
    model.pyramid_levels = pyramid_levels

    return model
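
A hedged sketch of where the panoptic outputs land; `backbone_dict` would normally come from one of deepcell's backbone helpers, which is assumed rather than shown here:

model = retinanet_mask(inputs, backbone_dict, num_classes=2, panoptic=True)
# Semantic heads are appended after all other outputs (see the code above):
n_semantic = len([1 for layer in model.layers if 'semantic' in layer.name])
semantic_outputs = model.outputs[-n_semantic:]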
Example #5
    def __init__(self,
                 fields=None,
                 variables=None,
                 hidden_layers=None,
                 activation="tanh",
                 output_activation="linear",
                 rnn_type="SimpleRNN",
                 recurrent_activation="tanh",
                 kernel_initializer=None,
                 bias_initializer=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 dtype=None,
                 trainable=True,
                 **kwargs):
        # check data-type.
        if dtype is None:
            dtype = K.floatx()
        elif dtype != K.floatx():
            K.set_floatx(dtype)
        # prepare hidden layers.
        if hidden_layers is None:
            hidden_layers = []
        else:
            hidden_layers = to_list(hidden_layers)
        # check for copy constructor.
        if all([x in kwargs for x in ('inputs', 'outputs', 'layers')]):
            self._inputs = kwargs['inputs'].copy()
            self._outputs = kwargs['outputs'].copy()
            self._layers = kwargs['layers'].copy()
            self._set_model()
            return
        # prepare kernel initializers.
        activations, def_biasinit, def_kerinit = \
            prepare_default_activations_and_initializers(
                len(hidden_layers) * [activation] + [output_activation]
            )
        if kernel_initializer is None:
            kernel_initializer = def_kerinit
        elif isinstance(kernel_initializer, (float, int)):
            kernel_initializer = default_weight_initializer(
                len(hidden_layers) * [activation] + [output_activation],
                'constant',
                scale=kernel_initializer)
        else:
            kernel_initializer = [
                kernel_initializer
                for _ in len(hidden_layers) * [activation] + [output_activation]
            ]
        # prepare bias initializers.
        if bias_initializer is None:
            bias_initializer = def_biasinit
        elif isinstance(bias_initializer, (float, int)):
            bias_initializer = default_weight_initializer(
                len(hidden_layers) * [activation] + [output_activation],
                'constant',
                scale=bias_initializer)
        else:
            bias_initializer = [
                bias_initializer
                for _ in len(hidden_layers) * [activation] + [output_activation]
            ]
        # prepare regularizers.
        kernel_regularizer = default_regularizer(kernel_regularizer)
        bias_regularizer = default_regularizer(bias_regularizer)
        # prepares fields.
        fields = to_list(fields)
        if all([isinstance(fld, str) for fld in fields]):
            output_fields = [
                RNNField(
                    name=fld,
                    dtype=dtype,
                    kernel_initializer=kernel_initializer[-1],
                    bias_initializer=bias_initializer[-1],
                    kernel_regularizer=kernel_regularizer,
                    bias_regularizer=bias_regularizer,
                    trainable=trainable,
                ) for fld in fields
            ]
        elif all([validations.is_field(fld) for fld in fields]):
            output_fields = fields
        else:
            raise TypeError('Please provide a list of field names (strings) '
                            'or Field objects.')
        # prepare inputs/outputs/layers.
        inputs = []
        layers = []
        variables = to_list(variables)
        if all([isinstance(var, RNNFunctional) for var in variables]):
            for var in variables:
                inputs += var.outputs
            # for var in variables:
            #     for lay in var.layers:
            #         layers.append(lay)
        else:
            raise TypeError(
                "Input error: Please provide a `list` of `Functional`s. \n"
                "Provided - {}".format(variables))
        # Check and convert activation functions to proper format.
        assert not isinstance(activation, list), \
            'Expected an activation function name, not a list.'
        afunc = get_activation(activation)

        # Input layers.
        if len(inputs) == 1:
            net_input = inputs[0]
        else:
            layer = Concatenate(name=graph_unique_name('conct'))
            net_input = layer(inputs)

        # Define the networks.
        net = [net_input]
        assert len(hidden_layers) > 0, \
            'Minimum of 1 RNN hidden layer is needed.'

        # Adding hidden layers
        for nLay, nNeuron in enumerate(hidden_layers):
            if nLay < 1000:
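                # NOTE: with realistic layer counts nLay < 1000 always holds,
                # so every hidden layer takes the recurrent branch; the Dense
                # branch below is effectively unreachable.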
                # First layer starts with RNN.
                if rnn_type == 'LSTM':
                    layer = LSTM(nNeuron,
                                 return_sequences=True,
                                 recurrent_activation=recurrent_activation,
                                 kernel_initializer=kernel_initializer[nLay],
                                 bias_initializer=bias_initializer[nLay],
                                 kernel_regularizer=kernel_regularizer,
                                 bias_regularizer=bias_regularizer,
                                 trainable=trainable,
                                 dtype=dtype,
                                 unroll=True,
                                 name=graph_unique_name(
                                     "LSTM{:d}b_".format(nNeuron)))
                elif rnn_type == 'SimpleRNN':
                    layer = SimpleRNN(
                        nNeuron,
                        return_sequences=True,
                        kernel_initializer=kernel_initializer[nLay],
                        bias_initializer=bias_initializer[nLay],
                        kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer,
                        trainable=trainable,
                        dtype=dtype,
                        unroll=True,
                        name=graph_unique_name("SRNN{:d}b_".format(nNeuron)))
                else:
                    raise ValueError('Invalid entry for `rnn_type` -- '
                                     'accepts from (`SimpleRNN`, `LSTM`).')
            else:
                # Add the dense layer.
                layer = Dense(nNeuron,
                              kernel_initializer=kernel_initializer[nLay],
                              bias_initializer=bias_initializer[nLay],
                              kernel_regularizer=kernel_regularizer,
                              bias_regularizer=bias_regularizer,
                              trainable=trainable,
                              dtype=dtype,
                              name=graph_unique_name("D{:d}b".format(nNeuron)))
            layers.append(layer)
            net[-1] = layer(net[-1])
            # Apply the activation.
            if afunc.__name__ != 'linear':
                layer = activations[nLay]
                layers.append(layer)
                net[-1] = layer(net[-1])

        # store output layers.
        for out in output_fields:
            layers.append(out)

        # Assign to the output variable
        if len(net) == 1:
            net_output = net[0]
        else:
            raise ValueError("Legacy for Enrichment: Must be updated. ")
            layer = Concatenate(name=graph_unique_name("{}_".format("conct")))
            net_output = layer(net)

        # check output activation functions.
        output_func = get_activation(output_activation)
        # Define the final outputs of each network
        outputs = []
        for out in output_fields:
            # add the activation on the output.
            if output_func.__name__ != 'linear':
                layer = activations[-1]
                layers.append(layer)
                outputs.append(layer(out(net_output)))
            else:
                outputs.append(out(net_output))

        self._inputs = inputs
        self._outputs = outputs
        self._layers = layers
        self._set_model()
Example #6
# Copyright 2020 Graphcore Ltd.
"""Helper utility to download imagenet weights for Densenet model."""

from pathlib import Path
from typing import List

import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib.framework.python.framework.checkpoint_utils import \
    list_variables, load_variable
from tensorflow.python.keras import backend as keras_backend
from tensorflow.python.keras.applications.densenet import DenseNet121

keras_backend.set_floatx('float16')


def get_densenet_weights(save_dir: Path = Path('densenet_121')) -> Path:
    """Download pre-trained imagenet weights for densenet model.

    Args:
        save_dir: Path to where checkpoint must be downloaded.

    Returns: Path to checkpoint file.

    """
    g = tf.Graph()
    with tf.Session(graph=g) as sess:
        keras_backend.set_session(sess)
        save_dir.mkdir(parents=True, exist_ok=True)
        _ = DenseNet121(weights='imagenet')
        saver = tf.train.Saver()
        # The listing is truncated here; an assumed completion (not from the
        # original source) saves the session and returns the checkpoint path:
        ckpt_file = save_dir / 'densenet_121.ckpt'
        saver.save(sess, str(ckpt_file))
    return ckpt_file
Example #7
def predict(hparams,
            model_dir, checkpoint_path, output_dir,
            test_source_files, test_target_files):
    if hparams.half_precision:
        backend.set_floatx(tf.float16.name)
        backend.set_epsilon(1e-4)

    audio = Audio(hparams)

    def predict_input_fn():
        source = tf.data.TFRecordDataset(list(test_source_files))
        target = tf.data.TFRecordDataset(list(test_target_files))
        dataset = dataset_factory(source, target, hparams)
        batched = dataset.prepare_and_zip().group_by_batch(
            batch_size=1).move_mel_to_source()
        return batched.dataset

    estimator = model_factory(hparams, model_dir, None)

    predictions = map(
        lambda p: PredictedMel(p["id"], p["key"], p["mel"], p.get("mel_postnet"), p["mel"].shape[1], p["mel"].shape[0],
                               p["ground_truth_mel"],
                               p["alignment"], p.get("alignment2"), p.get("alignment3"), p.get("alignment4"),
                               p.get("alignment5"), p.get("alignment6"), p.get("attention2_gate_activation"),
                               p["source"], p["text"], p.get("accent_type"), p),
        estimator.predict(predict_input_fn, checkpoint_path=checkpoint_path))

    for v in predictions:
        key = v.key.decode('utf-8')
        mel_filename = f"{key}.{hparams.predicted_mel_extension}"
        mel_filepath = os.path.join(output_dir, mel_filename)
        ground_truth_mel = v.ground_truth_mel.astype(np.float32)
        predicted_mel = v.predicted_mel.astype(np.float32)
        mel_denormalized = audio.denormalize_mel(predicted_mel)

        linear_spec = audio.logmelspc_to_linearspc(mel_denormalized)
        wav = audio.griffin_lim(linear_spec)
        audio.save_wav(wav, os.path.join(output_dir, f"{key}.wav"))

        assert mel_denormalized.shape[1] == hparams.num_mels
        mel_denormalized.tofile(mel_filepath, format='<f4')
        text = v.text.decode("utf-8")
        plot_filename = f"{key}.png"
        plot_filepath = os.path.join(output_dir, plot_filename)
        alignments = [x.astype(np.float32) for x in [
            v.alignment, v.alignment2, v.alignment3, v.alignment4, v.alignment5, v.alignment6] if x is not None]

        if hparams.model == "SSNTModel":
            ssnt_metrics.save_alignment_and_log_probs([v.alignment],
                                                      [v.all_fields["log_emit_and_shift_probs"]],
                                                      [v.all_fields["log_output_prob"]],
                                                      [None],
                                                      text, v.key, 0,
                                                      os.path.join(output_dir, f"{key}_probs.png"))
            ssnt_metrics.write_prediction_result(v.id, key, text,
                                                 v.all_fields["log_emit_and_shift_probs"],
                                                 v.all_fields["log_output_prob"],
                                                 os.path.join(output_dir, f"{key}_probs.tfrecord"))
        plot_predictions(alignments, ground_truth_mel, predicted_mel, text, v.key, plot_filepath)
        prediction_filename = f"{key}.tfrecord"
        prediction_filepath = os.path.join(output_dir, prediction_filename)
        write_prediction_result(v.id, key, alignments, mel_denormalized, audio.denormalize_mel(ground_truth_mel),
                                text, v.source, prediction_filepath)
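
Raising the backend epsilon alongside float16 in `predict` is deliberate: Keras' default epsilon of 1e-7 falls into float16's subnormal range, so it is stored with a large relative error, while 1e-4 is comfortably representable. A quick check with NumPy:

import numpy as np

print(np.float16(1e-7))  # ~1.192e-07: nearest float16 subnormal, roughly 19% off
print(np.float16(1e-4))  # ~1.0e-04: representable with ordinary rounding error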
Example #8
# %%
num_meta_tasks = 20
num_test_tasks = 5
num_datapoints = 5
meta, test = generate_meta_and_test_tasks(num_datapoints, num_meta_tasks, num_test_tasks)

# %% [markdown]
# ## Create the mean function
# We will use a Keras deep neural network as the mean function.

# %%
from tensorflow.python.keras import backend as K
from gpflow.config import default_float

K.set_floatx("float64")
assert default_float() == np.float64


def build_mean_function():
    inputs = tf.keras.layers.Input(shape=(1,))
    x = tf.keras.layers.Dense(64, activation="relu")(inputs)
    x = tf.keras.layers.Dense(64, activation="relu")(x)
    outputs = tf.keras.layers.Dense(1)(x)
    return tf.keras.Model(inputs=inputs, outputs=outputs)


# %% [markdown]
# ## Build the GP metamodel
# Metalearning boils down to learning a good prior that can generalize to new tasks with a small number of data points. This framework is prevalent in GP modeling, where we usually maximize the marginal likelihood to learn a good set of hyperparameters that specify the GP prior.
#
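
# %% [markdown]
# As an illustrative sketch (assuming the GPflow 2 API, and that each task in
# `meta` is an `(X, Y)` tuple), the Keras network can serve directly as the
# GP's mean function, since GPflow only requires the mean function to be
# callable on the inputs:

# %%
import gpflow


def build_gp_model(data, mean_function):
    kernel = gpflow.kernels.SquaredExponential()
    return gpflow.models.GPR(data, kernel=kernel, mean_function=mean_function)


# For example (hypothetical): model = build_gp_model(meta[0], build_mean_function())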
Example #9
import os

import cv2
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.python.keras.backend as K
from flask import Flask, render_template, request, url_for
from keras.models import load_model
from tensorflow.python.keras.backend import set_session
from werkzeug.utils import secure_filename, redirect

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.disable_v2_behavior()
app = Flask(__name__)

config = tf.ConfigProto(device_count={'GPU': 0})
sess = tf.Session(config=config)
graph = tf.get_default_graph()
set_session(sess)
K.set_floatx('float16')
#model = load_model('/home/code_broom/PycharmProjects/DistractedDrivers/Models/class_7/class_7_flask_trial.h5')


@app.route('/', methods=['GET', 'POST'])
def main_page():
    if request.method == 'POST':
        file = request.files['file']
        filename = secure_filename(file.filename)
        file.save(os.path.join('uploads', filename))
        return redirect(url_for('prediction', filename=filename))
    return render_template('index.html')


@app.route('/prediction/<filename>')
def prediction(filename):
    # Hypothetical completion -- the original listing is truncated here.
    # Assumes the commented-out `model` above has been loaded and expects
    # 224x224 RGB input; adjust to the real model's input shape.
    img = cv2.imread(os.path.join('uploads', filename))
    img = cv2.resize(img, (224, 224)).astype('float16') / 255.0
    with graph.as_default():
        set_session(sess)
        preds = model.predict(np.expand_dims(img, axis=0))
    return render_template('prediction.html', prediction=int(np.argmax(preds)))
Example #10
def train_ctl(model_func, params):
    image_width = params['image_width']
    image_height = params['image_height']
    image_format = params['image_format']
    distort_color = params['distort_color']
    momentum = params['momentum']
    loss_scale = params['loss_scale']
    data_dir = params['data_dir']
    data_idx_dir = params['data_idx_dir']
    batch_size = params['batch_size']
    num_iter = params['num_iter']
    iter_unit = params['iter_unit']
    log_dir = params['log_dir']
    export_dir = params['export_dir']
    tensorboard_dir = params['tensorboard_dir']
    display_every = params['display_every']
    precision = params['precision']
    dali_mode = params['dali_mode']
    use_xla = params['use_xla']

    if data_dir is not None:
        file_format = os.path.join(data_dir, '%s-*')
        train_files = sorted(tf.io.gfile.glob(file_format % 'train'))
        valid_files = sorted(tf.io.gfile.glob(file_format % 'validation'))
        num_train_samples = common.get_num_records(train_files)
        num_valid_samples = common.get_num_records(valid_files)
    else:
        num_train_samples = 1281982
        num_valid_samples = 5000

    train_idx_files = None
    valid_idx_files = None
    if data_idx_dir is not None:
        file_format = os.path.join(data_idx_dir, '%s-*')
        train_idx_files = sorted(tf.io.gfile.glob(file_format % 'train'))
        valid_idx_files = sorted(tf.io.gfile.glob(file_format % 'validation'))

    if iter_unit.lower() == 'epoch':
        num_epochs = num_iter
        nstep_per_epoch = num_train_samples // (batch_size * hvd.size())
        nstep_per_valid = num_valid_samples // (batch_size * hvd.size())
    else:
        assert iter_unit.lower() == 'batch'
        num_epochs = 1
        nstep_per_epoch = min(num_iter,
                              num_train_samples // (batch_size * hvd.size()))
        nstep_per_valid = min(10,
                              num_valid_samples // (batch_size * hvd.size()))

    if export_dir:
        assert os.path.exists(export_dir)
        save_format = export_dir + "/saved_model_rn50.h5"

    if use_xla:
        tf.config.optimizer.set_jit(True)

    # Horovod: pin GPU to be used to process local rank (one GPU per process)
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    if gpus:
        tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()],
                                                   'GPU')

    if tensorboard_dir and hvd.rank() == 0:
        assert os.path.exists(tensorboard_dir)
        summary_writer = tf.summary.create_file_writer(tensorboard_dir)
    else:
        summary_writer = None

    if precision == 'fp16':
        # Compare parsed version components; a plain string comparison would
        # mis-order versions such as "2.10.0" vs "2.4.0".
        if tuple(int(v) for v in tf.__version__.split('.')[:2]) >= (2, 4):
            policy = keras.mixed_precision.Policy('mixed_float16')
            keras.mixed_precision.set_global_policy(policy)
        else:
            policy = keras.mixed_precision.experimental.Policy(
                'mixed_float16', loss_scale)
            keras.mixed_precision.experimental.set_policy(policy)

    lr_schedule = common.create_piecewise_constant_decay_with_warmup(
        batch_size=batch_size * hvd.size(),
        epoch_size=num_train_samples,
        warmup_epochs=common.LR_SCHEDULE[0][1],
        boundaries=list(p[1] for p in common.LR_SCHEDULE[1:]),
        multipliers=list(p[0] for p in common.LR_SCHEDULE),
        compute_lr_on_cpu=True)
    opt = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=momentum)

    backend.set_image_data_format(image_format)
    dtype = 'float16' if precision == 'fp16' else 'float32'
    backend.set_floatx(dtype)
    model = model_func(num_classes=image_processing.NUM_CLASSES,
                       batch_size=batch_size)

    loss_func = keras.losses.SparseCategoricalCrossentropy()

    train_top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(
        k=1, name='train_top1')
    train_top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(
        k=5, name='train_top5')

    val_loss = tf.keras.metrics.Mean(name='val_loss', dtype=tf.float32)

    val_top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1,
                                                              name='val_top1')
    val_top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5,
                                                              name='val_top5')

    if log_dir:
        # We save checkpoints only when using real data.
        assert data_dir, "--data_dir cannot be empty when using --log_dir"
        assert os.path.exists(log_dir)
        ckpt = tf.train.Checkpoint(epoch=tf.Variable(0),
                                   optimizer=opt,
                                   net=model)
        manager = tf.train.CheckpointManager(ckpt,
                                             log_dir,
                                             max_to_keep=3,
                                             checkpoint_name="model-ckpt")

    @tf.function
    def train_step(inputs, first_batch):
        images, labels = inputs

        with tf.GradientTape() as tape:
            predictions = model(images, training=True)
            loss = loss_func(labels, predictions)
            loss += tf.reduce_sum(model.losses)
            loss_copy = loss
            # Scale the losses
            if precision == 'fp16':
                loss = loss * tf.cast(loss_scale, loss.dtype)

        tape = hvd.DistributedGradientTape(tape)

        old_grads = tape.gradient(loss, model.trainable_variables)

        # Unscale the grads
        if precision == 'fp16':
            loss_scale_reciprocal = 1. / loss_scale
            grads = [
                g * tf.cast(loss_scale_reciprocal, g.dtype)
                if g is not None else None for g in old_grads
            ]
        else:
            grads = old_grads

        opt.apply_gradients(zip(grads, model.trainable_variables))

        train_top1.update_state(labels, predictions)
        train_top5.update_state(labels, predictions)

        if hvd.size() > 1 and first_batch:
            hvd.broadcast_variables(model.variables, root_rank=0)
            hvd.broadcast_variables(opt.variables(), root_rank=0)

        return loss_copy

    @tf.function
    def valid_step(inputs):
        images, labels = inputs
        predictions = model(images, training=False)
        loss = loss_func(labels, predictions)

        val_loss.update_state(loss)
        val_top1.update_state(labels, predictions)
        val_top5.update_state(labels, predictions)

    if data_dir is not None:
        num_preproc_threads = 4 if dali_mode else 10
        train_input = image_processing.image_set(
            train_files,
            batch_size,
            image_height,
            image_width,
            training=True,
            distort_color=distort_color,
            deterministic=False,
            num_threads=num_preproc_threads,
            use_dali=dali_mode,
            idx_filenames=train_idx_files)

        valid_input = image_processing.image_set(
            valid_files,
            batch_size,
            image_height,
            image_width,
            training=False,
            distort_color=False,
            deterministic=False,
            num_threads=num_preproc_threads,
            use_dali=dali_mode,
            idx_filenames=valid_idx_files)
    else:
        if dali_mode:
            raise ValueError("Must provide --data_dir if Dali is enabled")
        else:
            train_input = image_processing.fake_image_set(
                batch_size, image_height, image_width)

    global_steps = 0
    log_steps = display_every
    try:

        initial_epoch = 0
        if log_dir:
            ckpt.restore(manager.latest_checkpoint)
            if manager.latest_checkpoint:
                if hvd.rank() == 0:
                    print("Restored from {}".format(manager.latest_checkpoint))
                initial_epoch = max(
                    int(re.findall(r'\d+', manager.latest_checkpoint)[0]),
                    initial_epoch)
            else:
                if hvd.rank() == 0:
                    print("Initializing from scratch.")

        # Training Loop
        for epoch in range(num_epochs):
            if epoch < initial_epoch:
                continue
            # on_epoch_begin
            epoch_start = time.time()

            total_loss = 0.0
            num_batches = 0
            train_top1.reset_states()
            train_top5.reset_states()

            if not dali_mode:
                train_iter = iter(train_input)
            for _ in range(nstep_per_epoch):
                # on_batch_begin
                global_steps += 1
                if global_steps == 1:
                    start_time = time.time()

                if global_steps == 1 and hvd.rank() == 0 and summary_writer:
                    tf.summary.trace_on(graph=True, profiler=True)

                if not dali_mode:
                    x = next(train_iter)
                else:
                    x = train_input.get_device_minibatches()
                total_loss += train_step(x, global_steps == 1)

                if global_steps == 1 and hvd.rank() == 0 and summary_writer:
                    with summary_writer.as_default():
                        tf.summary.trace_export(
                            name="train_step",
                            step=0,
                            profiler_outdir=tensorboard_dir)

                # on_batch_end
                if global_steps % log_steps == 0:
                    timestamp = time.time()
                    elapsed_time = timestamp - start_time
                    examples_per_second = \
                        (batch_size * hvd.size() * log_steps) / elapsed_time
                    if hvd.rank() == 0:
                        print("global_step: %d images_per_sec: %.1f" %
                              (global_steps, examples_per_second))
                    start_time = timestamp
                num_batches += 1

            train_loss = total_loss / num_batches

            # on_epoch_end
            epoch_run_time = time.time() - epoch_start
            if hvd.rank() == 0:
                print("epoch: %d time_taken: %.1f" % (epoch, epoch_run_time))

            if data_dir is not None:
                val_loss.reset_states()
                val_top1.reset_states()
                val_top5.reset_states()

                if not dali_mode:
                    test_iter = iter(valid_input)
                for _ in range(nstep_per_valid):
                    if not dali_mode:
                        x = next(test_iter)
                    else:
                        x = valid_input.get_device_minibatches()
                    valid_step(x)

            if log_dir:
                ckpt.epoch.assign_add(1)
                if hvd.rank() == 0:
                    save_path = manager.save()
                    print("Saved checkpoint for epoch {}: {}".format(
                        int(ckpt.epoch), save_path))

            if hvd.rank() == 0:
                output_str = (
                    "loss: {} - top1: {} - top5: {} - val_loss: {} - "
                    "val_top1: {} - val_top5: {}")
                print(
                    output_str.format(train_loss, train_top1.result(),
                                      train_top5.result(), val_loss.result(),
                                      val_top1.result(), val_top5.result()))

            if hvd.rank() == 0 and summary_writer:
                with summary_writer.as_default():
                    tf.summary.scalar('train_loss', train_loss, global_steps)
                    tf.summary.scalar('train_top1', train_top1.result(),
                                      global_steps)
                    tf.summary.scalar('train_top5', train_top5.result(),
                                      global_steps)
                    tf.summary.scalar('val_loss', val_loss.result(),
                                      global_steps)
                    tf.summary.scalar('val_top1', val_top1.result(),
                                      global_steps)
                    tf.summary.scalar('val_top5', val_top5.result(),
                                      global_steps)

        if hvd.rank() == 0 and summary_writer:
            summary_writer.close()

    except KeyboardInterrupt:
        print("Keyboard interrupt")

    if export_dir and hvd.rank() == 0:
        model.save(save_format)
        print(f"The model is saved to {save_format}")
Example #11
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.applications.imagenet_utils import preprocess_input
from tensorflow.python.keras.datasets import cifar10
# import pydot
# from IPython.display import SVG
from tensorflow.python.keras.utils.vis_utils import model_to_dot
from tensorflow.python.keras.utils import plot_model
from tensorflow.python.keras.initializers import glorot_uniform
from tensorflow.python.keras.layers import Conv2D, BatchNormalization, Activation, Add
# import scipy.misc
# from matplotlib.pyplot import imshow
# # %matplotlib inline
from tensorflow.python.keras import backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
K.set_floatx('float32')
import os
import json


def identity_block(X, f, filters, stage, block):
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    F1, F2, F3 = filters

    X_shortcut = X

    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Assumed completion (the listing is truncated here): the remaining two
    # stages and the shortcut follow the standard ResNet identity block.
    X = Conv2D(F2, (f, f), strides=(1, 1), padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    X = Conv2D(F3, (1, 1), strides=(1, 1), padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)
    return X
Example #12
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import tensorflow.python.keras.backend as K
dtype = 'float16'
K.set_floatx(dtype)
K.set_epsilon(1e-4)
# import numpy as np
import os, glob
# os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
from shutil import copy
from utils.logger import set_logger
from dataset_generator import DataGenerator
from Dataset.digest_train_csv import Digestive
from tensorflow.python.keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau
from segcaps import CapsNet, Container
from competition_losses_metrics import *
from config import conf


class SteelSeg:
    def __init__(self, conf):
        self.conf = conf

        self.data_reader = DataGenerator(conf,
                                         Digestive(conf).masks_at_least(1))

        self.model = self.build()

        # If reload_step is nonzero, we are reading data from already-existing folders.
        self.log_dir, self.model_dir, self.save_dir = self.set_dirs()
Example #13
    def __init__(self,
                 fields=None,
                 variables=None,
                 hidden_layers=None,
                 activation="tanh",
                 output_activation="linear",
                 res_net=False,
                 kernel_initializer=None,
                 bias_initializer=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 dtype=None,
                 trainable=True,
                 **kwargs):
        # check data-type.
        if dtype is None:
            dtype = K.floatx()
        elif dtype != K.floatx():
            K.set_floatx(dtype)
        # prepare hidden layers.
        if hidden_layers is None:
            hidden_layers = []
        else:
            hidden_layers = to_list(hidden_layers)
        # check for copy constructor.
        if all([x in kwargs for x in ('inputs', 'outputs', 'layers')]):
            self._inputs = kwargs['inputs'].copy()
            self._outputs = kwargs['outputs'].copy()
            self._layers = kwargs['layers'].copy()
            self._set_model()
            return
        # prepare kernel initializers.
        activations, def_biasinit, def_kerinit = \
            prepare_default_activations_and_initializers(
                len(hidden_layers) * [activation] + [output_activation]
            )
        if kernel_initializer is None:
            kernel_initializer = def_kerinit
        elif isinstance(kernel_initializer, (float, int)):
            kernel_initializer = default_weight_initializer(
                len(hidden_layers) * [activation] + [output_activation],
                'constant',
                scale=kernel_initializer)
        else:
            kernel_initializer = [
                kernel_initializer
                for _ in len(hidden_layers) * [activation] + [output_activation]
            ]
        # prepare bias initializers.
        if bias_initializer is None:
            bias_initializer = def_biasinit
        elif isinstance(bias_initializer, (float, int)):
            bias_initializer = default_weight_initializer(
                len(hidden_layers) * [activation] + [output_activation],
                'constant',
                scale=bias_initializer)
        else:
            bias_initializer = [
                bias_initializer
                for _ in len(hidden_layers) * [activation] + [output_activation]
            ]
        # prepare regularizers.
        kernel_regularizer = default_regularizer(kernel_regularizer)
        bias_regularizer = default_regularizer(bias_regularizer)
        # prepares fields.
        fields = to_list(fields)
        if all([isinstance(fld, str) for fld in fields]):
            output_fields = [
                Field(
                    name=fld,
                    dtype=dtype,
                    kernel_initializer=kernel_initializer[-1],
                    bias_initializer=bias_initializer[-1],
                    kernel_regularizer=kernel_regularizer,
                    bias_regularizer=bias_regularizer,
                    trainable=trainable,
                ) for fld in fields
            ]
        elif all([validations.is_field(fld) for fld in fields]):
            output_fields = fields
        else:
            raise TypeError('Please provide a list of field names (strings) '
                            'or Field objects.')
        # prepare inputs/outputs/layers.
        inputs = []
        layers = []
        variables = to_list(variables)
        if all([isinstance(var, Functional) for var in variables]):
            for var in variables:
                inputs += var.outputs
            # for var in variables:
            #     for lay in var.layers:
            #         layers.append(lay)
        else:
            raise TypeError(
                "Input error: Please provide a `list` of `Functional`s. \n"
                "Provided - {}".format(variables))

        # Input layers.
        if len(inputs) == 1:
            net_input = inputs[0]
        else:
            layer = Concatenate(name=graph_unique_name('conct'))
            net_input = layer(inputs)

        # Define the output network.
        net = [net_input]

        # define the ResNet networks.
        if res_net is True:
            res_layers = []
            res_outputs = []
            for rl in ["U", "V", "H"]:
                layers.append(
                    Dense(hidden_layers[0],
                          kernel_initializer=kernel_initializer[0],
                          bias_initializer=bias_initializer[0],
                          kernel_regularizer=kernel_regularizer,
                          bias_regularizer=bias_regularizer,
                          trainable=trainable,
                          dtype=dtype,
                          name=graph_unique_name(
                              "DRes" + rl + "{:d}b".format(hidden_layers[0]))))
                res_output = layers[-1](net_input)
                # Apply the activation.
                if activations[0].activation.__name__ != 'linear':
                    layers.append(activations[0])
                    res_outputs.append(layers[-1](res_output))
            net[-1] = res_outputs[-1]

        for nLay, nNeuron in enumerate(hidden_layers):
            # Add the layer.
            layer = Dense(nNeuron,
                          kernel_initializer=kernel_initializer[nLay],
                          bias_initializer=bias_initializer[nLay],
                          kernel_regularizer=kernel_regularizer,
                          bias_regularizer=bias_regularizer,
                          trainable=trainable,
                          dtype=dtype,
                          name=graph_unique_name("D{:d}b".format(nNeuron)))
            layers.append(layer)
            net[-1] = layer(net[-1])
            # Apply the activation.
            if activations[nLay].activation.__name__ != 'linear':
                layer = activations[nLay]
                layers.append(layer)
                net[-1] = layer(net[-1])
            # Add the resnet layer
            if res_net is True:
                layer = Lambda(lambda xs: (1 - xs[0]) * xs[1] + xs[0] * xs[2],
                               name=graph_unique_name("ResLayer"))
                net[-1] = layer([net[-1]] + res_outputs[:2])

        # store output layers.
        for out in output_fields:
            layers.append(out)

        # Assign to the output variable
        if len(net) == 1:
            net_output = net[0]
        else:
            raise ValueError("Legacy for Enrichment: Must be updated. ")
            layer = Concatenate(name=graph_unique_name('conct'))
            net_output = layer(net)

        # Define the final outputs of each network
        outputs = []
        for out in output_fields:
            # add the activation on the output.
            if activations[-1].activation.__name__ != 'linear':
                layer = activations[-1]
                layers.append(layer)
                outputs.append(layer(out(net_output)))
            else:
                outputs.append(out(net_output))

        self._inputs = inputs
        self._outputs = outputs
        self._layers = layers
        self._set_model()
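
The `Lambda` in the ResNet branch above mixes each hidden state with the `U` and `V` projections through an elementwise gate, computing `(1 - z) * u + z * v`. A tiny numeric sketch of that combination (values are arbitrary):

import tensorflow as tf

z = tf.constant([[0.2, 0.8]])  # hidden state acting as the gate
u = tf.constant([[1.0, 1.0]])
v = tf.constant([[0.0, 0.0]])
print((1 - z) * u + z * v)     # [[0.8, 0.2]]: per-unit blend of u and v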