Code Example #1
def semantic_upsample(x, n_upsample, n_filters=64, ndim=2, target=None):
    """
    Performs iterative rounds of 2x upsampling and
    convolutions with a 3x3 filter to remove aliasing effects

    Args:
        x (tensor): The input tensor to be upsampled
        n_upsample (int): The number of 2x upsamplings
        n_filters (int, optional): Defaults to 64. The number of filters for
            the 3x3 convolution
        target (tensor, optional): Defaults to None. A tensor with the target
            shape. If included, then the final upsampling layer will reshape
            to the target tensor's size
        ndim: The spatial dimensions of the input data.
            Default is 2, but it also works with 3

    Returns:
        The upsampled tensor
    """

    acceptable_ndims = [2, 3]
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    for i in range(n_upsample):
        if ndim == 2:
            x = Conv2D(n_filters, (3, 3), strides=(1, 1),
                       padding='same', data_format='channels_last')(x)

            if i == n_upsample - 1 and target is not None:
                x = UpsampleLike()([x, target])
            else:
                x = UpSampling2D(size=(2, 2))(x)
        else:
            x = Conv3D(n_filters, (3, 3, 3), strides=(1, 1, 1),
                       padding='same', data_format='channels_last')(x)

            if i == n_upsample - 1 and target is not None:
                x = UpsampleLike()([x, target])
            else:
                x = UpSampling3D(size=(2, 2, 2))(x)

    if n_upsample == 0:
        if ndim == 2:
            x = Conv2D(n_filters, (3, 3), strides=(1, 1),
                       padding='same', data_format='channels_last')(x)
        else:
            x = Conv3D(n_filters, (3, 3, 3), strides=(1, 1, 1),
                       padding='same', data_format='channels_last')(x)

        if target is not None:
            x = UpsampleLike()([x, target])

    return x
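
A minimal usage sketch may help show how this helper is typically wired in. The tensor names and shapes below are illustrative assumptions (not from the original source), and they presume the Conv2D/UpSampling2D/UpsampleLike layers used above are importable.

from tensorflow.keras.layers import Input

# Hypothetical low-resolution feature map and full-resolution reference tensor.
coarse = Input(shape=(32, 32, 64))
full_res = Input(shape=(128, 128, 1))

# Two rounds of 2x upsampling; because a target is given, the last round uses
# UpsampleLike to snap exactly to full_res's spatial shape (32 -> 64 -> 128).
upsampled = semantic_upsample(coarse, n_upsample=2, n_filters=64,
                              ndim=2, target=full_res)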
Code Example #2
def create_pyramid_level(backbone_input,
                         upsamplelike_input=None,
                         addition_input=None,
                         level=5,
                         ndim=2,
                         feature_size=256):
    """Create a pyramid layer from a particular backbone input layer.

    Args:
        backbone_input (layer): Backbone layer to use to create the pyramid layer
        upsamplelike_input (tensor, optional): Defaults to None. Input to use
            as a template for shape to upsample to
        addition_input (layer, optional): Defaults to None. Layer to add to
            pyramid layer after conv and upsample
        level (int, optional): Defaults to 5. Level to use in layer names
        feature_size (int, optional): Defaults to 256. Number of filters for
            convolutional layer
        ndim: The spatial dimensions of the input data. Default is 2,
            but it also works with 3
    Returns:
        (pyramid final, pyramid upsample): Pyramid layer after processing,
            upsampled pyramid layer
    """

    acceptable_ndims = {2, 3}
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    reduced_name = 'C%s_reduced' % level
    upsample_name = 'P%s_upsampled' % level
    addition_name = 'P%s_merged' % level
    final_name = 'P%s' % level

    # Apply 1x1 conv to backbone layer
    if ndim == 2:
        pyramid = Conv2D(feature_size, (1, 1), strides=(1, 1),
                         padding='same', name=reduced_name)(backbone_input)
    else:
        pyramid = Conv3D(feature_size, (1, 1, 1), strides=(1, 1, 1),
                         padding='same', name=reduced_name)(backbone_input)

    # Upsample pyramid input
    if upsamplelike_input is not None:
        pyramid_upsample = UpsampleLike(name=upsample_name)(
            [pyramid, upsamplelike_input])
    else:
        pyramid_upsample = None

    # Add and then 3x3 conv
    if addition_input is not None:
        pyramid = Add(name=addition_name)([pyramid, addition_input])

    if ndim == 2:
        pyramid_final = Conv2D(feature_size, (3, 3), strides=(1, 1),
                               padding='same', name=final_name)(pyramid)
    else:
        pyramid_final = Conv3D(feature_size, (3, 3, 3), strides=(1, 1, 1),
                               padding='same', name=final_name)(pyramid)

    return pyramid_final, pyramid_upsample
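
As a rough usage sketch (the backbone tensors below are hypothetical placeholders, not from the original source), two adjacent pyramid levels can be chained by feeding the upsampled output of one level into the addition input of the next:

from tensorflow.keras.layers import Input

# Hypothetical backbone feature maps at two adjacent strides.
C5 = Input(shape=(8, 8, 2048))
C4 = Input(shape=(16, 16, 1024))

# P5 plus a copy upsampled to C4's spatial shape.
P5, P5_upsampled = create_pyramid_level(C5, upsamplelike_input=C4, level=5)

# Merge the upsampled P5 into the C4-level pyramid layer.
P4, _ = create_pyramid_level(C4, addition_input=P5_upsampled, level=4)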
Code Example #3
File: retinanet.py  Project: ebouilhol/deepcell-tf
def __create_pyramid_features(C3, C4, C5, feature_size=256):
    """Creates the FPN layers on top of the backbone features.

    Args:
        C3: Feature stage C3 from the backbone.
        C4: Feature stage C4 from the backbone.
        C5: Feature stage C5 from the backbone.
        feature_size: The feature size to use for the resulting feature levels.

    Returns:
        A list of feature levels [P3, P4, P5, P6, P7].
    """
    # upsample C5 to get P5 from the FPN paper
    P5 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C5_reduced')(C5)
    P5_upsampled = UpsampleLike(name='P5_upsampled')([P5, C4])
    P5 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P5')(P5)

    # add P5 elementwise to C4
    P4 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced')(C4)
    P4 = Add(name='P4_merged')([P5_upsampled, P4])
    P4_upsampled = UpsampleLike(name='P4_upsampled')([P4, C3])
    P4 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P4')(P4)

    # add P4 elementwise to C3
    P3 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced')(C3)
    P3 = Add(name='P3_merged')([P4_upsampled, P3])
    P3 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3')(P3)

    # "P6 is obtained via a 3x3 stride-2 conv on C5"
    P6 = Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P6')(C5)

    # "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
    P7 = Activation('relu', name='C6_relu')(P6)
    P7 = Conv2D(feature_size, kernel_size=3, strides=2, padding='same', name='P7')(P7)

    return [P3, P4, P5, P6, P7]
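
Because the function name starts with a double underscore it is effectively module-private, so the sketch below assumes it is called from within the same module; the backbone shapes are illustrative assumptions only.

from tensorflow.keras.layers import Input

# Hypothetical backbone outputs at strides 8, 16 and 32.
C3 = Input(shape=(64, 64, 512))
C4 = Input(shape=(32, 32, 1024))
C5 = Input(shape=(16, 16, 2048))

P3, P4, P5, P6, P7 = __create_pyramid_features(C3, C4, C5, feature_size=256)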
Code Example #4
File: retinamask.py  Project: sid6155330/deepcell-tf
def retinanet_mask(inputs,
                   backbone_dict,
                   num_classes,
                   frames_per_batch=1,
                   backbone_levels=['C3', 'C4', 'C5'],
                   pyramid_levels=['P3', 'P4', 'P5', 'P6', 'P7'],
                   retinanet_model=None,
                   anchor_params=None,
                   nms=True,
                   panoptic=False,
                   class_specific_filter=True,
                   crop_size=(14, 14),
                   mask_size=(28, 28),
                   name='retinanet-mask',
                   roi_submodels=None,
                   max_detections=100,
                   score_threshold=0.05,
                   nms_threshold=0.5,
                   mask_dtype=K.floatx(),
                   **kwargs):
    """Construct a RetinaNet mask model on top of a retinanet bbox model.
    Uses the retinanet bbox model and appends layers to compute masks.

    Args:
        inputs (tensor): List of tensorflow.keras.layers.Input.
            The first input is the image, the second input the blob of masks.
        backbone_dict (dict): A dictionary with the backbone layers.
        num_classes (int): Integer, number of classes to classify.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        backbone_levels (list): The backbone levels to be used
            to create the feature pyramid. Defaults to ['C3', 'C4', 'C5'].
        pyramid_levels (list): The pyramid levels to attach regression and
            classification heads to. Defaults to ['P3', 'P4', 'P5', 'P6', 'P7'].
        retinanet_model (tensorflow.keras.Model): RetinaNet model that predicts
            regression and classification values.
        anchor_params (AnchorParameters): Struct containing anchor parameters.
        nms (bool): Whether to use non-maximum suppression
            for the filtering step.
        panoptic (bool): Flag for adding the semantic head for panoptic
            segmentation tasks. Defaults to false.
        class_specific_filter (bool): Use class specific filtering.
        crop_size (tuple): 2-length tuple for the x-y size of the crops.
            Used to create default roi_submodels.
        mask_size (tuple): 2-length tuple for the x-y size of the masks.
            Used to create default roi_submodels.
        name (str): Name of the model.
        roi_submodels (list): Submodels for processing ROIs.
        max_detections (int): The maximum number of detections allowed.
        score_threshold (float): Minimum score for the FilterDetections layer.
        nms_threshold (float): NMS (IoU) threshold for the FilterDetections layer.
        mask_dtype (str): Dtype to use for mask tensors.
        kwargs (dict): Additional kwargs to pass to the retinanet bbox model.

    Returns:
        tensorflow.keras.Model: Model that takes inputs as its input and
            outputs the output of each submodel for each pyramid level plus
            the detections. The order is as defined in submodels.

            ```
            [
                regression, classification, other[0], ...,
                boxes_masks, boxes, scores, labels, masks, other[0], ...
            ]
            ```

    """
    if anchor_params is None:
        anchor_params = AnchorParameters.default

    if roi_submodels is None:
        retinanet_dtype = K.floatx()
        K.set_floatx(mask_dtype)
        roi_submodels = default_roi_submodels(num_classes, crop_size,
                                              mask_size, frames_per_batch,
                                              mask_dtype, retinanet_dtype)
        K.set_floatx(retinanet_dtype)

    image = inputs
    image_shape = Shape()(image)

    if retinanet_model is None:
        retinanet_model = retinanet(inputs=image,
                                    backbone_dict=backbone_dict,
                                    num_classes=num_classes,
                                    backbone_levels=backbone_levels,
                                    pyramid_levels=pyramid_levels,
                                    panoptic=panoptic,
                                    num_anchors=anchor_params.num_anchors(),
                                    frames_per_batch=frames_per_batch,
                                    **kwargs)

    # parse outputs
    regression = retinanet_model.outputs[0]
    classification = retinanet_model.outputs[1]

    if panoptic:
        # Determine the number of semantic heads
        n_semantic_heads = len([
            1 for layer in retinanet_model.layers if 'semantic' in layer.name
        ])

        # The panoptic output should not be sent to filter detections
        other = retinanet_model.outputs[2:-n_semantic_heads]
        semantic = retinanet_model.outputs[-n_semantic_heads:]
    else:
        other = retinanet_model.outputs[2:]

    features = [
        retinanet_model.get_layer(name).output for name in pyramid_levels
    ]

    # build boxes
    anchors = __build_anchors(anchor_params,
                              features,
                              frames_per_batch=frames_per_batch)
    boxes = RegressBoxes(name='boxes')([anchors, regression])
    boxes = ClipBoxes(name='clipped_boxes')([image, boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    detections = FilterDetections(
        nms=nms,
        nms_threshold=nms_threshold,
        score_threshold=score_threshold,
        class_specific_filter=class_specific_filter,
        max_detections=max_detections,
        name='filtered_detections')([boxes, classification] + other)

    # split up in known outputs and "other"
    boxes = detections[0]
    scores = detections[1]

    # get the region of interest features
    #
    # roi_input = [image_shape, boxes, classification] + features
    # rois = _RoiAlign(crop_size=crop_size)(roi_input)

    fpn = features[0]
    fpn = UpsampleLike()([fpn, image])
    rois = RoiAlign(crop_size=crop_size)([boxes, fpn])

    # execute maskrcnn submodels
    maskrcnn_outputs = [submodel(rois) for _, submodel in roi_submodels]

    # concatenate boxes for loss computation
    trainable_outputs = [
        ConcatenateBoxes(name=name)([boxes, output])
        for (name, _), output in zip(roi_submodels, maskrcnn_outputs)
    ]

    # reconstruct the new output
    outputs = [regression, classification] + other + trainable_outputs + \
        detections + maskrcnn_outputs

    if panoptic:
        outputs += list(semantic)

    model = Model(inputs=inputs, outputs=outputs, name=name)
    model.backbone_levels = backbone_levels
    model.pyramid_levels = pyramid_levels

    return model
Code Example #5
def retinamask_bbox(model,
                    nms=True,
                    panoptic=False,
                    num_semantic_heads=1,
                    class_specific_filter=True,
                    name='retinanet-bbox',
                    anchor_params=None,
                    max_detections=300,
                    frames_per_batch=1,
                    crop_size=(14, 14),
                    **kwargs):
    """Construct a RetinaNet model on top of a backbone and adds convenience
    functions to output boxes directly.
    This model uses the minimum retinanet model and appends a few layers
    to compute boxes within the graph. These layers include applying the
    regression values to the anchors and performing NMS.

    Args:
        model (tensorflow.keras.Model): RetinaNet model to append bbox
            layers to. If ``None``, it will create a ``RetinaNet`` model
            using ``kwargs``.
        nms (bool): Whether to use non-maximum suppression
            for the filtering step.
        panoptic (bool): Flag for adding the semantic head for panoptic
            segmentation tasks.
        num_semantic_heads (int): Total number of semantic heads to build.
        class_specific_filter (bool): Whether to use class specific filtering
            or filter for the best scoring class only.
        anchor_params (AnchorParameters): Struct containing anchor parameters.
        max_detections (int): The maximum number of detections allowed.
        frames_per_batch (int): Size of z axis in generated batches.
            If equal to 1, assumes 2D data.
        crop_size (tuple): 2-length tuple for the x-y size of the crops.
            Used to create default ``roi_submodels``.
        kwargs (dict): Additional kwargs to pass to the
            :mod:`deepcell.model_zoo.retinanet.retinanet` model.

    Returns:
        tensorflow.keras.Model: A Model which takes an image as input and
        outputs the detections on the image.
        The order is defined as follows:

        .. code-block:: python

            [
                boxes, scores, labels, other[0], other[1], ...
            ]

    Raises:
        ValueError: the given model does not have a regression or
            classification submodel.
    """

    # if no anchor parameters are passed, use default values
    if anchor_params is None:
        anchor_params = AnchorParameters.default

    # check that the given model has the expected training outputs
    names = ('regression', 'classification')
    if not all(output in model.output_names for output in names):
        raise ValueError('Input is not a training model (no `regression` '
                         'and `classification` outputs were found, '
                         'outputs are: {}).'.format(model.output_names))

    # compute the anchors
    features = [model.get_layer(l).output for l in model.pyramid_levels]
    anchors = __build_anchors(anchor_params,
                              features,
                              frames_per_batch=frames_per_batch)

    # we expect anchors, regression, and classification values as the first outputs
    regression = model.outputs[0]
    classification = model.outputs[1]
    semantic_classes = [
        1 for layer in model.layers if layer.name.startswith('semantic')
    ]

    # "other" can be any additional output from custom submodels, by default []
    if panoptic:
        # The last output is the panoptic output, which should not be
        # sent to filter detections
        num_semantic_heads = len(semantic_classes)
        other = model.outputs[2:-num_semantic_heads]
        semantic = model.outputs[-num_semantic_heads:]
    else:
        other = model.outputs[2:]
        semantic = []

    # apply predicted regression to anchors
    boxes = RegressBoxes(name='boxes')([anchors, regression])
    boxes = ClipBoxes(name='clipped_boxes')([model.inputs[0], boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    detections = FilterDetections(
        nms=nms,
        class_specific_filter=class_specific_filter,
        max_detections=max_detections,
        name='filtered_detections')([boxes, classification])

    # apply submodels to detections
    image = model.layers[0].output
    boxes = detections[0]

    fpn = features[0]
    fpn = UpsampleLike()([fpn, image])
    rois = RoiAlign(crop_size=crop_size)([boxes, fpn])

    mask_submodel = model.get_layer('mask_submodel')
    masks = [mask_submodel(rois)]

    # add the semantic head's output if needed
    outputs = detections + list(masks) + list(semantic)

    # construct the model
    new_model = Model(inputs=model.inputs, outputs=outputs, name=name)

    image_input = model.inputs[0]
    shape = (1, 1, 4) if frames_per_batch == 1 else (1, 1, 1, 4)
    temp_boxes = K.zeros(shape, name='temp_boxes')
    new_inputs = [image_input, temp_boxes]

    final_model = new_model(new_inputs)
    return Model(inputs=image_input, outputs=final_model)
Code Example #6
def create_pyramid_level(backbone_input,
                         upsamplelike_input=None,
                         addition_input=None,
                         upsample_type='upsamplelike',
                         level=5,
                         ndim=2,
                         feature_size=256):
    """Create a pyramid layer from a particular backbone input layer.

    Args:
        backbone_input (layer): Backbone layer to use to create the pyramid layer
        upsamplelike_input (tensor): Optional input to use
            as a template for shape to upsample to
        addition_input (layer): Optional layer to add to
            pyramid layer after convolution and upsampling.
        upsample_type (str, optional): Choice of upsampling methods
            from ['upsamplelike','upsampling2d','upsampling3d'], defaults to 'upsamplelike'.
        level (int): Level to use in layer names, defaults to 5.
        feature_size (int): Number of filters for the
            convolutional layer, defaults to 256.
        ndim (int): The spatial dimensions of the input data. Default is 2,
            but it also works with 3

    Returns:
        tuple: Pyramid layer after processing, upsampled pyramid layer

    Raises:
        ValueError: ndim is not 2 or 3
        ValueError: upsample_type not in ['upsamplelike', 'upsampling2d', 'upsampling3d']
    """

    acceptable_ndims = {2, 3}
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError(
            'Upsample method not supported. Choose from [\'upsamplelike\','
            '\'upsampling2d\',\'upsampling3d\']')

    reduced_name = 'C{}_reduced'.format(level)
    upsample_name = 'P{}_upsampled'.format(level)
    addition_name = 'P{}_merged'.format(level)
    final_name = 'P{}'.format(level)

    # Apply 1x1 conv to backbone layer
    if ndim == 2:
        pyramid = Conv2D(feature_size, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         name=reduced_name)(backbone_input)
    else:
        pyramid = Conv3D(feature_size, (1, 1, 1),
                         strides=(1, 1, 1),
                         padding='same',
                         name=reduced_name)(backbone_input)

    # Add and then 3x3 conv
    if addition_input is not None:
        pyramid = Add(name=addition_name)([pyramid, addition_input])

    # Upsample pyramid input
    if upsamplelike_input is not None:
        if upsample_type == 'upsamplelike':
            pyramid_upsample = UpsampleLike(name=upsample_name)(
                [pyramid, upsamplelike_input])
        else:
            upsampling = UpSampling2D if ndim == 2 else UpSampling3D
            size = (2, 2) if ndim == 2 else (1, 2, 2)
            pyramid_upsample = upsampling(size=size,
                                          name=upsample_name)(pyramid)
    else:
        pyramid_upsample = None

    if ndim == 2:
        pyramid_final = Conv2D(feature_size, (3, 3),
                               strides=(1, 1),
                               padding='same',
                               name=final_name)(pyramid)
    else:
        pyramid_final = Conv3D(feature_size, (1, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               name=final_name)(pyramid)

    return pyramid_final, pyramid_upsample
Code Example #7
def create_pyramid_level(backbone_input,
                         upsamplelike_input=None,
                         addition_input=None,
                         upsample_type='upsamplelike',
                         level=5,
                         ndim=2,
                         lite=False,
                         interpolation='bilinear',
                         feature_size=256,
                         z_axis_convolutions=False):
    """Create a pyramid layer from a particular backbone input layer.

    Args:
        backbone_input (tensorflow.keras.Layer): Backbone layer to use to
            create the pyramid layer.
        upsamplelike_input (tensor): Optional input to use
            as a template for shape to upsample to.
        addition_input (tensorflow.keras.Layer): Optional layer to add to
            pyramid layer after convolution and upsampling.
        upsample_type (str): Choice of upsampling methods
            from ``['upsamplelike','upsampling2d','upsampling3d']``.
        level (int): Level to use in layer names.
        feature_size (int): Number of filters for the convolutional layer.
        ndim (int): The spatial dimensions of the input data.
            Must be either 2 or 3.
        lite (bool): Whether to use depthwise conv instead of regular conv for
            feature pyramid construction
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ``['bilinear', 'nearest']``.
        z_axis_convolutions (bool): Whether to use a kernel size of 3
            (instead of 1) along the z axis of the final 3D convolution.

    Returns:
        tuple: Pyramid layer after processing, upsampled pyramid layer

    Raises:
        ValueError: ``ndim`` is not 2 or 3
        ValueError: ``upsample_type`` not in
            ``['upsamplelike','upsampling2d', 'upsampling3d']``
    """
    # Check input to ndims
    acceptable_ndims = {2, 3}
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    # Check if inputs to ndim and lite are compatible
    if ndim == 3 and lite:
        raise ValueError('lite models are not compatible with 3 dimensional '
                         'networks')

    # Check input to interpolation
    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    # Check input to upsample_type
    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(upsample_type,
                                                  list(acceptable_upsample)))

    reduced_name = 'C{}_reduced'.format(level)
    upsample_name = 'P{}_upsampled'.format(level)
    addition_name = 'P{}_merged'.format(level)
    final_name = 'P{}'.format(level)

    # Apply 1x1 conv to backbone layer
    if ndim == 2:
        pyramid = Conv2D(feature_size, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         name=reduced_name)(backbone_input)
    else:
        pyramid = Conv3D(feature_size, (1, 1, 1),
                         strides=(1, 1, 1),
                         padding='same',
                         name=reduced_name)(backbone_input)

    # Add and then 3x3 conv
    if addition_input is not None:
        pyramid = Add(name=addition_name)([pyramid, addition_input])

    # Upsample pyramid input
    if upsamplelike_input is not None and upsample_type == 'upsamplelike':
        pyramid_upsample = UpsampleLike(name=upsample_name)(
            [pyramid, upsamplelike_input])
    elif upsample_type == 'upsamplelike':
        pyramid_upsample = None
    else:
        upsampling = UpSampling2D if ndim == 2 else UpSampling3D
        size = (2, 2) if ndim == 2 else (1, 2, 2)
        upsampling_kwargs = {
            'size': size,
            'name': upsample_name,
            'interpolation': interpolation
        }
        if ndim > 2:
            del upsampling_kwargs['interpolation']
        pyramid_upsample = upsampling(**upsampling_kwargs)(pyramid)

    if ndim == 2:
        if lite:
            pyramid_final = DepthwiseConv2D((3, 3),
                                            strides=(1, 1),
                                            padding='same',
                                            name=final_name)(pyramid)
        else:
            pyramid_final = Conv2D(feature_size, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   name=final_name)(pyramid)
    else:
        z = 3 if z_axis_convolutions else 1
        pyramid_final = Conv3D(feature_size, (z, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               name=final_name)(pyramid)

    return pyramid_final, pyramid_upsample
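
This variant adds the lite and interpolation options. A hedged sketch of how they might be exercised (the input tensor is a placeholder; 2D only, since lite is rejected for ndim=3):

from tensorflow.keras.layers import Input

# Hypothetical backbone feature map.
C5 = Input(shape=(8, 8, 2048))

# Depthwise 3x3 for the final conv, plus a fixed 2x nearest-neighbor upsample.
P5, P5_up = create_pyramid_level(C5,
                                 upsample_type='upsampling2d',
                                 interpolation='nearest',
                                 level=5,
                                 lite=True)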
Code Example #8
def semantic_upsample(x,
                      n_upsample,
                      target=None,
                      n_filters=64,
                      ndim=2,
                      semantic_id=0,
                      upsample_type='upsamplelike',
                      interpolation='bilinear'):
    """Performs iterative rounds of 2x upsampling and
    convolutions with a 3x3 filter to remove aliasing effects.

    Args:
        x (tensor): The input tensor to be upsampled.
        n_upsample (int): The number of 2x upsamplings.
        target (tensor): An optional tensor with the target shape.
        n_filters (int): The number of filters for
            the 3x3 convolution.
        ndim (int): The spatial dimensions of the input data.
            Must be either 2 or 3.
        semantic_id (int): ID of the semantic head.
        upsample_type (str): Choice of upsampling layer to use from
            ``['upsamplelike', 'upsampling2d', 'upsampling3d']``.
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ``['bilinear', 'nearest']``.

    Raises:
        ValueError: ``ndim`` is not 2 or 3.
        ValueError: ``interpolation`` not in ``['bilinear', 'nearest']``.
        ValueError: ``upsample_type`` not in
            ``['upsamplelike','upsampling2d', 'upsampling3d']``.
        ValueError: ``target`` is ``None`` and
            ``upsample_type`` is ``'upsamplelike'``

    Returns:
        tensor: The upsampled tensor.
    """
    # Check input to ndims
    acceptable_ndims = [2, 3]
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    # Check input to interpolation
    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    # Check input to upsample_type
    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(upsample_type,
                                                  list(acceptable_upsample)))

    # Check that there is a target if upsamplelike is used
    if upsample_type == 'upsamplelike' and target is None:
        raise ValueError('upsamplelike requires a target.')

    conv = Conv2D if ndim == 2 else Conv3D
    conv_kernel = (3, 3) if ndim == 2 else (1, 3, 3)
    upsampling = UpSampling2D if ndim == 2 else UpSampling3D
    size = (2, 2) if ndim == 2 else (1, 2, 2)

    if n_upsample > 0:
        for i in range(n_upsample):
            x = conv(n_filters,
                     conv_kernel,
                     strides=1,
                     padding='same',
                     name='conv_{}_semantic_upsample_{}'.format(
                         i, semantic_id))(x)

            # Define kwargs for upsampling layer
            upsample_name = 'upsampling_{}_semantic_upsample_{}'.format(
                i, semantic_id)

            if upsample_type == 'upsamplelike':
                if i == n_upsample - 1 and target is not None:
                    x = UpsampleLike(name=upsample_name)([x, target])
            else:
                upsampling_kwargs = {
                    'size': size,
                    'name': upsample_name,
                    'interpolation': interpolation
                }

                if ndim > 2:
                    del upsampling_kwargs['interpolation']
                x = upsampling(**upsampling_kwargs)(x)
    else:
        x = conv(n_filters,
                 conv_kernel,
                 strides=1,
                 padding='same',
                 name='conv_final_semantic_upsample_{}'.format(semantic_id))(x)

        if upsample_type == 'upsamplelike' and target is not None:
            upsample_name = 'upsampling_{}_semanticupsample_{}'.format(
                0, semantic_id)
            x = UpsampleLike(name=upsample_name)([x, target])

    return x
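
When upsample_type is not 'upsamplelike', no target tensor is required and each round applies a fixed 2x upsampling. A minimal sketch with an assumed input tensor (not from the original source):

from tensorflow.keras.layers import Input

# Hypothetical semantic feature map.
features = Input(shape=(32, 32, 64))

# Three rounds of conv + fixed 2x bilinear upsampling (32 -> 256 spatially);
# no target is needed because upsample_type is 'upsampling2d'.
y = semantic_upsample(features,
                      n_upsample=3,
                      n_filters=64,
                      semantic_id=1,
                      upsample_type='upsampling2d',
                      interpolation='bilinear')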
Code Example #9
def create_pyramid_level(backbone_input,
                         upsamplelike_input=None,
                         addition_input=None,
                         upsample_type='upsamplelike',
                         level=5,
                         ndim=2,
                         lite=False,
                         interpolation='bilinear',
                         feature_size=256):
    """Create a pyramid layer from a particular backbone input layer.

    Args:
        backbone_input (layer): Backbone layer to use to create the pyramid
            layer
        upsamplelike_input (tensor): Optional input to use
            as a template for shape to upsample to
        addition_input (layer): Optional layer to add to
            pyramid layer after convolution and upsampling.
        upsample_type (str, optional): Choice of upsampling methods
            from ['upsamplelike','upsampling2d','upsampling3d'].
            Defaults to 'upsamplelike'.
        level (int): Level to use in layer names, defaults to 5.
        feature_size (int): Number of filters for the
            convolutional layer, defaults to 256.
        ndim (int): The spatial dimensions of the input data. Default is 2,
            but it also works with 3
        lite (bool): Whether to use depthwise conv instead of regular conv for
            feature pyramid construction
        interpolation (str): Choice of interpolation mode for upsampling
            layers from ['bilinear', 'nearest']. Defaults to 'bilinear'.

    Returns:
        tuple: Pyramid layer after processing, upsampled pyramid layer

    Raises:
        ValueError: ndim is not 2 or 3
        ValueError: upsample_type not in ['upsamplelike', 'upsampling2d',
            'upsampling3d']
    """
    # Check input to ndims
    acceptable_ndims = {2, 3}
    if ndim not in acceptable_ndims:
        raise ValueError('Only 2 and 3 dimensional networks are supported')

    # Check if inputs to ndim and lite are compatible
    if ndim == 3 and lite:
        raise ValueError('lite == True is not compatible with 3 dimensional '
                         'networks')

    # Check input to interpolation
    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode not supported. Choose from '
                         '["bilinear", "nearest"]')

    # Check input to upsample_type
    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError(
            'Upsample method not supported. Choose from ["upsamplelike",'
            '"upsampling2d", "upsampling3d"]')

    reduced_name = 'C{}_reduced'.format(level)
    upsample_name = 'P{}_upsampled'.format(level)
    addition_name = 'P{}_merged'.format(level)
    final_name = 'P{}'.format(level)

    # Apply 1x1 conv to backbone layer
    if ndim == 2:
        pyramid = Conv2D(feature_size, (1, 1),
                         strides=(1, 1),
                         padding='same',
                         name=reduced_name)(backbone_input)
    else:
        pyramid = Conv3D(feature_size, (1, 1, 1),
                         strides=(1, 1, 1),
                         padding='same',
                         name=reduced_name)(backbone_input)

    # Add and then 3x3 conv
    if addition_input is not None:
        pyramid = Add(name=addition_name)([pyramid, addition_input])

    # Upsample pyramid input
    if upsamplelike_input is not None:
        if upsample_type == 'upsamplelike':
            pyramid_upsample = UpsampleLike(name=upsample_name)(
                [pyramid, upsamplelike_input])
        else:
            upsampling = UpSampling2D if ndim == 2 else UpSampling3D
            size = (2, 2) if ndim == 2 else (1, 2, 2)
            upsampling_kwargs = {
                'size': size,
                'name': upsample_name,
                'interpolation': interpolation
            }
            if ndim > 2:
                del upsampling_kwargs['interpolation']
            pyramid_upsample = upsampling(**upsampling_kwargs)(pyramid)
    else:
        pyramid_upsample = None

    if ndim == 2:
        if lite:
            pyramid_final = DepthwiseConv2D((3, 3),
                                            strides=(1, 1),
                                            padding='same',
                                            name=final_name)(pyramid)
        else:
            pyramid_final = Conv2D(feature_size, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   name=final_name)(pyramid)
    else:
        pyramid_final = Conv3D(feature_size, (1, 3, 3),
                               strides=(1, 1, 1),
                               padding='same',
                               name=final_name)(pyramid)

    return pyramid_final, pyramid_upsample