Code Example #1
import torch.nn as nn

# Note: MetaConv2d and MetaSequential are project-specific meta-layer modules, assumed
# to be importable from the surrounding codebase.
def make_meta_conv2d_block(in_nc, out_nc, kernel_size=3, stride=1, padding=None, dilation=1, groups=1,
                           padding_mode='reflect', norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU(True), dropout=None):
    """ Defines a Hyper convolution block with a normalization layer, an activation layer, and an optional
    dropout layer.

    Args:
        in_nc (int): Input number of channels
        out_nc (int): Output number of channels
        kernel_size (int): Convolution kernel size
        stride (int): Convolution stride
        padding (int, optional): The amount of padding for the height and width dimensions.
            Default: ``kernel_size // 2``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'reflect'``
        norm_layer (nn.Module): Type of feature normalization layer
        act_layer (nn.Module): Type of activation layer
        dropout (float): If specified, enables dropout with the given probability
    """
    assert dropout is None or isinstance(dropout, float)
    padding = kernel_size // 2 if padding is None else padding
    layers = [MetaConv2d(in_nc, out_nc, kernel_size, stride, padding, dilation, groups, padding_mode)]
    if norm_layer is not None:
        layers.append(norm_layer(out_nc))
    if act_layer is not None:
        layers.append(act_layer)
    if dropout is not None:
        layers.append(nn.Dropout(dropout))

    return MetaSequential(*layers)
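A hedged usage sketch (illustrative values, not from the original code): MetaConv2d and MetaSequential are assumed to be the project's hyper-network counterparts of nn.Conv2d and nn.Sequential, receiving their convolution weights externally at run time, so the sketch only shows how the block is assembled.

# Hypothetical usage (illustrative values): a 3x3 block mapping 64 -> 128 channels.
block = make_meta_conv2d_block(64, 128, kernel_size=3, dropout=0.1)
# Resulting layer order: MetaConv2d(64, 128, 3, padding=1, padding_mode='reflect')
# -> BatchNorm2d(128) -> ReLU -> Dropout(p=0.1)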
Code Example #2
class HyperPatchInvertedResidual(nn.Module):  # base class assumed; the snippet omits the class statement
    def __init__(self,
                 in_nc,
                 out_nc,
                 kernel_size=3,
                 stride=1,
                 expand_ratio=1,
                 norm_layer=nn.BatchNorm2d,
                 act_layer=nn.ReLU6(inplace=True),
                 padding_mode='reflect'):
        super(HyperPatchInvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(in_nc * expand_ratio))
        self.use_res_connect = self.stride == 1 and in_nc == out_nc

        layers = []
        if expand_ratio != 1:
            # pw
            layers.append(
                make_meta_patch_conv2d_block(in_nc,
                                             hidden_dim,
                                             1,
                                             norm_layer=norm_layer,
                                             act_layer=act_layer))
        layers.extend([
            # dw
            make_meta_patch_conv2d_block(hidden_dim,
                                         hidden_dim,
                                         kernel_size,
                                         stride=stride,
                                         groups=hidden_dim,
                                         norm_layer=norm_layer,
                                         act_layer=act_layer,
                                         padding_mode=padding_mode),
            # pw-linear (projection); in the MobileNetV2 inverted-residual design the stride
            # is applied only in the depthwise conv above, so it is not repeated here
            make_meta_patch_conv2d_block(hidden_dim,
                                         out_nc,
                                         1,
                                         norm_layer=norm_layer,
                                         act_layer=None)
        ])
        self.conv = MetaSequential(*layers)
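The snippet omits the forward pass. Following the MobileNetV2-style inverted-residual pattern the constructor sets up, a minimal sketch would be (ignoring any extra hyper-weight argument the meta layers may require; an assumption, not the repository's exact code):

    def forward(self, x):
        # Apply the skip connection only when spatial size and channel count are preserved.
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)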
Code Example #3
class MultiScaleDecoder(nn.Module):  # base class assumed; the snippet omits the class statement
    def __init__(self,
                 feat_channels,
                 signal_channels,
                 num_classes=3,
                 kernel_sizes=3,
                 level_layers=1,
                 level_channels=None,
                 norm_layer=nn.BatchNorm2d,
                 act_layer=nn.ReLU6(inplace=True),
                 out_kernel_size=1,
                 expand_ratio=1,
                 groups=1,
                 weight_groups=1,
                 with_out_fc=False,
                 dropout=None,
                 coords_res=None):
        super(MultiScaleDecoder, self).__init__()
        if isinstance(kernel_sizes, numbers.Number):
            kernel_sizes = (kernel_sizes, ) * len(level_channels)
        if isinstance(level_layers, numbers.Number):
            level_layers = (level_layers, ) * len(level_channels)
        if isinstance(expand_ratio, numbers.Number):
            expand_ratio = (expand_ratio, ) * len(level_channels)
        assert len(kernel_sizes) == len(level_channels), \
            f'kernel_sizes ({len(kernel_sizes)}) must be of size {len(level_channels)}'
        assert len(level_layers) == len(level_channels), \
            f'level_layers ({len(level_layers)}) must be of size {len(level_channels)}'
        assert len(expand_ratio) == len(level_channels), \
            f'expand_ratio ({len(expand_ratio)}) must be of size {len(level_channels)}'
        if isinstance(groups, (list, tuple)):
            assert len(groups) == len(level_channels), \
                f'groups ({len(groups)}) must be of size {len(level_channels)}'
        self.level_layers = level_layers
        self.levels = len(level_channels)
        self.layer_params = []
        feat_channels = feat_channels[::-1]  # Reverse the order of the feature channels
        self.coords_cache = {}
        self.weight_groups = weight_groups

        # For each level
        prev_channels = 0
        for level in range(self.levels):
            curr_ngf = feat_channels[level]
            curr_out_ngf = curr_ngf if level_channels is None else level_channels[level]
            prev_channels += curr_ngf  # Accommodate the previous number of channels
            curr_layers = []
            kernel_size = kernel_sizes[level]

            # For each layer in the current level
            for layer in range(self.level_layers[level]):
                if (not with_out_fc) and (level == self.levels - 1) and (layer == self.level_layers[level] - 1):
                    curr_out_ngf = num_classes
                if kernel_size > 1:
                    curr_layers.append(
                        HyperPatchInvertedResidual(
                            prev_channels + 2,
                            curr_out_ngf,
                            kernel_size,
                            expand_ratio=expand_ratio[level],
                            norm_layer=norm_layer,
                            act_layer=act_layer))
                else:
                    group = groups[level] if isinstance(groups, (list, tuple)) else groups
                    curr_layers.append(
                        make_hyper_patch_conv2d_block(prev_channels + 2,
                                                      curr_out_ngf,
                                                      kernel_size,
                                                      groups=group))
                prev_channels = curr_out_ngf

            # Add level layers to module
            self.add_module(f'level_{level}', MetaSequential(*curr_layers))

        # Add the last layer
        if with_out_fc:
            out_fc_layers = [nn.Dropout2d(dropout, True)] if dropout is not None else []
            out_fc_layers.append(
                HyperPatchConv2d(prev_channels,
                                 num_classes,
                                 out_kernel_size,
                                 padding=out_kernel_size // 2))
            self.out_fc = MetaSequential(*out_fc_layers)
        else:
            self.out_fc = None

        # Calculate number of hyper parameters, weight ranges, and total number of hyper parameters per level
        self.hyper_params = 0
        self._ranges = [0]
        self.param_groups = []
        for level in range(self.levels):
            level_layers = getattr(self, f'level_{level}')
            self.hyper_params += level_layers.hyper_params
            self._ranges.append(self.hyper_params)
            self.param_groups.append(level_layers.hyper_params)
        if with_out_fc:
            self.hyper_params += self.out_fc.hyper_params
            self.param_groups.append(self.out_fc.hyper_params)
        self._ranges.append(self.hyper_params)

        # Cache image coordinates
        if coords_res is not None:
            for res in coords_res:
                res_pyd = [(res[0] // 2**i, res[1] // 2**i)
                           for i in range(self.levels)]
                for level_res in res_pyd:
                    self.register_buffer(
                        f'coord{level_res[0]}_{level_res[1]}',
                        self.cache_image_coordinates(*level_res))

        # Initialize signal to weights
        hyper_params = get_hyper_params(self)
        min_unit = max(weight_groups)
        signal_features = divide_feature(signal_channels,
                                         hyper_params,
                                         min_unit=min_unit)
        init_signal2weights(self,
                            list(signal_features),
                            weight_groups=weight_groups)
        self.hyper_params = sum(hyper_params)
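The self._ranges list stores cumulative hyper-parameter counts, one boundary per level plus one for out_fc when present, so a flat vector of generated weights can be split into per-level chunks. A hypothetical helper (not part of the original code) illustrating that use:

# Hypothetical illustration only: split a flat hyper-parameter vector along the
# cumulative boundaries stored in decoder._ranges.
def split_hyper_params(decoder, flat_params):
    return [flat_params[decoder._ranges[i]:decoder._ranges[i + 1]]
            for i in range(len(decoder._ranges) - 1)]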
Code Example #4
class MultiScaleDecoder(nn.Module):  # base class assumed; the snippet omits the class statement
    def __init__(self,
                 feat_channels,
                 in_nc=3,
                 num_classes=3,
                 kernel_sizes=3,
                 level_layers=1,
                 norm_layer=nn.BatchNorm2d,
                 act_layer=nn.ReLU6(inplace=True),
                 out_kernel_size=1,
                 expand_ratio=1,
                 with_out_fc=False,
                 dropout=None):
        super(MultiScaleDecoder, self).__init__()
        if isinstance(kernel_sizes, numbers.Number):
            kernel_sizes = (kernel_sizes, ) * len(feat_channels)
        if isinstance(level_layers, numbers.Number):
            level_layers = (level_layers, ) * len(feat_channels)
        assert len(kernel_sizes) == len(feat_channels), \
            f'kernel_sizes ({len(kernel_sizes)}) must be of size {len(feat_channels)}'
        assert len(level_layers) == len(feat_channels), \
            f'level_layers ({len(level_layers)}) must be of size {len(feat_channels)}'
        self.level_layers = level_layers
        self.levels = len(level_layers)
        self.layer_params = []
        feat_channels = feat_channels[::-1]  # Reverse the order of the feature channels

        # For each level
        prev_channels = 0
        for level in range(self.levels):
            curr_ngf = feat_channels[level]
            prev_channels += curr_ngf  # Accommodate the previous number of channels
            curr_layers = []
            kernel_size = kernel_sizes[level]

            # For each layer in the current level
            for layer in range(self.level_layers[level]):
                if (not with_out_fc) and (level == self.levels - 1) and (layer == self.level_layers[level] - 1):
                    curr_ngf = num_classes
                if kernel_size > 1:
                    curr_layers.append(
                        HyperPatchInvertedResidual(prev_channels + 2,
                                                   curr_ngf,
                                                   kernel_size,
                                                   expand_ratio=expand_ratio,
                                                   norm_layer=norm_layer,
                                                   act_layer=act_layer))
                else:
                    curr_layers.append(
                        make_meta_patch_conv2d_block(prev_channels + 2,
                                                     curr_ngf, kernel_size))
                prev_channels = curr_ngf

            # Add level layers to module
            self.add_module(f'level_{level}', MetaSequential(*curr_layers))

        # Add the last layer
        if with_out_fc:
            out_fc_layers = [nn.Dropout2d(dropout, True)] if dropout is not None else []
            out_fc_layers.append(
                MetaPatchConv2d(prev_channels,
                                num_classes,
                                out_kernel_size,
                                padding=out_kernel_size // 2))
            self.out_fc = MetaSequential(*out_fc_layers)
        else:
            self.out_fc = None

        # Calculate number of hyper parameters, weight ranges, and total number of hyper parameters per level
        self.hyper_params = 0
        self._ranges = [0]
        self.param_groups = []
        for level in range(self.levels):
            level_layers = getattr(self, f'level_{level}')
            self.hyper_params += level_layers.hyper_params
            self._ranges.append(self.hyper_params)
            self.param_groups.append(level_layers.hyper_params)
        if with_out_fc:
            self.hyper_params += self.out_fc.hyper_params
            self.param_groups.append(self.out_fc.hyper_params)
        self._ranges.append(self.hyper_params)
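A rough usage sketch for this variant (channel counts are illustrative, not from the original code): given an encoder that produces a four-level feature pyramid, the decoder receives the per-level channel counts; scalar kernel_sizes and level_layers are broadcast to every level by the constructor.

# Hypothetical configuration: a four-level pyramid with 32/64/128/256 channels
# (shallowest to deepest) decoded into 3 classes.
decoder = MultiScaleDecoder(feat_channels=(32, 64, 128, 256),
                            num_classes=3,
                            kernel_sizes=3,   # broadcast to all four levels
                            level_layers=1)   # one block per level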