    def __init__(self):
        super().__init__()

        factor = 1
        base_filters = [64, 128, 256, 512, 512, 2]
        filters = [f * factor for f in base_filters]
        self.convs = trw.layers.convs_2d(
            6,
            filters,
            convolution_kernels=[4, 4, 4, 4, 4, 1],
            strides=[2, 2, 2, 2, 1, 1],
            pooling_size=None,
            dropout_probability=0.2,
            activation=functools.partial(nn.LeakyReLU, negative_slope=0.1),
            last_layer_is_output=True,
            config=default_layer_config(dimensionality=None,
                                        norm_type=NormType.InstanceNorm))

        self.shapes = [64, 128, 256, 256]
        self.networks = nn.ModuleList()
        for s in self.shapes:
            self.networks.append(
                trw.layers.convs_2d(6,
                                    filters,
                                    convolution_kernels=[4, 4, 4, 4, 4, 1],
                                    strides=[2, 2, 2, 2, 1, 1],
                                    pooling_size=None,
                                    dropout_probability=0.2,
                                    activation=functools.partial(
                                        nn.LeakyReLU, negative_slope=0.1),
                                    last_layer_is_output=True,
                                    config=default_layer_config(
                                        dimensionality=None,
                                        norm_type=NormType.InstanceNorm)))
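The stack above has no accompanying test in these examples, so here is a minimal, hedged sanity check: it rebuilds the same convs_2d configuration standalone and pushes a dummy batch through it. The imports of default_layer_config and NormType from trw.layers are assumptions (trw.layers.NormType is used in Example #15 below), and the 64x64 input size is only an illustration.

import functools

import torch
import torch.nn as nn

import trw
from trw.layers import NormType, default_layer_config

# Same layout as the shared stack above: four stride-2 downsampling
# convolutions followed by two stride-1 convolutions ending in 2 channels.
convs = trw.layers.convs_2d(
    6,
    [64, 128, 256, 512, 512, 2],
    convolution_kernels=[4, 4, 4, 4, 4, 1],
    strides=[2, 2, 2, 2, 1, 1],
    pooling_size=None,
    dropout_probability=0.2,
    activation=functools.partial(nn.LeakyReLU, negative_slope=0.1),
    last_layer_is_output=True,
    config=default_layer_config(dimensionality=None,
                                norm_type=NormType.InstanceNorm))

x = torch.zeros([2, 6, 64, 64])  # dummy batch with 6 input channels
print(convs(x).shape)            # 2 output channels, spatially downsampled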
Example #2
    def test_layer_res(self):
        config = default_layer_config(dimensionality=2)
        b = BlockRes(config,
                     8,
                     kernel_size=7,
                     padding='same',
                     padding_mode='reflect')

        i = torch.zeros([2, 8, 16, 16])
        o = b(i)
        assert o.shape == (2, 8, 16, 16)

        version = torch.__version__[:3]

        ops_b = list(b.block_1.ops)
        assert len(ops_b) == 3
        assert isinstance(ops_b[0].ops[0], torch.nn.Conv2d)
        if version != '1.0':
            assert ops_b[0].ops[0].padding_mode == 'reflect'
        assert isinstance(ops_b[1], torch.nn.BatchNorm2d)
        assert isinstance(ops_b[2], torch.nn.ReLU)

        ops_b = list(b.block_2.ops)
        assert len(ops_b) == 2
        assert isinstance(ops_b[0].ops[0], torch.nn.Conv2d)
        if version != '1.0':
            assert ops_b[0].ops[0].padding_mode == 'reflect'
        assert isinstance(ops_b[1], torch.nn.BatchNorm2d)
Example #3
    def __init__(self):
        super().__init__()

        config = default_layer_config(conv_kwargs={
            'padding': 'same',
            'bias': False
        },
                                      deconv_kwargs={
                                          'padding': 'same',
                                          'bias': False
                                      })
        generator = trw.layers.EncoderDecoderResnet(
            dimensionality=2,
            input_channels=3,
            output_channels=3,
            convolution_kernel=3,
            encoding_channels=[64, 128, 256],
            decoding_channels=[256, 128, 64],
            decoding_block=BlockUpsampleNnConvNormActivation,
            init_block=functools.partial(BlockConvNormActivation,
                                         kernel_size=7),
            out_block=functools.partial(BlockConvNormActivation,
                                        kernel_size=7),
            config=config)
        self.generator = generator
Example #4
    def __init__(self):
        super().__init__()

        config = default_layer_config(conv_kwargs={
            'padding': 'same',
            'bias': True,
            'padding_mode': 'reflect'
        },
                                      deconv_kwargs={
                                          'padding': 'same',
                                          'bias': True,
                                          'padding_mode': 'reflect'
                                      },
                                      norm_type=NormType.InstanceNorm)
        channels = [32, 64, 128]
        generator = trw.layers.EncoderDecoderResnet(
            dimensionality=2,
            input_channels=3,
            output_channels=3,
            encoding_channels=channels,
            decoding_channels=list(reversed(channels)),
            decoding_block=BlockUpsampleNnConvNormActivation,
            init_block=functools.partial(BlockConvNormActivation,
                                         kernel_size=7),
            out_block=functools.partial(BlockConvNormActivation,
                                        kernel_size=7),
            config=config)
        self.generator = generator
Example #5
    def __init__(self):
        super().__init__()

        factor = 1
        base_filters = [64, 128, 256, 2]
        filters = [f * factor for f in base_filters]

        config = default_layer_config(conv_kwargs={
            'padding': 'same',
            'bias': True,
            'padding_mode': 'reflect'
        },
                                      deconv_kwargs={
                                          'padding': 'same',
                                          'bias': True,
                                          'padding_mode': 'reflect'
                                      },
                                      norm_type=NormType.InstanceNorm)
        self.convs = trw.layers.convs_2d(6,
                                         filters,
                                         convolution_kernels=[4, 4, 4, 1],
                                         strides=[2, 2, 2, 1],
                                         pooling_size=None,
                                         dropout_probability=0.2,
                                         activation=functools.partial(
                                             nn.LeakyReLU, negative_slope=0.2),
                                         last_layer_is_output=True,
                                         config=config)
Example #6
    def test_encoder_decoder_res_k4(self):
        config = default_layer_config(conv_kwargs={
            'padding': 'same',
            'bias': False
        },
                                      deconv_kwargs={
                                          'padding': 'same',
                                          'bias': False
                                      })
        I_O = functools.partial(BlockConvNormActivation, kernel_size=7)
        model = EncoderDecoderResnet(
            2,
            3,
            2,
            encoding_channels=[16, 32, 64],
            decoding_channels=[64, 32, 16],
            convolution_kernel=4,
            init_block=I_O,
            out_block=I_O,
            config=config,
            decoding_block=BlockUpsampleNnConvNormActivation,
            activation=nn.LeakyReLU)

        t = torch.zeros([5, 3, 32, 64])
        o = model(t)
        assert o.shape == (5, 2, 32, 64)
Example #7
    def test_encoder_decoder_res(self):
        config = default_layer_config(
            conv_kwargs={'padding': 'same', 'bias': False},
            deconv_kwargs={'padding': 'same', 'bias': False}
        )
        I_O = functools.partial(BlockConvNormActivation, kernel_size=7)
        model = EncoderDecoderResnet(
            2, 3, 2,
            encoding_channels=[16, 32, 64],
            decoding_channels=[64, 32, 16],
            convolution_kernel=5,
            init_block=I_O,
            out_block=I_O,
            config=config,
            activation=nn.LeakyReLU
        )

        t = torch.zeros([5, 3, 32, 64])
        o = model(t)
        assert o.shape == (5, 2, 32, 64)

        assert len(model.initial.ops) == 3
        assert model.initial.ops[0].kernel_size == (7, 7)
        assert model.initial.ops[0].bias is None
        assert isinstance(model.initial.ops[2], nn.LeakyReLU)

        assert len(model.encoders) == 3

        ops = model.encoders[0].ops
        assert len(ops) == 3
        assert ops[0].kernel_size == (5, 5)
        assert ops[0].bias is None
        assert isinstance(ops[2], nn.LeakyReLU)

        ops = model.encoders[1].ops
        assert len(ops) == 3
        assert ops[0].kernel_size == (5, 5)
        assert ops[0].bias is None
        assert isinstance(ops[2], nn.LeakyReLU)

        ops = model.encoders[2].ops
        assert len(ops) == 3
        assert ops[0].kernel_size == (5, 5)
        assert ops[0].bias is None
        assert isinstance(ops[2], nn.LeakyReLU)


        assert len(model.decoders) == 3
        ops = model.decoders[2].ops
        assert len(ops) == 3
        assert ops[0].kernel_size == (5, 5)
        assert ops[0].bias is None
        assert isinstance(ops[2], nn.LeakyReLU)

        ops = model.out.ops
        assert len(ops) == 2
        assert ops[0].kernel_size == (7, 7)
        assert ops[0].bias is None
        assert isinstance(ops[1], nn.BatchNorm2d)
Example #8
    def __init__(self):
        super().__init__()

        config = default_layer_config(dimensionality=2,
                                      norm_type=NormType.InstanceNorm)
        #config = default_layer_config(norm_type=NormType.BatchNorm)
        """
        channels = [32, 64, 128, 256]
        generator = UpsamplingGenerator(
            dimensionality=2,
            input_channels=0,
            channels=channels + [3],
            stride=[2] * len(channels) + [1],
            conditional_channels=3,
            config=config,
            last_layer_is_output=True,
            blocks_with_supervision=[0, 3]
        )
        """
        output_channels = 3
        channels = [64, 128, 256]
        intermediates = [128, 64, 32]
        self.intermediates_outputs = nn.ModuleList()
        for i in intermediates:
            config_intermediate = copy.deepcopy(config)
            config_intermediate.activation = None
            config_intermediate.norm = None
            self.intermediates_outputs.append(
                trw.layers.BlockConvNormActivation(
                    config=config_intermediate,
                    input_channels=i,
                    output_channels=output_channels,
                    kernel_size=1))
        """
        generator = trw.layers.EncoderDecoderResnet(
            dimensionality=2,
            input_channels=3,
            output_channels=3,
            convolution_kernel=3,
            encoding_channels=channels,
            decoding_channels=list(reversed(channels)),
            decoding_block=BlockUpsampleNnConvNormActivation,
            init_block=functools.partial(BlockConvNormActivation, kernel_size=7),
            out_block=functools.partial(BlockConvNormActivation, kernel_size=7),
            config=config
        )
        """

        generator = trw.layers.UNetBase(dim=2,
                                        input_channels=3,
                                        channels=channels,
                                        output_channels=3,
                                        config=config)

        self.generator = generator
        self.scale = 2**len(channels)
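Since self.scale = 2**len(channels) suggests the generator expects input sides divisible by 8 here, a rough usage sketch follows. It is an assumption, not part of the original example: the UNetBase call mirrors the constructor above, the input size is illustrative, and the output is expected (not guaranteed) to keep the input resolution.

import torch
import trw
from trw.layers import NormType, default_layer_config

config = default_layer_config(dimensionality=2,
                              norm_type=NormType.InstanceNorm)
channels = [64, 128, 256]
generator = trw.layers.UNetBase(dim=2,
                                input_channels=3,
                                channels=channels,
                                output_channels=3,
                                config=config)

scale = 2 ** len(channels)                     # the U-Net downsamples 3 times
x = torch.zeros([1, 3, 8 * scale, 8 * scale])  # sides divisible by the scale
print(generator(x).shape)                      # expected: (1, 3, 64, 64)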
Example #9
    def __init__(
        self,
        dimensionality: int,
        input_channels: int,
        encoder_channels: Sequence[int],
        decoder_channels: Sequence[int],
        convolution_kernels: Optional[Union[int, Sequence[int]]] = 5,
        encoder_strides: Union[int, List[int], NestedIntSequence] = 1,
        decoder_strides: Union[int, List[int], NestedIntSequence] = 2,
        pooling_size: Optional[Union[int, List[int], NestedIntSequence]] = 2,
        convolution_repeats: Union[int, Sequence[int]] = 1,
        activation: Optional[Activation] = nn.ReLU,
        dropout_probability: Optional[float] = None,
        norm_type: NormType = NormType.BatchNorm,
        norm_kwargs: Dict = {},
        activation_kwargs: Dict = {},
        last_layer_is_output: bool = False,
        force_decoded_size_same_as_input: bool = True,
        squash_function: Optional[Callable[[torch.Tensor],
                                           torch.Tensor]] = None,
        config: LayerConfig = default_layer_config(dimensionality=None)):

        super().__init__()
        self.force_decoded_size_same_as_input = force_decoded_size_same_as_input

        self.encoder = ConvsBase(dimensionality=dimensionality,
                                 input_channels=input_channels,
                                 channels=encoder_channels,
                                 convolution_kernels=convolution_kernels,
                                 strides=encoder_strides,
                                 pooling_size=pooling_size,
                                 convolution_repeats=convolution_repeats,
                                 activation=activation,
                                 dropout_probability=dropout_probability,
                                 norm_type=norm_type,
                                 activation_kwargs=activation_kwargs,
                                 norm_kwargs=norm_kwargs,
                                 last_layer_is_output=False,
                                 config=config)

        self.decoder = ConvsTransposeBase(
            dimensionality=dimensionality,
            input_channels=encoder_channels[-1],
            channels=decoder_channels,
            strides=decoder_strides,
            activation=activation,
            dropout_probability=dropout_probability,
            norm_type=norm_type,
            norm_kwargs=norm_kwargs,
            activation_kwargs=activation_kwargs,
            last_layer_is_output=last_layer_is_output,
            squash_function=squash_function,
            config=config)
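The forward pass of this class is not shown, but the two halves can be exercised on their own. The sketch below is an assumption: it uses small hypothetical channel counts, assumes ConvsBase and ConvsTransposeBase are importable from trw.layers, and does not reproduce the cropping implied by force_decoded_size_same_as_input.

import torch
from trw.layers import ConvsBase, ConvsTransposeBase, default_layer_config

# Downsized, hypothetical encoder/decoder pair mirroring the constructor above.
encoder = ConvsBase(dimensionality=2,
                    input_channels=1,
                    channels=[8, 16],
                    convolution_kernels=5,
                    strides=1,
                    pooling_size=2,
                    config=default_layer_config(dimensionality=None))
decoder = ConvsTransposeBase(dimensionality=2,
                             input_channels=16,  # last encoder channel count
                             channels=[8, 1],
                             strides=2,
                             last_layer_is_output=True,
                             config=default_layer_config(dimensionality=None))

x = torch.zeros([4, 1, 28, 28])
decoded = decoder(encoder(x))  # 28 -> 14 -> 7 by pooling, then 7 -> 14 -> 28
print(decoded.shape)           # expected: (4, 1, 28, 28)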
Example #10
    def test_efficient_net_construction(self):
        config = default_layer_config(dimensionality=2, activation=Swish)

        torch.random.manual_seed(0)
        i = torch.randn([4, 3, 224, 224])

        net = EfficientNet(dimensionality=2, input_channels=3, output_channels=10, config=config)
        o = net.feature_extractor(i)
        assert o.shape == (4, 1280, 7, 7)

        o = net(i)
        assert o.shape == (4, 10)

        intermediates = net.forward_with_intermediate(i)
        assert len(intermediates) == 9
Example #11
    def test_upsample_nn(self):
        config = default_layer_config(dimensionality=3,
                                      norm_type=NormType.InstanceNorm)
        block = BlockUpsampleNnConvNormActivation(config,
                                                  2,
                                                  4,
                                                  kernel_size=3,
                                                  stride=(1, 2, 3))

        i = torch.zeros([10, 2, 4, 5, 6], dtype=torch.float32)
        o = block(i)
        assert o.shape == (10, 4, 4 * 1, 5 * 2, 6 * 3)

        ops = list(block.ops)
        assert len(ops) == 4
        assert isinstance(ops[0], nn.Upsample)
        assert isinstance(ops[1], nn.Conv3d)
        assert isinstance(ops[2], nn.InstanceNorm3d)
        assert isinstance(ops[3], nn.ReLU)
Example #12
    def __init__(self, options):
        super().__init__()
        config = default_layer_config()
        I_O = functools.partial(BlockConvNormActivation, kernel_size=7)
        self.model = trw.layers.EncoderDecoderResnet(
            3,
            input_channels=1,
            output_channels=3,
            encoding_channels=[64, 128],
            decoding_channels=[128, 64],
            init_block=I_O,
            out_block=I_O,
            config=config,
            nb_residual_blocks=18,
        )

        self.norm_input = nn.InstanceNorm3d(1)

        self.options = options
Example #13
    def __init__(self):
        super().__init__()

        self.init_block = BlockConvNormActivation(config=default_layer_config(
            norm_type=None,
            dimensionality=2,
            activation=functools.partial(nn.LeakyReLU, negative_slope=0.2),
        ),
                                                  input_channels=6,
                                                  output_channels=64,
                                                  kernel_size=4,
                                                  stride=2)

        self.convs = trw.layers.convs_2d(input_channels=64,
                                         channels=[128, 256, 512, 2],
                                         convolution_kernels=4,
                                         strides=[2, 2, 1, 1],
                                         pooling_size=None,
                                         last_layer_is_output=True,
                                         activation=functools.partial(
                                             nn.LeakyReLU, negative_slope=0.2))
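A hedged way to see how the two stages compose: rebuild them standalone and run a dummy input. The 6 input channels are assumed here to come from concatenating two 3-channel images along the channel axis (an interpretation, not stated in the example), and the exact output resolution depends on the padding convention used by trw.

import functools

import torch
import torch.nn as nn

import trw
from trw.layers import default_layer_config

# Standalone copies of the two stages above: an un-normalized initial block,
# then the strided convolution stack ending in 2 output channels.
init_block = trw.layers.BlockConvNormActivation(
    config=default_layer_config(
        norm_type=None,
        dimensionality=2,
        activation=functools.partial(nn.LeakyReLU, negative_slope=0.2)),
    input_channels=6,
    output_channels=64,
    kernel_size=4,
    stride=2)
convs = trw.layers.convs_2d(input_channels=64,
                            channels=[128, 256, 512, 2],
                            convolution_kernels=4,
                            strides=[2, 2, 1, 1],
                            pooling_size=None,
                            last_layer_is_output=True,
                            activation=functools.partial(nn.LeakyReLU,
                                                         negative_slope=0.2))

# hypothetical input: two 3-channel images concatenated along the channel axis
pair = torch.cat([torch.zeros([1, 3, 128, 128]),
                  torch.zeros([1, 3, 128, 128])], dim=1)
logits = convs(init_block(pair))
print(logits.shape)  # a low-resolution, 2-channel output map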
Example #14
    def __init__(
        self,
        unet: UNetBase,
        input_target_shape: ShapeCX,
        output_creator: Callable[[TorchTensorNCX, TensorNCX], Output],
        output_block: ConvBlockType = BlockConvNormActivation,
        discard_top_k_outputs: int = 1,
        resize_mode: Literal['nearest', 'linear'] = 'nearest',
        config: LayerConfig = default_layer_config(dimensionality=None)):

        super().__init__()
        self.unet = unet
        assert len(
            input_target_shape
        ) == unet.dim + 1, 'must be a shape with `N` component removed!'

        device = get_device(unet)
        self.discard_top_k_outputs = discard_top_k_outputs

        # dummy test to get the intermediate layer shapes
        dummy_input = torch.zeros([1] + list(input_target_shape),
                                  device=device)
        outputs = unet.forward_with_intermediate(dummy_input)

        # no activation, these are all output nodes!
        config = copy.copy(config)
        config.set_dim(unet.dim)
        config.activation = None
        config.norm = None

        self.outputs = nn.ModuleList()
        for o in outputs[discard_top_k_outputs:]:
            output = output_block(config, o.shape[1],
                                  self.unet.output_channels)
            self.outputs.append(output)

        self.output_creator = output_creator
        self.resize_mode = resize_mode
Example #15
    def test_conv2(self):
        net = trw.layers.ConvsBase(
            3, 2, channels=[4, 8, 10],
            convolution_kernels=3,
            strides=2,
            pooling_size=2,
            norm_type=trw.layers.NormType.InstanceNorm,
            convolution_repeats=2,
            activation=nn.LeakyReLU,
            with_flatten=True,
            config=default_layer_config(dimensionality=None),
            last_layer_is_output=True)

        #
        # First Block
        #
        assert len(net.layers) == 3
        children_0 = list(net.layers[0].children())
        assert len(children_0) == 3

        children_00 = list(net.layers[0][0].ops)
        assert len(children_00) == 3
        assert isinstance(children_00[0], nn.Conv3d)
        assert children_00[0].weight.shape[1] == 2
        assert children_00[0].stride == (1, 1, 1)
        assert isinstance(children_00[1], nn.InstanceNorm3d)
        assert isinstance(children_00[2], nn.LeakyReLU)

        children_01 = list(net.layers[0][1].ops)
        assert len(children_01) == 3
        assert isinstance(children_01[0], nn.Conv3d)
        assert children_01[0].weight.shape[1] == 4
        assert children_01[0].stride == (2, 2, 2)
        assert isinstance(children_01[1], nn.InstanceNorm3d)
        assert isinstance(children_01[2], nn.LeakyReLU)

        children_02 = net.layers[0][2].op
        assert isinstance(children_02, nn.MaxPool3d)

        #
        # Second block
        #
        children_1 = list(net.layers[1].children())
        assert len(children_1) == 3

        children_10 = list(net.layers[1][0].ops)
        assert len(children_10) == 3
        assert isinstance(children_10[0], nn.Conv3d)
        assert children_10[0].weight.shape[1] == 4
        assert children_10[0].stride == (1, 1, 1)
        assert isinstance(children_10[1], nn.InstanceNorm3d)
        assert isinstance(children_10[2], nn.LeakyReLU)

        children_11 = list(net.layers[1][1].ops)
        assert len(children_11) == 3
        assert isinstance(children_11[0], nn.Conv3d)
        assert children_11[0].weight.shape[1] == 8
        assert children_11[0].stride == (2, 2, 2)
        assert isinstance(children_11[1], nn.InstanceNorm3d)
        assert isinstance(children_11[2], nn.LeakyReLU)

        children_12 = net.layers[1][2].op
        assert isinstance(children_12, nn.MaxPool3d)

        #
        # Third block
        #
        children_2 = list(net.layers[2].children())
        assert len(children_2) == 3

        children_20 = list(net.layers[2][0].ops)
        assert len(children_20) == 3
        assert isinstance(children_20[0], nn.Conv3d)
        assert children_20[0].stride == (1, 1, 1)
        assert isinstance(children_20[1], nn.InstanceNorm3d)
        assert isinstance(children_20[2], nn.LeakyReLU)

        children_21 = list(net.layers[2][1].ops)
        assert len(children_21) == 1
        assert isinstance(children_21[0], nn.Conv3d)
        assert children_21[0].stride == (2, 2, 2)

        children_22 = net.layers[2][2].op
        assert isinstance(children_22, nn.MaxPool3d)
Example #16
    def __init__(
            self,
            dimensionality: int,
            input_channels: int,
            channels: Sequence[int],
            *,
            convolution_kernels: Union[int, List[int], NestedIntSequence] = 5,
            strides: Union[int, List[int], NestedIntSequence] = 2,
            paddings: Optional[Union[str, int, List[int],
                                     NestedIntSequence]] = None,
            activation: Any = nn.ReLU,
            activation_kwargs: Dict = {},
            dropout_probability: Optional[float] = None,
            norm_type: Optional[NormType] = None,
            norm_kwargs: Dict = {},
            last_layer_is_output: bool = False,
            squash_function: Optional[Callable[[torch.Tensor],
                                               torch.Tensor]] = None,
            deconv_block_fn: ConvTransposeBlockType = BlockDeconvNormActivation,
            config: LayerConfig = default_layer_config(dimensionality=None),
            target_shape: Optional[Sequence[int]] = None):
        """

        Args:
            dimensionality: the dimension of the  CNN (2 for 2D or 3 for 3D)
            input_channels: the number of input channels
            channels: the number of channels for each convolutional layer
            convolution_kernels: for each convolution group, the kernel of the convolution
            strides: for each convolution group, the stride of the convolution
            dropout_probability: if None, not dropout. Else the probability of dropout after each convolution
            norm_kwargs: the normalization additional arguments. See the original torch functions for description.
            last_layer_is_output: if True, the last convolution will NOT have activation, dropout, batch norm, LRN
            squash_function: a function to be applied on the reconstuction. It is common to apply
                for example ``torch.sigmoid``. If ``None``, no function applied
            paddings: the paddings added. If ``None``, half the convolution kernel will be used.
            target_shape: if not ``None``, the output layer will be cropped or padded to mach the target
                (N, C components excluded)
        """
        super().__init__()

        # update the configuration locally
        config = copy.copy(config)
        if norm_type is not None:
            config.norm_type = norm_type
        if activation is not None:
            config.activation = activation
        config.set_dim(dimensionality)
        config.norm_kwargs = {**norm_kwargs, **config.norm_kwargs}
        config.activation_kwargs = {
            **activation_kwargs,
            **config.activation_kwargs
        }

        # normalize the arguments
        nb_convs = len(channels)
        if not isinstance(convolution_kernels, list):
            convolution_kernels = [convolution_kernels] * nb_convs
        if not isinstance(strides, list):
            strides = [strides] * nb_convs
        if paddings is None:
            paddings = [div_shape(kernel, 2) for kernel in convolution_kernels]
        elif isinstance(paddings, numbers.Integral):
            paddings = [paddings] * nb_convs
        else:
            assert isinstance(
                paddings, collections.abc.Sequence) and len(paddings) == nb_convs

        assert nb_convs == len(
            convolution_kernels
        ), 'must be specified for each convolutional layer'
        assert nb_convs == len(
            strides), 'must be specified for each convolutional layer'

        layers = nn.ModuleList()

        prev = input_channels
        for n in range(len(channels)):
            current = channels[n]
            currently_last_layer = n + 1 == len(channels)

            p = paddings[n]
            if last_layer_is_output and currently_last_layer:
                # The last layer should not have dropout/normalization/activation
                config.norm = None
                config.activation = None
                config.dropout = None

            ops = []

            ops.append(
                deconv_block_fn(
                    config,
                    prev,
                    current,
                    kernel_size=convolution_kernels[n],
                    stride=strides[n],
                    padding=p,
                    output_padding=strides[n] - 1,
                ))

            if config.dropout is not None and dropout_probability is not None:
                ops.append(
                    config.dropout(p=dropout_probability,
                                   **config.dropout_kwargs))

            layers.append(nn.Sequential(*ops))
            prev = current
        self.layers = layers
        self.squash_function = squash_function
        self.target_shape = target_shape
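Given the parameters documented above, a small usage sketch follows. It assumes this constructor belongs to the ConvsTransposeBase class used in Example #9 and that the class and default_layer_config are exported by trw.layers; the squash function is documented to be applied to the reconstruction.

import torch
from trw.layers import ConvsTransposeBase, default_layer_config

# Two deconvolution blocks, each upsampling by its stride of 2; the last block
# has no normalization/activation (last_layer_is_output=True) and the output
# is squashed to [0, 1] by the documented squash_function.
decoder = ConvsTransposeBase(dimensionality=2,
                             input_channels=32,
                             channels=[16, 3],
                             strides=2,
                             last_layer_is_output=True,
                             squash_function=torch.sigmoid,
                             config=default_layer_config(dimensionality=None))

latent = torch.zeros([4, 32, 8, 8])
image = decoder(latent)
print(image.shape)  # expected: (4, 3, 32, 32)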
Example #17
    def __init__(self,
                 dimensionality: int,
                 input_channels: int,
                 channels: Sequence[int],
                 conditional_channels: int,
                 blocks_with_supervision: Sequence[int],
                 *,
                 kernel_size: ConvKernels = 3,
                 stride: Optional[Stride] = 2,
                 resize_mode: Literal['nearest', 'linear'] = 'nearest',
                 upsampling_block:
                 ConvTransposeBlockType = BlockUpsampleNnConvNormActivation,
                 last_layer_is_output: bool = True,
                 config: LayerConfig = default_layer_config()):

        super().__init__()
        self.dimensionality = dimensionality
        self.channels = channels
        self.input_channels = input_channels
        self.resize_mode = resize_mode
        self.conditional_channels = conditional_channels
        assert len(blocks_with_supervision) >= 1

        config = copy.deepcopy(config)
        config.set_dim(dimensionality)

        if stride is not None:
            if isinstance(stride, list):
                assert len(stride) == len(channels)
            else:
                stride = [stride] * len(channels)

        if kernel_size is not None:
            config.conv_kwargs['kernel_size'] = kernel_size
            config.deconv_kwargs['kernel_size'] = kernel_size

        self.blocks = nn.ModuleList()
        self.blocks_with_supervision = sorted(blocks_with_supervision)
        supervision_index = 0
        i = input_channels
        for block_index, o in enumerate(channels):
            if supervision_index < len(self.blocks_with_supervision) and \
                    block_index == self.blocks_with_supervision[supervision_index]:
                # we will receive additional supervision here: increase the
                # input channels by the number of conditional channels. The
                # supervision MUST be a down-scaled version of the full-scale
                # conditional image
                i = i + self.conditional_channels
                supervision_index += 1

            if stride is not None:
                config.deconv_kwargs['stride'] = stride[block_index]

            if last_layer_is_output and block_index + 1 == len(channels):
                # last block: remove activation and normalization layers
                config.activation = None
                config.norm = None

            block = upsampling_block(config=config,
                                     input_channels=i,
                                     output_channels=o)
            i = o
            self.blocks.append(block)