Example 1
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.name = kwargs["name"]
        # Remember the requested activation so it can be applied after batch
        # normalization; the convolution itself is created without one.
        self.activation = kwargs.get("activation")
        kwargs["activation"] = None

        self.conv = lm.Convolution3dModule(*args, **kwargs)

        # Learnable batch-normalization scale and bias, initialized to 1 and 0
        bn_scale = lbann.Weights(
            initializer=lbann.ConstantInitializer(value=1.0),
            name="{}_bn_scale".format(self.name))
        bn_bias = lbann.Weights(
            initializer=lbann.ConstantInitializer(value=0.0),
            name="{}_bn_bias".format(self.name))
        self.bn_weights = [bn_scale, bn_bias]
        self.instance = 0
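
The snippet above is only the constructor; the class name and its forward pass are not shown. Below is a minimal usage sketch (not part of the original example), assuming the class is called ConvBNModule and follows the callable-module convention of lbann.modules:

# Hypothetical usage; "ConvBNModule" is an assumed name for the class above,
# and `x` is any lbann.Layer produced earlier in the graph.
import lbann

block = ConvBNModule(64,                     # out_channels -> Convolution3dModule
                     3,                      # kernel_size
                     stride=1,
                     padding=1,
                     activation=lbann.Relu,  # applied by the block after batchnorm
                     name="enc_block1")
y = block(x)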
Example 2
    def __init__(self, name=None):
        """Initialize 3D U-Net.

        Args:
            name (str, optional): Module name
                (default: 'unet3d_module<index>').
        """

        UNet3D.global_count += 1
        self.instance = 0
        self.name = (name if name else "unet3d_module{0}".format(
            UNet3D.global_count))

        # The list of ([down-conv filters], [up-conv filters], deconv filters)
        self.BLOCKS = [
            ([32, 64], [64, 64], 128),  # bconv1_down, bconv3_up, deconv3
            ([64, 128], [128, 128], 256),  # bconv2_down, bconv2_up, deconv2
            ([128, 256], [256, 256], 512),  # bconv3_down, bconv1_up, deconv1
        ]
        # The numbers of filters in the "bottom" convolution block
        self.BOTTOM_BLOCK = [256, 512]
        # The number of pooling/deconvolution layers
        self.NUM_LEVELS = len(self.BLOCKS)
        # Whether the PARTITIONED_LEVELS-th pooling/deconvolution is partitioned
        self.PARTITION_INCLUDE_POOL = True

        # Deconvolution should have the same number of input/output channels
        assert self.BLOCKS[-1][2] == self.BOTTOM_BLOCK[1]
        assert all([
            self.BLOCKS[x][2] == self.BLOCKS[x + 1][1][-1]
            for x in range(self.NUM_LEVELS - 1)
        ])

        # Building blocks
        self.downconvs = []
        self.upconvs = []
        self.deconvs = []
        for i, blocks in enumerate(self.BLOCKS):
            downBlock, upBlock, deconv = blocks
            self.downconvs.append(
                UNet3DConvBlock(downBlock,
                                name="{}_bconv{}_down".format(
                                    self.name, i + 1)))
            ui = self.NUM_LEVELS - 1 - i
            self.upconvs.insert(
                0,
                UNet3DConvBlock(upBlock,
                                name="{}_bconv{}_up".format(self.name,
                                                            ui + 1)))
            self.deconvs.insert(
                0,
                Deconvolution3dModule(deconv,
                                      2,
                                      stride=2,
                                      padding=0,
                                      activation=None,
                                      bias=False,
                                      name="{}_deconv{}".format(
                                          self.name, ui + 1)))

        # The bottom convolution
        self.bottomconv = UNet3DConvBlock(self.BOTTOM_BLOCK,
                                          name="{}_bconv_bottom".format(
                                              self.name))

        # The last convolution
        self.lastconv = lm.Convolution3dModule(3,
                                               1,
                                               stride=1,
                                               padding=0,
                                               activation=None,
                                               bias=False,
                                               name="{}_lconv".format(
                                                   self.name))
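
A minimal usage sketch (not part of the original example), assuming UNet3D exposes the usual callable forward method of lbann.modules and that lbann.Input supplies the 3D volume:

# Hypothetical usage of the UNet3D module built above.
import lbann

unet = UNet3D()                        # name defaults to "unet3d_module<N>"
x = lbann.Input(data_field="samples")  # a 3D volume, e.g. 1 x 64 x 64 x 64
y = unet(x)                            # down convs -> bottom conv -> up convs -> lconv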
Example 3
    def __init__(self, output_size, input_width, name=None):
        """Initialize CosmFlow.

        Args:
            output_size (int): Size of output tensor.
            input_width (int): Width of input tensor.
            name (str, optional): Module name
                (default: 'cosmoflow_module<index>').

        """
        CosmoFlow.global_count += 1
        self.instance = 0
        self.name = (name if name else 'cosmoflow_module{0}'.format(
            CosmoFlow.global_count))
        self.input_width = input_width
        assert self.input_width in [128, 256, 512]

        self.layer_params = [
            {"type": "conv", "out_channels": 16, "kernel_size": 3, "stride": 1},
            {"type": "pool"},
            {"type": "conv", "out_channels": 32, "kernel_size": 3, "stride": 1},
            {"type": "pool"},
            {"type": "conv", "out_channels": 64, "kernel_size": 3, "stride": 1},
            {"type": "pool"},
            {"type": "conv", "out_channels": 128, "kernel_size": 3, "stride": 2},
            {"type": "pool"},
            {"type": "conv", "out_channels": 256, "kernel_size": 3, "stride": 1},
            {"type": "pool"},
            {"type": "conv", "out_channels": 256, "kernel_size": 3, "stride": 1},
            {"type": "conv", "out_channels": 256, "kernel_size": 3, "stride": 1},
        ]
        # "Same" padding: preserve the spatial size for stride-1, odd-size kernels
        for p in self.layer_params:
            if p["type"] == "conv":
                p["padding"] = (p["kernel_size"] - 1) // 2

        # Wider inputs get extra pooling layers: one after the 6th convolution
        # for 256^3 inputs, and after the 6th and 7th for 512^3 inputs.
        additional_pools = []
        if self.input_width == 256:
            additional_pools = [6]
        elif self.input_width == 512:
            additional_pools = [6, 7]

        for i in additional_pools:
            # Locate the i-th conv layer in layer_params and insert a pooling
            # layer immediately after it.
            conv_idx = list(
                np.cumsum([
                    1 if x["type"] == "conv" else 0 for x in self.layer_params
                ])).index(i)
            self.layer_params.insert(conv_idx + 1, {"type": "pool"})

        # Track the spatial width through the network: a convolution divides
        # it by its stride, and each pooling layer halves it.
        width = self.input_width
        for p in self.layer_params:
            if p["type"] == "conv":
                output_width = int(width / p["stride"])
            else:
                output_width = int(width / 2)

            p["width"] = output_width
            width = output_width
            assert width > 0

        for i, param in enumerate(
                filter(lambda x: x["type"] == "conv", self.layer_params)):
            conv_name = "conv" + str(i + 1)
            conv_weights = [
                Weights(initializer=lbann.GlorotUniformInitializer())
            ]

            # Strip the bookkeeping keys before forwarding the remaining
            # parameters to Convolution3dModule.
            param_actual = dict(param)
            param_actual.pop("type", None)
            param_actual.pop("width", None)

            conv = lm.Convolution3dModule(**param_actual,
                                          activation=lbann.LeakyRelu,
                                          name=self.name + "_" + conv_name,
                                          bias=False,
                                          weights=conv_weights)
            setattr(self, conv_name, conv)

        # Create fully-connected layers
        fc_params = [
            {"size": 2048},
            {"size": 256},
            {"size": output_size},
        ]
        for i, param in enumerate(fc_params):
            fc_name = "fc" + str(i + 1)
            fc = lm.FullyConnectedModule(
                **param,
                activation=lbann.LeakyRelu if i < len(fc_params) - 1 else None,
                name=self.name + "_" + fc_name,
                weights=[
                    Weights(initializer=lbann.GlorotUniformInitializer()),
                    Weights(initializer=lbann.ConstantInitializer(value=0.1))
                ],
            )
            setattr(self, fc_name, fc)
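
A minimal usage sketch (not part of the original example), assuming CosmoFlow's forward method chains the conv/pool stack and the three fully-connected layers built above:

# Hypothetical usage; output_size and the input shape are illustrative only.
import lbann

model = CosmoFlow(output_size=4, input_width=128)
x = lbann.Input(data_field="samples")  # e.g. a 4 x 128 x 128 x 128 volume
y = model(x)                           # conv1..convN (+ pools) -> fc1 -> fc2 -> fc3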