Code Example #1
    def forward_generator(self, z, mcr):
        '''
        Build the Generator
        '''
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_fc1(z),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        dims = '512 8 8'
        x = lbann.Reshape(x, dims=dims)  #channel first

        for lyr in self.g_convT:
            x = lbann.Relu(
                lbann.BatchNormalization(lyr(x),
                                         decay=0.9,
                                         scale_init=1.0,
                                         epsilon=1e-5))

        img = self.g_convT3(x)

        if mcr:  ### For multi-channel rescaling, add extra channel to output image
            linear_scale = 1 / self.linear_scaler
            ch2 = lbann.Tanh(
                lbann.WeightedSum(self.inv_transform(img),
                                  scaling_factors=str(linear_scale)))
            y = lbann.Concatenation(img, ch2, axis=0)
            img = lbann.Reshape(y, dims='2 128 128')
        else:
            img = lbann.Reshape(img, dims='1 128 128')

        return img
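
Note: when mcr is set, the generator inverse-transforms the image, rescales it by 1 / self.linear_scaler, applies Tanh, and appends the result as a second channel, so the output is 2x128x128 instead of 1x128x128.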
Code Example #2
 def forward_generator(self, z):
     x = lbann.Relu(lbann.BatchNormalization(self.g_fc1(z), decay=0.9, scale_init=1.0, epsilon=1e-5))
     x = lbann.Reshape(x, dims='512 8 8')  # channel first
     x = lbann.Relu(lbann.BatchNormalization(self.g_convT[0](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
     x = lbann.Relu(lbann.BatchNormalization(self.g_convT[1](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
     x = lbann.Relu(lbann.BatchNormalization(self.g_convT[2](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
     return self.g_convT3(x)
Code Example #3
def standard_batchnorm(statistics_group_size, parent_node):
    return lbann.BatchNormalization(
        parent_node,
        bias_init=0.0,
        decay=0.9,
        epsilon=1e-5,
        scale_init=1.0,
        statistics_group_size=statistics_group_size)
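
A minimal usage sketch for this helper (the surrounding layers are illustrative assumptions, not from the original project; in LBANN, statistics_group_size=1 keeps per-process statistics, while -1 aggregates them globally):

# Hypothetical call site for standard_batchnorm; shapes are placeholders.
x = lbann.Input(name="x")
conv = lbann.Convolution(x,
                         num_dims=3,
                         num_output_channels=64,
                         conv_dims_i=3,
                         conv_strides_i=1,
                         conv_pads_i=1,
                         has_bias=False)
y = lbann.Relu(standard_batchnorm(statistics_group_size=1, parent_node=conv))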
Code Example #4
def standard_batchnorm(parent_node):
    return lbann.BatchNormalization(
        parent_node,
        bias_init=0.0,
        decay=0.9,
        epsilon=1e-5,
        scale_init=1.0
    )
Code Example #5
    def forward_generator(self, z, mcr):
        '''
        Build the Generator
        '''
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_fc1(z),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        x = lbann.Reshape(x, dims='512 8 8')  # channel first
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[0](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[1](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[2](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        img = self.g_convT3(x)

        if mcr:  ### For multi-channel rescaling, add extra channel to output image
            linear_scale = 1 / self.linear_scaler
            ch2 = lbann.Tanh(
                lbann.WeightedSum(self.inv_transform(img),
                                  scaling_factors=str(linear_scale)))
            y = lbann.Concatenation(img, ch2, axis=0)
            img = lbann.Reshape(y, dims='2 128 128')
        else:
            img = lbann.Reshape(img, dims='1 128 128')

        return img
Code Example #6
File: unet3d.py Project: benson31/lbann
    def forward(self, x):
        self.instance += 1
        x = self.conv(x)
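        # statistics_group_size=-1 aggregates batchnorm statistics globally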
        x = lbann.BatchNormalization(x,
                                     weights=self.bn_weights,
                                     statistics_group_size=-1,
                                     name="{}_bn_instance{}".format(
                                         self.name, self.instance))
        if self.activation is not None:
            x = self.activation(x)

        return x
Code Example #7
File: resnet.py Project: wderekjones/lbann
 def forward(self, x):
     self.instance += 1
     conv = self.conv(x)
     bn = lbann.BatchNormalization(
         conv,
         weights=self.bn_weights,
         stats_aggregation=self.bn_stats_aggregation,
         name='{0}_bn_instance{1}'.format(self.name, self.instance))
     if self.relu:
         return lbann.Relu(bn,
                           name='{0}_relu_instance{1}'.format(
                               self.name, self.instance))
     else:
         return bn
Code Example #8
File: cosmoflow.py Project: benson31/lbann
 def forward(self, x):
     self.instance += 1
     layer = self.conv(x)
     if self.use_bn:
         layer = lbann.BatchNormalization(
             layer,
             weights=self.bn_weights,
             statistics_group_size=self.bn_statistics_group_size,
             decay=0.999,
             name='{0}_bn_instance{1}'.format(self.name, self.instance))
     if self.activation:
         layer = self.activation(layer,
                                 name='{0}_activation_instance{1}'.format(
                                     self.name, self.instance))
     return layer
Code Example #9
File: resnet.py Project: oyamay/lbann
 def forward(self, x):
     self.instance += 1
     conv = self.conv(x)
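     # Map a group size of 0 to -1, i.e. global statistics aggregation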
     bn = lbann.BatchNormalization(
         conv,
         weights=self.bn_weights,
         statistics_group_size=(-1 if self.bn_statistics_group_size == 0
                                else self.bn_statistics_group_size),
         name='{0}_bn_instance{1}'.format(self.name, self.instance))
     if self.relu:
         return lbann.Relu(bn,
                           name='{0}_relu_instance{1}'.format(
                               self.name, self.instance))
     else:
         return bn
Code Example #10
 def forward(self, x):
     self.instance += 1
     name = '{0}_instance{1}'.format(self.name, self.instance)
     return lbann.BatchNormalization(
         x,
         weights=[
             self.scale, self.bias, self.running_mean, self.running_variance
         ],
         decay=0.9,
         scale_init=1.0,
         bias_init=0.0,
         epsilon=1e-5,
         statistics_group_size=self.statistics_group_size,
         name=name,
         data_layout=self.data_layout)
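
The four weight objects passed above are typically created once in the module's constructor so that every forward() call reuses the same parameters. A minimal sketch, assuming lbann.Weights and lbann.ConstantInitializer from the LBANN Python front end (names and initializer values are illustrative):

 # Hypothetical constructor snippet creating the batchnorm weights.
 self.scale = lbann.Weights(
     initializer=lbann.ConstantInitializer(value=1.0),
     name=self.name + '_scale')
 self.bias = lbann.Weights(
     initializer=lbann.ConstantInitializer(value=0.0),
     name=self.name + '_bias')
 self.running_mean = lbann.Weights(
     initializer=lbann.ConstantInitializer(value=0.0),
     name=self.name + '_running_mean')
 self.running_variance = lbann.Weights(
     initializer=lbann.ConstantInitializer(value=1.0),
     name=self.name + '_running_variance')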
Code Example #11
File: DistConvGAN.py Project: benson31/lbann
    def forward(self, x):
        self.instance += 1

        # Convolution
        layer = self.conv(x)

        # Batchnorm
        if self.use_bn:
            layer = lbann.BatchNormalization(
                layer,
                weights=self.bn_weights,
                statistics_group_size=self.bn_statistics_group_size,
                decay=0.999,
                parallel_strategy=self.ps,
                name='{0}_bn_instance{1}'.format(self.name, self.instance))

        # Strided pooling
        # Note: Ideally we would do this immediately after the
        # convolution, but we run into issues since the tensor
        # overlaps don't match.
        ### @todo Support strided convolution in distconv
        if self.stride != 1:
            layer = lbann.Pooling(layer,
                                  num_dims=3,
                                  pool_dims_i=self.stride,
                                  pool_strides_i=self.stride,
                                  pool_mode='max',
                                  parallel_strategy=self.ps,
                                  name='{0}_pool_instance{1}'.format(
                                      self.name, self.instance))

        # Activation
        if self.activation:
            layer = self.activation(layer,
                                    parallel_strategy=self.ps,
                                    name='{0}_activation_instance{1}'.format(
                                        self.name, self.instance))

        return layer
Code Example #12
    def test_l2o_layer_batch_normalization(self):
        N, C, H, W = (100, 200, 300, 400)
        decay = 0.95
        epsilon = 1e-6

        onnxBN = onnx.helper.make_node(
            "BatchNormalization",
            inputs=["x", "scale", "B", "mean", "var"],
            outputs=["y"],
            epsilon=epsilon,
            momentum=decay,
            spatial=1)

        layer = lbann.BatchNormalization(
            lbann.Input(name="x"),
            decay=decay,
            epsilon=epsilon,
        )
        lbannBN = parseLbannLayer(layer.export_proto(),
                                  {"x_0": (N, C, H, W)})["nodes"]

        self._assertFields(lbannBN, onnxBN)
Code Example #13
    def test_o2l_layer_BatchNormalization(self):
        N, C, H, W = (100, 200, 300, 400)
        decay = 0.95
        epsilon = 1e-6

        lbannBN = lbann.BatchNormalization(
            lbann.Input(),
            decay=decay,
            epsilon=epsilon,
        )

        inputShapes = {"x": [N, C, H, W]}
        paramShapes = {"scale": [C], "B": [C], "mean": [C], "var": [C]}

        node = onnx.helper.make_node("BatchNormalization",
                                     inputs=["x", "scale", "B", "mean", "var"],
                                     outputs=["y"],
                                     epsilon=epsilon,
                                     momentum=decay)
        onnxBN = convertOnnxNode(node, inputShapes,
                                 paramShapes).batch_normalization

        self._assertFields(lbannBN, onnxBN)
Code Example #14
def gen_layers(latent_dim, number_of_atoms):
    '''Generate the model for the 3D Convolutional Auto Encoder.

    Returns the Directed Acyclic Graph (DAG) that the lbann model
    will run on.
    '''

    input_ = lbann.Input(target_mode="reconstruction")
    tensors = lbann.Identity(input_)

    tensors = lbann.Reshape(tensors, dims="11 32 32 32", name="Sample")
    # Input tensor shape is (number_of_atoms)x32x32x32 (number_of_atoms = 11 here)

    # Encoder

    x = lbann.Identity(tensors)
    for i in range(4):
        out_channels = latent_dim // (2**(3 - i))

        x = lbann.Convolution(x,
                              num_dims=3,
                              num_output_channels=out_channels,
                              num_groups=1,
                              conv_dims_i=4,
                              conv_strides_i=2,
                              conv_dilations_i=1,
                              conv_pads_i=1,
                              has_bias=True,
                              name="Conv_{0}".format(i))

        x = lbann.BatchNormalization(x, name="Batch_NORM_{0}".format(i + 1))
        x = lbann.LeakyRelu(x, name="Conv_{0}_Activation".format(i + 1))

    # Shape: (latent_dim)x2x2x2
    encoded = lbann.Convolution(x,
                                num_dims=3,
                                num_output_channels=latent_dim,
                                num_groups=1,
                                conv_dims_i=2,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                conv_pads_i=0,
                                has_bias=True,
                                name="encoded")

    # Shape: (latent_dim)x1x1x1

    # Decoder

    x = lbann.Deconvolution(encoded,
                            num_dims=3,
                            num_output_channels=number_of_atoms * 16,
                            num_groups=1,
                            conv_dims_i=4,
                            conv_pads_i=0,
                            conv_strides_i=2,
                            conv_dilations_i=1,
                            has_bias=True,
                            name="Deconv_1")
    x = lbann.BatchNormalization(x, name="BN_D1")
    x = lbann.Tanh(x, name="Deconv_1_Activation")

    for i in range(3):
        out_channels = number_of_atoms * (2**(2 - i))
        x = lbann.Deconvolution(x,
                                num_dims=3,
                                num_output_channels=out_channels,
                                num_groups=1,
                                conv_dims_i=4,
                                conv_pads_i=1,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                has_bias=True,
                                name="Deconv_{0}".format(i + 2))
        x = lbann.BatchNormalization(x, name="BN_D{0}".format(i + 2))

        # Skip the activation on the last deconvolution; it is applied below
        # as "decoded" so that its outputs can be dumped.
        if i != 2:
            x = lbann.Tanh(x, name="Deconv_{0}_Activation".format(i + 2))

    decoded = lbann.Tanh(x, name="decoded")

    img_loss = lbann.MeanSquaredError([decoded, tensors])

    metrics = [lbann.Metric(img_loss, name='recon_error')]
    # ----------------------------------
    # Set up DAG
    # ----------------------------------

    layers = lbann.traverse_layer_graph(input_)  #Generate Model DAG
    return layers, img_loss, metrics
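
A minimal sketch of how the returned DAG, loss, and metrics might be wired into a model (epoch count and argument values are placeholder assumptions, not from the original source):

# Hypothetical driver consuming gen_layers(); values are placeholders.
layers, img_loss, metrics = gen_layers(latent_dim=64, number_of_atoms=11)
model = lbann.Model(10,  # number of epochs
                    layers=layers,
                    objective_function=lbann.ObjectiveFunction([img_loss]),
                    metrics=metrics)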