Code Example #1
    def _test_o2l_layer_Conv(self, numDims, hasBias):
        N, C_in, H = (256, 3, 224)
        C_out = 64
        K, P, S, D = (3, 1, 1, 1)
        G = 1

        lbannConv = lbann.Convolution(lbann.Input(),
                                      num_dims=numDims,
                                      num_output_channels=C_out,
                                      has_vectors=False,
                                      conv_dims_i=K,
                                      conv_pads_i=P,
                                      conv_strides_i=S,
                                      conv_dilations_i=D,
                                      num_groups=G,
                                      has_bias=hasBias)

        inputShapes = {"x": [N, C_in] + [H] * numDims}
        paramShapes = {"W": [C_out, C_in] + [K] * numDims}
        if hasBias:
            paramShapes["b"] = [C_out]

        node = onnx.helper.make_node("Conv",
                                     inputs=["x", "W"] +
                                     (["b"] if hasBias else []),
                                     outputs=["y"],
                                     kernel_shape=[K] * numDims,
                                     pads=[P] * (numDims * 2),
                                     strides=[S] * numDims,
                                     dilations=[D] * numDims,
                                     group=G)
        onnxConv = convertOnnxNode(node, inputShapes, paramShapes).convolution

        self._assertFields(lbannConv, onnxConv)
Code Example #2
def conv_block(statistics_group_size, current_block_num, current_layer_num,
               cumulative_layer_num, parent_node, conv_dims_i, conv_pads_i,
               num_output_channels):
    batch_normalization_node = standard_batchnorm(statistics_group_size,
                                                  parent_node)
    cumulative_layer_num += 1
    log('dense_block={b} dense_layer={l} BatchNormalization. cumulative_layer_num={n}'
        .format(b=current_block_num,
                l=current_layer_num,
                n=cumulative_layer_num))

    relu_node = lbann.Relu(batch_normalization_node)
    cumulative_layer_num += 1
    log('dense_block={b} dense_layer={l} Relu. cumulative_layer_num={n}'
        .format(b=current_block_num,
                l=current_layer_num,
                n=cumulative_layer_num))

    convolution_node = lbann.Convolution(
        relu_node,
        conv_dims_i=conv_dims_i,
        conv_pads_i=conv_pads_i,
        conv_strides_i=1,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_output_channels)
    cumulative_layer_num += 1
    log('dense_block={b} dense_layer={l} Convolution. cumulative_layer_num={n}'
        .format(b=current_block_num,
                l=current_layer_num,
                n=cumulative_layer_num))

    return convolution_node, cumulative_layer_num
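conv_block returns the new convolution node together with the updated running layer count. A simplified sketch (not from the source; dense_block, growth_rate, and the lbann.Concatenation wiring are assumptions) of how it could be chained into a DenseNet-style dense block:

def dense_block(statistics_group_size, block_num, num_layers,
                cumulative_layer_num, parent_node, growth_rate):
    # Dense connectivity: each layer sees the concatenation of all
    # earlier feature maps and contributes growth_rate new channels.
    nodes = [parent_node]
    for layer_num in range(num_layers):
        conv_node, cumulative_layer_num = conv_block(
            statistics_group_size, block_num, layer_num,
            cumulative_layer_num, lbann.Concatenation(*nodes),
            conv_dims_i=3, conv_pads_i=1,
            num_output_channels=growth_rate)
        nodes.append(conv_node)
    return lbann.Concatenation(*nodes), cumulative_layer_num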
Code Example #3
    def _test_l2o_layer_convolution(self, numDims, hasBias):
        N, C_in, H = (256, 3, 224)
        C_out = 64
        K, P, S, D = (3, 1, 1, 1)
        G = 1

        onnxConv = onnx.helper.make_node("Conv",
                                         inputs=["x", "W"] +
                                         (["b"] if hasBias else []),
                                         outputs=["y"],
                                         kernel_shape=[K] * numDims,
                                         pads=[P] * (numDims * 2),
                                         strides=[S] * numDims,
                                         dilations=[D] * numDims,
                                         group=G)

        layer = lbann.Convolution(lbann.Input(name="x"),
                                  num_dims=numDims,
                                  num_output_channels=C_out,
                                  has_vectors=False,
                                  conv_dims_i=K,
                                  conv_pads_i=P,
                                  conv_strides_i=S,
                                  conv_dilations_i=D,
                                  num_groups=G,
                                  has_bias=hasBias)
        lbannConv = parseLbannLayer(layer.export_proto(),
                                    {"x_0": (N, C_in, H, H)})["nodes"]

        self._assertFields(lbannConv, onnxConv)
Code Example #4
    def forward(self, x):
        self.instance += 1
        name = '{0}_instance{1}'.format(self.name, self.instance)

        convtype = ('_deconv' if self.transpose else '_conv')
        kwargs = {}
        kwargs['weights'] = self.weights

        kwargs['name'] = (name + convtype if self.activation else name)
        kwargs['num_dims'] = self.num_dims
        kwargs['num_output_channels'] = self.out_channels
        kwargs['has_bias'] = self.bias
        kwargs['num_groups'] = self.groups
        kwargs['parallel_strategy'] = self.parallel_strategy
        kwargs['has_vectors'] = True

        kwargs['conv_dims'] = str_list(self.kernel_dims)
        kwargs['conv_pads'] = str_list(self.padding)
        kwargs['conv_dilations'] = str_list(self.dilation)
        kwargs['conv_strides'] = str_list(self.stride)

        if self.transpose:
            y = lbann.Deconvolution(x, **kwargs)
        else:
            y = lbann.Convolution(x, **kwargs)
        if self.activation:
            return self.activation(y, name=name + '_activation')
        else:
            return y
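This forward pass uses has_vectors=True and passes each convolution hyperparameter as a space-separated string via str_list, whereas examples #1, #3, and #5 use has_vectors=False with scalar *_i arguments. A minimal sketch of the two calling conventions side by side (illustrative channel counts; other arguments omitted):

# Equivalent 2D convolutions with a square 3x3 kernel:
y1 = lbann.Convolution(x, num_dims=2, num_output_channels=16,
                       has_vectors=False, conv_dims_i=3, conv_pads_i=1,
                       conv_strides_i=1)
y2 = lbann.Convolution(x, num_dims=2, num_output_channels=16,
                       has_vectors=True, conv_dims='3 3', conv_pads='1 1',
                       conv_strides='1 1')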
Code Example #5
File: modules.py Project: wderekjones/lbann
    def forward(self, x):
        self.instance += 1
        name = '{0}_instance{1}'.format(self.name, self.instance)
        y = lbann.Convolution(x,
                              weights=self.weights,
                              name=(name+'_conv' if self.activation else name),
                              num_dims=self.num_dims,
                              num_output_channels=self.out_channels,
                              has_vectors=False,
                              conv_dims_i=self.kernel_size,
                              conv_pads_i=self.padding,
                              conv_strides_i=self.stride,
                              conv_dilations_i=self.dilation,
                              num_groups=self.groups,
                              has_bias=self.bias)
        if self.activation:
            return self.activation(y, name=name+'_activation')
        else:
            return y
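A possible usage sketch, assuming this forward belongs to lbann.modules.ConvolutionModule and that its constructor takes num_dims, out_channels, and kernel_size plus the hyperparameters the method reads (an assumption inferred from the attributes used above):

conv = lbann.modules.ConvolutionModule(2, 16, 3,  # num_dims, out_channels, kernel_size (assumed order)
                                       stride=1, padding=1,
                                       activation=lbann.Relu)
y = conv(x)  # modules are callable; __call__ dispatches to forward()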
Code Example #6
def transition_layer(current_block_num,
                     cumulative_layer_num,
                     parent_node,
                     num_output_channels
                     ):
    batch_normalization_node = standard_batchnorm(parent_node)
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer BatchNormalization. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    relu_node = lbann.Relu(batch_normalization_node)
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Relu. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    convolution_node = lbann.Convolution(
        relu_node,
        conv_dims_i=1,
        conv_pads_i=0,
        conv_strides_i=1,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_output_channels
    )
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Convolution. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    # 2x2 average pool, stride 2
    pooling_node = lbann.Pooling(
        convolution_node,
        num_dims=2,
        pool_dims_i=2,
        pool_mode='average',
        pool_pads_i=0,
        pool_strides_i=2
    )
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Pooling. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    return pooling_node, cumulative_layer_num
Code Example #7
def initial_layer(cumulative_layer_num,
                  images_node,
                  num_initial_channels
                  ):
    # 7x7 conv, stride 2
    convolution_node = lbann.Convolution(
        images_node,
        conv_dims_i=7,
        conv_pads_i=3,
        conv_strides_i=2,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_initial_channels
    )
    cumulative_layer_num += 1
    log('initial_layer Convolution. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    batch_normalization_node = standard_batchnorm(convolution_node)
    cumulative_layer_num += 1
    log('initial_layer BatchNormalization. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    relu_node = lbann.Relu(batch_normalization_node)
    cumulative_layer_num += 1
    log('initial_layer Relu. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    # 3x3 max pool, stride 2
    pooling_node = lbann.Pooling(
        relu_node,
        num_dims=2,
        pool_dims_i=3,
        pool_mode='max',
        pool_pads_i=1,
        pool_strides_i=2
    )
    cumulative_layer_num += 1
    log('initial_layer Pooling. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    return pooling_node, cumulative_layer_num
Code Example #8
File: lenet.py Project: oyamay/lbann
args = parser.parse_args()

# ----------------------------------
# Construct layer graph
# ----------------------------------

# Input data
input_ = lbann.Input(target_mode='classification')
images = lbann.Identity(input_)
labels = lbann.Identity(input_)

# LeNet
x = lbann.Convolution(images,
                      num_dims=2,
                      num_output_channels=6,
                      num_groups=1,
                      conv_dims_i=5,
                      conv_strides_i=1,
                      conv_dilations_i=1,
                      has_bias=True)
x = lbann.Relu(x)
x = lbann.Pooling(x,
                  num_dims=2,
                  pool_dims_i=2,
                  pool_strides_i=2,
                  pool_mode="max")
x = lbann.Convolution(x,
                      num_dims=2,
                      num_output_channels=16,
                      num_groups=1,
                      conv_dims_i=5,
                      conv_strides_i=1,
                      conv_dilations_i=1,
                      has_bias=True)
Code Example #9
def gen_layers(latent_dim, number_of_atoms):
    '''Generates the model for the 3D convolutional autoencoder.

    Returns the directed acyclic graph (DAG) that the LBANN model
    will run on.
    '''

    input_ = lbann.Input(target_mode="reconstruction")
    tensors = lbann.Identity(input_)

    # Input tensor shape is (number_of_atoms)x32x32x32; dims hardcodes number_of_atoms=11
    tensors = lbann.Reshape(tensors, dims="11 32 32 32", name="Sample")

    # Encoder

    x = lbann.Identity(tensors)
    for i in range(4):
        out_channels = latent_dim // (2**(3 - i))

        x = lbann.Convolution(x,
                              num_dims=3,
                              num_output_channels=out_channels,
                              num_groups=1,
                              conv_dims_i=4,
                              conv_strides_i=2,
                              conv_dilations_i=1,
                              conv_pads_i=1,
                              has_bias=True,
                              name="Conv_{0}".format(i))

        x = lbann.BatchNormalization(x, name="Batch_NORM_{0}".format(i + 1))
        x = lbann.LeakyRelu(x, name="Conv_{0}_Activation".format(i + 1))

    # Shape: (latent_dim)x2x2x2
    encoded = lbann.Convolution(x,
                                num_dims=3,
                                num_output_channels=latent_dim,
                                num_groups=1,
                                conv_dims_i=2,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                conv_pads_i=0,
                                has_bias=True,
                                name="encoded")

    # Shape: (latent_dim)x1x1x1

    # Decoder

    x = lbann.Deconvolution(encoded,
                            num_dims=3,
                            num_output_channels=number_of_atoms * 16,
                            num_groups=1,
                            conv_dims_i=4,
                            conv_pads_i=0,
                            conv_strides_i=2,
                            conv_dilations_i=1,
                            has_bias=True,
                            name="Deconv_1")
    x = lbann.BatchNormalization(x, name="BN_D1")
    x = lbann.Tanh(x, name="Deconv_1_Activation")

    for i in range(3):
        out_channels = number_of_atoms * (2**(2 - i))
        x = lbann.Deconvolution(x,
                                num_dims=3,
                                num_output_channels=out_channels,
                                num_groups=1,
                                conv_dims_i=4,
                                conv_pads_i=1,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                has_bias=True,
                                name="Deconv_{0}".format(i + 2))
        x = lbann.BatchNormalization(x, name="BN_D{0}".format(i + 2))

        # Skip the in-loop activation on the last iteration; the final
        # Tanh below is named "decoded" so its outputs can be dumped.
        if i != 2:
            x = lbann.Tanh(x, name="Deconv_{0}_Activation".format(i + 2))

    decoded = lbann.Tanh(x, name="decoded")

    img_loss = lbann.MeanSquaredError([decoded, tensors])

    metrics = [lbann.Metric(img_loss, name='recon_error')]
    # ----------------------------------
    # Set up DAG
    # ----------------------------------

    layers = lbann.traverse_layer_graph(input_)  #Generate Model DAG
    return layers, img_loss, metrics
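A minimal sketch of wiring the returned graph into a model, assuming the lbann.Model keyword names of recent releases (epochs, layers, objective_function, metrics, callbacks) and illustrative hyperparameters:

layers, img_loss, metrics = gen_layers(latent_dim=256, number_of_atoms=11)
model = lbann.Model(epochs=10,
                    layers=layers,
                    objective_function=lbann.ObjectiveFunction([img_loss]),
                    metrics=metrics,
                    callbacks=[lbann.CallbackPrint()])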
Code Example #10
        x = lbann.FullyConnected(
            x,
            num_neurons=num_neurons,
            has_bias=has_bias,
            name="ip{}".format(i + 1),
            weights=[
                lbann.Weights(initializer=lbann.LeCunNormalInitializer())
            ])

elif args.model == "cnn":
    for i, num_channels in enumerate([20, 50]):
        x = lbann.Convolution(x,
                              num_dims=2,
                              num_output_channels=num_channels,
                              conv_dims_i=5,
                              conv_pads_i=0,
                              conv_strides_i=1,
                              has_bias=has_bias,
                              name="conv{}".format(i + 1))
        x = lbann.Relu(x)
        x = lbann.Pooling(x,
                          num_dims=2,
                          pool_dims_i=2,
                          pool_pads_i=0,
                          pool_strides_i=2,
                          pool_mode="max",
                          name="pool{}".format(i + 1))

    for i, num_neurons in enumerate([500, num_classes]):
        if i:
            x = lbann.Relu(x)
Code Example #11
else:
    print("Dataset must be cifar10 or imagenet. Try again.")
    exit()

# Construct layer graph
input_ = lbann.Input(name='input', target_mode='classification')
image = lbann.Identity(input_, name='images')
dummy = lbann.Dummy(input_, name='labels')

# Encoder

conv1 = lbann.Convolution(image,
                          name="conv1",
                          num_dims=2,
                          num_output_channels=16,
                          conv_dims='3 3',
                          conv_pads='0 0',
                          conv_strides='1 1',
                          has_bias=True,
                          has_vectors=True)

relu1 = lbann.Relu(conv1, name="relu1")

pool1 = lbann.Pooling(relu1,
                      name="pool1",
                      num_dims=2,
                      pool_dims='2 2',
                      pool_pads='0 0',
                      pool_strides='1 1',
                      pool_mode="max",
                      has_vectors=True)