def forward(self, x):
    """Run the convolutional stack: pooling after stages 1, 2, and 5.

    All three pools share the same configuration, so a local helper
    builds them.
    """
    def max_pool(node):
        # 3x3 max-pool, stride 2, no padding (unnamed, as in the original).
        return lbann.Pooling(node,
                             num_dims=2,
                             has_vectors=False,
                             pool_dims_i=3,
                             pool_pads_i=0,
                             pool_strides_i=2,
                             pool_mode='max')

    out = max_pool(self.conv1(x))
    out = max_pool(self.conv2(out))
    out = self.conv5(self.conv4(self.conv3(out)))
    return max_pool(out)
def forward(self, x):
    """Two conv+pool stages, then three FC layers with dropout in between.

    Layer names are suffixed with the invocation count so each call of
    this module produces uniquely named LBANN layers.
    """
    self.instance += 1

    def _name(suffix):
        # e.g. "<module>_pool1_instance3"
        return '{0}_{1}_instance{2}'.format(self.name, suffix, self.instance)

    def _pool(node, suffix):
        # 2x2 max-pool, stride 2, no padding.
        return lbann.Pooling(node,
                             num_dims=2,
                             has_vectors=False,
                             pool_dims_i=2,
                             pool_pads_i=0,
                             pool_strides_i=2,
                             pool_mode='max',
                             name=_name(suffix))

    out = _pool(self.conv1(x), 'pool1')
    out = _pool(self.conv2(out), 'pool2')
    out = lbann.Dropout(self.fc1(out), keep_prob=0.5, name=_name('drop6'))
    out = lbann.Dropout(self.fc2(out), keep_prob=0.5, name=_name('drop7'))
    return self.fc3(out)
def forward(self, x):
    """Two conv + 2x2/stride-2 max-pool stages, then three FC layers."""
    self.instance += 1

    # Convolutional network
    out = self.conv1(x)
    out = lbann.Pooling(out,
                        num_dims=2,
                        has_vectors=False,
                        pool_dims_i=2,
                        pool_pads_i=0,
                        pool_strides_i=2,
                        pool_mode='max',
                        name='{0}_pool1_instance{1}'.format(
                            self.name, self.instance))
    out = self.conv2(out)
    out = lbann.Pooling(out,
                        num_dims=2,
                        has_vectors=False,
                        pool_dims_i=2,
                        pool_pads_i=0,
                        pool_strides_i=2,
                        pool_mode='max',
                        name='{0}_pool2_instance{1}'.format(
                            self.name, self.instance))

    # Fully-connected network
    out = self.fc1(out)
    out = self.fc2(out)
    return self.fc3(out)
def forward(self, x):
    """3-D U-Net style forward pass.

    Encoder conv outputs are stashed as skip connections, the bottleneck
    is applied, then the decoder upsamples and concatenates the matching
    encoder output at each level. A channel-wise softmax produces the
    per-voxel class distribution.
    """
    self.instance += 1

    # Encoder: conv, stash skip, 2x2x2 stride-2 max-pool.
    skips = []
    for level in range(self.NUM_LEVELS):
        x = self.downconvs[level](x)
        skips.append(x)
        x = lbann.Pooling(
            x,
            num_dims=3,
            has_vectors=False,
            pool_dims_i=2,
            pool_pads_i=0,
            pool_strides_i=2,
            pool_mode="max",
            name="{}_pool{}_instance{}".format(
                self.name, level + 1, self.instance))

    x = self.bottomconv(x)

    # Decoder: deconv, then conv with the mirrored skip connection.
    for level in range(self.NUM_LEVELS):
        x = self.deconvs[level](x)
        x = self.upconvs[level](
            x, x_concat=skips[self.NUM_LEVELS - 1 - level])

    x = self.lastconv(x)
    return lbann.Softmax(x, softmax_mode="channel")
def classification_layer(cumulative_layer_num, parent_node):
    """DenseNet classification head: global average pool -> 1000-way FC -> softmax.

    Args:
        cumulative_layer_num: running layer counter (used only for logging
            inside this function; callers do not receive the updated value).
        parent_node: the final feature-map node, presumably 7x7 spatially
            at this point in the network — TODO confirm against the caller.

    Returns:
        The softmax probabilities node.
    """
    # 7x7 global average pool.
    # BUG FIX: the original used pool_pads_i=1, which on a 7x7 feature map
    # yields a 3x3 output ((7 + 2*1 - 7)/1 + 1 = 3) instead of the intended
    # global 1x1 pool; padding must be 0 for a true global average pool.
    pooling_node = lbann.Pooling(
        parent_node,
        num_dims=2,
        pool_dims_i=7,
        pool_mode='average',
        pool_pads_i=0,
        pool_strides_i=1
    )
    cumulative_layer_num += 1
    log('classification_layer Pooling. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    # 1000-way classifier (ImageNet classes), no bias.
    fully_connected_node = lbann.FullyConnected(
        pooling_node,
        num_neurons=1000,
        has_bias=False
    )
    cumulative_layer_num += 1
    log('classification_layer FullyConnected. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    probabilities = lbann.Softmax(fully_connected_node)
    return probabilities
def _test_o2l_layer_Pool(self, numDims, poolMode, onnxOp):
    """Verify an ONNX pooling node converts to the expected LBANN Pooling layer."""
    batch, channels, side = 256, 3, 224
    kernel, pad, stride = 3, 1, 1

    # Reference LBANN layer the conversion should reproduce.
    expected = lbann.Pooling(lbann.Input(),
                             num_dims=numDims,
                             has_vectors=False,
                             pool_dims_i=kernel,
                             pool_pads_i=pad,
                             pool_strides_i=stride,
                             pool_mode=poolMode)

    shapes = {"x": [batch, channels] + [side] * numDims}
    node = onnx.helper.make_node(
        onnxOp,
        inputs=["x"],
        outputs=["y"],
        kernel_shape=[kernel] * numDims,
        pads=[pad] * (numDims * 2),
        strides=[stride] * numDims,
    )
    converted = convertOnnxNode(node, shapes, {}).pooling
    self._assertFields(expected, converted)
def create_pooling(x, i):
    # 3x3x3 average pool (stride 2, padding 1); closes over self for the
    # per-instance layer name, e.g. "<module>_pool2_instance1".
    pool_name = '{0}_pool{1}_instance{2}'.format(
        self.name, i, self.instance)
    return lbann.Pooling(
        x,
        num_dims=3,
        has_vectors=False,
        pool_dims_i=3,
        pool_pads_i=1,
        pool_strides_i=2,
        pool_mode='average',
        name=pool_name)
def forward(self, x):
    """Stem convolution + 3x3/stride-2 max-pool, then the block sequence."""
    self.instance += 1
    out = self.conv1(x)
    out = lbann.Pooling(out,
                        num_dims=2,
                        has_vectors=False,
                        pool_dims_i=3,
                        pool_pads_i=1,
                        pool_strides_i=2,
                        pool_mode='max',
                        name='{0}_pool1_instance{1}'.format(
                            self.name, self.instance))
    for block in self.blocks:
        out = block(out)
    return out
def transition_layer(current_block_num,
                     cumulative_layer_num,
                     parent_node,
                     num_output_channels
                     ):
    """DenseNet transition block: BN -> ReLU -> 1x1 conv -> 2x2 avg pool.

    Compresses channels to `num_output_channels` and halves the spatial
    resolution. Returns the pooling node and the updated layer count.
    """
    bn_node = standard_batchnorm(parent_node)
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer BatchNormalization. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    relu_node = lbann.Relu(bn_node)
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Relu. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    # 1x1 convolution compresses the channel count.
    conv_node = lbann.Convolution(
        relu_node,
        conv_dims_i=1,
        conv_pads_i=0,
        conv_strides_i=1,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_output_channels
    )
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Convolution. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    # 2x2 average pool, stride 2 — halves height and width.
    pool_node = lbann.Pooling(
        conv_node,
        num_dims=2,
        pool_dims_i=2,
        pool_mode='average',
        pool_pads_i=0,
        pool_strides_i=2
    )
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Pooling. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    return pool_node, cumulative_layer_num
def initial_layer(cumulative_layer_num,
                  images_node,
                  num_initial_channels
                  ):
    """DenseNet stem: 7x7/stride-2 conv -> BN -> ReLU -> 3x3/stride-2 max pool.

    Returns the pooling node and the updated running layer count.
    """
    # 7x7 conv, stride 2, padding 3.
    conv_node = lbann.Convolution(
        images_node,
        conv_dims_i=7,
        conv_pads_i=3,
        conv_strides_i=2,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_initial_channels
    )
    cumulative_layer_num += 1
    log('initial_layer Convolution. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    bn_node = standard_batchnorm(conv_node)
    cumulative_layer_num += 1
    log('initial_layer BatchNormalization. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    relu_node = lbann.Relu(bn_node)
    cumulative_layer_num += 1
    log('initial_layer Relu. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    # 3x3 max pool, stride 2, padding 1.
    pool_node = lbann.Pooling(
        relu_node,
        num_dims=2,
        pool_dims_i=3,
        pool_mode='max',
        pool_pads_i=1,
        pool_strides_i=2
    )
    cumulative_layer_num += 1
    log('initial_layer Pooling. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    return pool_node, cumulative_layer_num
def forward(self, x):
    """Convolution, then optional batchnorm, strided max-pool, activation.

    The pool stands in for a strided convolution (see the note below);
    batchnorm and activation are applied only when configured.
    """
    self.instance += 1

    # Convolution
    out = self.conv(x)

    # Batchnorm (optional)
    if self.use_bn:
        out = lbann.BatchNormalization(
            out,
            weights=self.bn_weights,
            statistics_group_size=self.bn_statistics_group_size,
            decay=0.999,
            parallel_strategy=self.ps,
            name='{0}_bn_instance{1}'.format(self.name, self.instance))

    # Strided pooling
    # Note: Ideally we would do this immediately after the
    # convolution, but we run into issues since the tensor
    # overlaps don't match.
    ### @todo Support strided convolution in distconv
    if self.stride != 1:
        out = lbann.Pooling(
            out,
            num_dims=3,
            pool_dims_i=self.stride,
            pool_strides_i=self.stride,
            pool_mode='max',
            parallel_strategy=self.ps,
            name='{0}_pool_instance{1}'.format(self.name, self.instance))

    # Activation (optional)
    if self.activation:
        out = self.activation(
            out,
            parallel_strategy=self.ps,
            name='{0}_activation_instance{1}'.format(
                self.name, self.instance))

    return out
def _test_l2o_layer_pooling(self, numDims, poolMode, onnxOp):
    """Verify an LBANN Pooling layer exports to the expected ONNX node."""
    batch, channels, side = 256, 3, 224
    kernel, pad, stride = 3, 1, 1

    # Reference ONNX node the export should reproduce.
    expected = onnx.helper.make_node(
        onnxOp,
        inputs=["x"],
        outputs=["y"],
        kernel_shape=[kernel] * numDims,
        pads=[pad] * (numDims * 2),
        strides=[stride] * numDims,
    )

    layer = lbann.Pooling(lbann.Input(name="x"),
                          num_dims=numDims,
                          has_vectors=False,
                          pool_dims_i=kernel,
                          pool_pads_i=pad,
                          pool_strides_i=stride,
                          pool_mode=poolMode)
    converted = parseLbannLayer(
        layer.export_proto(),
        {"x_0": (batch, channels, side, side)})["nodes"]
    self._assertFields(converted, expected)
images = lbann.Identity(input_) labels = lbann.Identity(input_) # LeNet x = lbann.Convolution(images, num_dims=2, num_output_channels=6, num_groups=1, conv_dims_i=5, conv_strides_i=1, conv_dilations_i=1, has_bias=True) x = lbann.Relu(x) x = lbann.Pooling(x, num_dims=2, pool_dims_i=2, pool_strides_i=2, pool_mode="max") x = lbann.Convolution(x, num_dims=2, num_output_channels=16, num_groups=1, conv_dims_i=5, conv_strides_i=1, conv_dilations_i=1, has_bias=True) x = lbann.Relu(x) x = lbann.Pooling(x, num_dims=2, pool_dims_i=2, pool_strides_i=2,
def forward(self, x):
    """AlexNet-style forward pass.

    Conv stack with local response normalization and overlapping 3x3
    stride-2 max-pools after stages 1, 2, and 5, followed by three FC
    layers with dropout. Layer names carry the invocation count so each
    call yields uniquely named layers.
    """
    self.instance += 1

    def _name(suffix):
        # e.g. "<module>_pool1_instance3"
        return '{0}_{1}_instance{2}'.format(self.name, suffix, self.instance)

    def _lrn(node, suffix):
        # Local response normalization (width 5, alpha 1e-4, beta 0.75, k 2).
        return lbann.LocalResponseNormalization(
            node, window_width=5, lrn_alpha=0.0001, lrn_beta=0.75,
            lrn_k=2, name=_name(suffix))

    def _pool(node, suffix):
        # 3x3 overlapping max-pool, stride 2, no padding.
        return lbann.Pooling(
            node, num_dims=2, has_vectors=False,
            pool_dims_i=3, pool_pads_i=0, pool_strides_i=2,
            pool_mode='max', name=_name(suffix))

    # Convolutional network
    out = _pool(_lrn(self.conv1(x), 'norm1'), 'pool1')
    out = _pool(_lrn(self.conv2(out), 'norm2'), 'pool2')
    out = self.conv5(self.conv4(self.conv3(out)))
    out = _pool(out, 'pool5')

    # Fully-connected network
    out = lbann.Dropout(self.fc6(out), keep_prob=0.5, name=_name('drop6'))
    out = lbann.Dropout(self.fc7(out), keep_prob=0.5, name=_name('drop7'))
    return self.fc8(out)
elif args.model == "cnn": for i, num_channels in enumerate([20, 50]): x = lbann.Convolution(x, num_dims=2, num_output_channels=num_channels, conv_dims_i=5, conv_pads_i=0, conv_strides_i=1, has_bias=has_bias, name="conv{}".format(i + 1)) x = lbann.Relu(x) x = lbann.Pooling(x, num_dims=2, pool_dims_i=2, pool_pads_i=0, pool_strides_i=2, pool_mode="max", name="pool{}".format(i + 1)) for i, num_neurons in enumerate([500, num_classes]): if i: x = lbann.Relu(x) x = lbann.FullyConnected( x, num_neurons=num_neurons, has_bias=has_bias, name="ip{}".format(i + 1), weights=[ lbann.Weights(initializer=lbann.LeCunNormalInitializer())
# Small conv stack: conv1 -> relu1 -> pool1 -> conv2 -> relu2.
# All dims/pads/strides are given as space-separated vector strings
# (has_vectors=True), matching the protobuf vector form.
conv1 = lbann.Convolution(image,
                          name="conv1",
                          num_dims=2,
                          num_output_channels=16,
                          conv_dims='3 3',
                          conv_pads='0 0',
                          conv_strides='1 1',
                          has_bias=True,
                          has_vectors=True)
relu1 = lbann.Relu(conv1, name="relu1")

# 2x2 max-pool with stride 1 (note: stride 1, not 2 — overlapping pool).
pool1 = lbann.Pooling(relu1,
                      name="pool1",
                      num_dims=2,
                      pool_dims='2 2',
                      pool_pads='0 0',
                      pool_strides='1 1',
                      pool_mode="max",
                      has_vectors=True)

conv2 = lbann.Convolution(pool1,
                          name="conv2",
                          num_dims=2,
                          num_output_channels=8,
                          conv_dims='3 3',
                          conv_pads='0 0',
                          conv_strides='1 1',
                          has_bias=True,
                          has_vectors=True)
relu2 = lbann.Relu(conv2, name="relu2")