Example #1
 def forward_generator(self, z):
     x = lbann.Relu(lbann.BatchNormalization(
         self.g_fc1(z), decay=0.9, scale_init=1.0, epsilon=1e-5))
     x = lbann.Reshape(x, dims='512 8 8')  # channel first
     x = lbann.Relu(lbann.BatchNormalization(
         self.g_convT[0](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
     x = lbann.Relu(lbann.BatchNormalization(
         self.g_convT[1](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
     x = lbann.Relu(lbann.BatchNormalization(
         self.g_convT[2](x), decay=0.9, scale_init=1.0, epsilon=1e-5))
     return self.g_convT3(x)
Example #2
    def forward_generator(self, z, mcr):
        '''
        Build the Generator
        '''
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_fc1(z),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        dims = '512 8 8'
        x = lbann.Reshape(x, dims=dims)  #channel first

        for count, lyr in enumerate(self.g_convT):
            x = lbann.Relu(
                lbann.BatchNormalization(lyr(x),
                                         decay=0.9,
                                         scale_init=1.0,
                                         epsilon=1e-5))

        img = self.g_convT3(x)

        if mcr:  ### For multi-channel rescaling, add extra channel to output image
            linear_scale = 1 / self.linear_scaler
            #             linear_scale=lbann.Constant(value=0.001)
            ch2 = lbann.Tanh(
                lbann.WeightedSum(self.inv_transform(img),
                                  scaling_factors=str(linear_scale)))
            y = lbann.Concatenation(img, ch2, axis=0)
            img = lbann.Reshape(y, dims='2 128 128')
        else:
            img = lbann.Reshape(img, dims='1 128 128')

        return img
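
Note: with a single input, lbann.WeightedSum reduces to multiplication by a constant, which is how the division by self.linear_scaler is expressed as a layer above. A minimal sketch of the trick in isolation (the tensor t and the factor are illustrative):

    # Multiply a tensor t by a compile-time constant via WeightedSum;
    # one parent plus one scaling factor means output = 0.5 * t.
    scaled = lbann.WeightedSum(t, scaling_factors=str(0.5))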
Example #3
def DGCN_layer(feature_matrix,adj_matrix, node_features):
    """An example 3 layer GCN kernel.
    Args:
        feature_matrix (Layer): Node feature layer. Should have the shape:
                                (num_nodes, node_features)
        adj_matrix (Layer): Adjacency matrix layer. Should have the shape:
                            (num_nodes, num_nodes)
        node_features (int): The number of features per node
    Returns: 
        (Layer): Returns the new embedding of the node features
    """
    out_channel_1 = 1024
    out_channel_2 = 512
    out_channel_3 = 256
    
    gcn1 = DenseGCNConv(input_channels=node_features, output_channels=out_channel_1)
    gcn2 = DenseGCNConv(input_channels=out_channel_1, output_channels=out_channel_2)
    gcn3 = DenseGCNConv(input_channels=out_channel_2, output_channels=out_channel_3)
    
    out_channel = out_channel_3
    
    x = gcn1(feature_matrix, adj_matrix)
    x = lbann.Relu(x, name="DGCN1_activation")

    x = gcn2(x, adj_matrix)
    x = lbann.Relu(x, name="DGCN2_activation")
    
    x = gcn3(x, adj_matrix)
    x = lbann.Relu(x, name="DGCN3_activation")
    return x
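
The docstring above pins the expected shapes, so a call only needs a node-feature layer and an adjacency layer of matching sizes. A hedged usage sketch, assuming feature_matrix and adj_matrix are LBANN layers built elsewhere with the stated (num_nodes, node_features) and (num_nodes, num_nodes) shapes:

    node_features = 3  # illustrative feature width
    embedding = DGCN_layer(feature_matrix, adj_matrix, node_features)
    # embedding has out_channel_3 = 256 features per node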
Example #4
 def decoder_cnn(self, z):
     x = self.dec_cnn_fc(z)
     sca = self.dec_fc_sca(lbann.Identity(x))
     img = lbann.Reshape(lbann.Identity(x),
                         dims="16 8 8",
                         name=self.name + 'dec_reshape0')
     img = self.dec_convT[2](lbann.Relu(self.dec_convT[1](lbann.Relu(
         self.dec_convT[0](img)))))
     #concat for common interface, slice in output
     img = lbann.Reshape(img,
                         dims=str(64 * 64 * 4),
                         name=self.name +
                         'dec_reshape1')  #?? check tensor shape
     #todo check that concat size == dec_out_dim
     return lbann.Concatenation([img, sca], axis=0)
Example #5
 def forward(self, x):
     self.instance += 1
     x = self.conv(x)
     x = self.bn(x)
     return lbann.Relu(x,
                       name='{0}_relu_instance{1}'.format(
                           self.name, self.instance))
Example #6
def conv_block(statistics_group_size, current_block_num, current_layer_num,
               cumulative_layer_num, parent_node, conv_dims_i, conv_pads_i,
               num_output_channels):
    batch_normalization_node = standard_batchnorm(statistics_group_size,
                                                  parent_node)
    cumulative_layer_num += 1
    log('dense_block={b} dense_layer={l} BatchNormalization. cumulative_layer_num={n}'
        .format(b=current_block_num,
                l=current_layer_num,
                n=cumulative_layer_num))

    relu_node = lbann.Relu(batch_normalization_node)
    cumulative_layer_num += 1
    log('dense_block={b} dense_layer={l} Relu. cumulative_layer_num={n}'.
        format(b=current_block_num,
               l=current_layer_num,
               n=cumulative_layer_num))

    convolution_node = lbann.Convolution(
        relu_node,
        conv_dims_i=conv_dims_i,
        conv_pads_i=conv_pads_i,
        conv_strides_i=1,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_output_channels)
    cumulative_layer_num += 1
    log('dense_block={b} dense_layer={l} Convolution. cumulative_layer_num={n}'
        .format(b=current_block_num,
                l=current_layer_num,
                n=cumulative_layer_num))

    return convolution_node, cumulative_layer_num
Example #7
    def forward_generator(self, z, mcr):
        '''
        Build the Generator
        '''
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_fc1(z),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        dims = '512 8 8'

        print("dims", dims)
        x = lbann.Reshape(x, dims=dims)  #channel first
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[0](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[1](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        x = lbann.Relu(
            lbann.BatchNormalization(self.g_convT[2](x),
                                     decay=0.9,
                                     scale_init=1.0,
                                     epsilon=1e-5))
        img = self.g_convT3(x)

        if mcr:  ### For multi-channel rescaling, add extra channel to output image
            linear_scale = 1 / self.linear_scaler
            #ch2 = lbann.Tanh(self.inv_transform(img)/linear_scalar)
            ch2 = lbann.Tanh(
                lbann.WeightedSum(self.inv_transform(img),
                                  scaling_factors=str(linear_scale)))
            y = lbann.Concatenation(img, ch2, axis=0)
            img = lbann.Reshape(y, dims='2 128 128')
        else:
            img = lbann.Reshape(img, dims='1 128 128')

        print('Gen Img in GAN', img.__dict__)
        return img
Example #8
File: resnet.py Project: oyamay/lbann
 def forward(self, x):
     self.instance += 1
     y1 = self.branch1(x) if self.branch1 else x
     y2 = self.branch2c(self.branch2b(self.branch2a(x)))
     z = lbann.Add([y1, y2],
                   name='{0}_sum_instance{1}'.format(
                       self.name, self.instance))
     return lbann.Relu(z,
                       name='{0}_relu_instance{1}'.format(
                           self.name, self.instance))
Example #9
    def forward(self, x, mask=None):
        """Apply Transformer encoder layer.

        Args:
            x (lbann.Layer): Sequence of input vectors.
            mask (lbann.Layer, optional): Attention mask.

        Returns:
            lbann.Layer: Sequence of output vectors.

        """
        self.instance += 1
        name = f'{self.name}_instance{self.instance}'

        # Self-attention with residual connection
        y = self.attention(x, x, x, mask=mask)
        if self.dropout_prob > 0:
            y = lbann.Dropout(
                y,
                keep_prob=1 - self.dropout_prob,
                name=f'{name}_drop1',
            )
        z = lbann.Sum(x, y, name=f'{name}_sum1')
        z = lbann.InstanceNorm(z, name=f'{name}_norm1')
        x = z

        # Feedforward network with residual connection
        y = lbann.ChannelwiseFullyConnected(
            x,
            weights=self.fc1_weights,
            output_channel_dims=[self.feedforward_dim],
            name=f'{name}_fc1',
        )
        y = lbann.Relu(y, name=f'{name}_relu1')
        if self.dropout_prob > 0:
            y = lbann.Dropout(
                y,
                keep_prob=1 - self.dropout_prob,
                name=f'{name}_drop2',
            )
        y = lbann.ChannelwiseFullyConnected(
            y,
            weights=self.fc2_weights,
            output_channel_dims=[self.embed_dim],
            name=f'{name}_fc2',
        )
        if self.dropout_prob > 0:
            y = lbann.Dropout(
                y,
                keep_prob=1 - self.dropout_prob,
                name=f'{name}_drop3',
            )
        z = lbann.Sum(x, y, name=f'{name}_sum2')
        z = lbann.InstanceNorm(z, name=f'{name}_norm2')
        return z
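
Because each call increments self.instance and derives unique sublayer names from it, a single set of layer modules can be applied repeatedly in a graph. A sketch of stacking such layers; the TransformerEncoderLayer constructor and its arguments are assumptions here, since only forward() is shown above:

    # Hypothetical construction; embed_dim/feedforward_dim/dropout are illustrative.
    encoder = [TransformerEncoderLayer(embed_dim=512,
                                       feedforward_dim=2048,
                                       dropout=0.1)
               for _ in range(6)]
    for layer in encoder:
        x = layer(x, mask=mask)  # instance counter keeps the names distinct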
Example #10
    def test_o2l_layer_Relu(self):
        N, C, H, W = (100, 200, 300, 400)

        lbannRelu = lbann.Relu(lbann.Input(), )

        node = onnx.helper.make_node(
            "Relu",
            inputs=["x"],
            outputs=["y"],
        )
        onnxRelu = convertOnnxNode(node, {"x": [N, C, H, W]}, {}).relu

        self._assertFields(lbannRelu, onnxRelu)
Example #11
 def forward_generator(self, z, ps=None):
     x = lbann.FullyConnected(z,
                              num_neurons=np.prod(self.outc_dims),
                              has_bias=True,
                              device=self.g_device)
     x = lbann.Reshape(x,
                       dims=list2str(self.outc_dims),
                       name='gen_zin_reshape',
                       device=self.g_device)
     x = lbann.Relu(self.g_convT[0](x),
                    name='g_relu0',
                    parallel_strategy=ps,
                    device=self.g_device)
     x = lbann.Relu(self.g_convT[1](x),
                    name='g_relu1',
                    parallel_strategy=ps,
                    device=self.g_device)
     x = lbann.Relu(self.g_convT[2](x),
                    name='g_relu2',
                    parallel_strategy=ps,
                    device=self.g_device)
     return self.g_convT3(x)
Example #12
def construct_model():
    """Model description

    """
    import lbann
    import lbann.modules

    fc = lbann.modules.FullyConnectedModule
    conv = lbann.modules.Convolution2dModule

    conv1 = conv(20, 3, stride=1, padding=1, name='conv1')
    conv2 = conv(20, 3, stride=1, padding=1, name='conv2')
    fc1 = fc(100, name='fc1')
    fc2 = fc(20, name='fc2')
    fc3 = fc(num_classes, name='fc3')
    # Layer graph
    input = lbann.Input(name='inp_tensor', target_mode='classification')
    inp_slice = lbann.Slice(input,
                            axis=0,
                            slice_points=str_list([0, dims - 1, dims]),
                            name='inp_slice')
    xdata = lbann.Identity(inp_slice)
    ylabel = lbann.Identity(inp_slice, name='gt_y')
    #NHWC to NCHW
    x = lbann.Reshape(xdata, dims='14 13 13')
    x = conv2(conv1(x))
    x = lbann.Reshape(x, dims='3380')
    x = lbann.Dropout(lbann.Relu(fc1(x)), keep_prob=0.5)
    x = lbann.Dropout(fc2(x), keep_prob=0.5)
    pred = lbann.Softmax(fc3(x))
    gt_label = lbann.OneHot(ylabel, size=num_classes)
    loss = lbann.CrossEntropy([pred, gt_label], name='loss')
    acc = lbann.CategoricalAccuracy([pred, gt_label])

    layers = list(lbann.traverse_layer_graph(input))
    # Setup objective function
    weights = set()
    for l in layers:
        weights.update(l.weights)
    obj = lbann.ObjectiveFunction(loss)

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    # Construct model
    num_epochs = 10
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=[lbann.Metric(acc, name='accuracy', unit='%')],
                       objective_function=obj,
                       callbacks=callbacks)
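
construct_model references dims, num_classes, and str_list from its enclosing module. A sketch of definitions that would make the snippet self-contained: the dims value follows from the reshape to 14x13x13 plus one label element, while num_classes is a placeholder that must match fc3 and the dataset.

    from lbann.util import str_list  # joins a list of ints into a space-separated string

    dims = 14 * 13 * 13 + 1  # 2367: flattened sample plus one label element
    num_classes = 20         # placeholder; set to the dataset's label count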
Example #13
 def forward(self, x):
     self.instance += 1
     conv = self.conv(x)
     bn = lbann.BatchNormalization(
         conv,
         weights=self.bn_weights,
         stats_aggregation=self.bn_stats_aggregation,
         name='{0}_bn_instance{1}'.format(self.name, self.instance))
     if self.relu:
         return lbann.Relu(bn,
                           name='{0}_relu_instance{1}'.format(
                               self.name, self.instance))
     else:
         return bn
Example #14
    def test_l2o_layer_relu(self):
        N, C, H, W = (100, 200, 300, 400)

        onnxRelu = onnx.helper.make_node(
            "Relu",
            inputs=["x"],
            outputs=["y"],
        )

        layer = lbann.Relu(lbann.Input(name="x"), )
        lbannRelu = parseLbannLayer(layer.export_proto(),
                                    {"x_0": (N, C, H, W)})["nodes"]

        self._assertFields(lbannRelu, onnxRelu)
Example #15
File: resnet.py Project: oyamay/lbann
 def forward(self, x):
     self.instance += 1
     conv = self.conv(x)
     bn = lbann.BatchNormalization(
         conv,
         weights=self.bn_weights,
         statistics_group_size=(-1 if self.bn_statistics_group_size == 0
                                else self.bn_statistics_group_size),
         name='{0}_bn_instance{1}'.format(self.name, self.instance))
     if self.relu:
         return lbann.Relu(bn,
                           name='{0}_relu_instance{1}'.format(
                               self.name, self.instance))
     else:
         return bn
Example #16
 def encode(self, x):
     x = lbann.Reshape(x, dims=utils.str_list([-1, self.input_dim]))
     for i, dim in enumerate(self.hidden_dims):
         x = lbann.ChannelwiseFullyConnected(
             x,
             weights=self.weights[i],
             output_channel_dims=dim,
             bias=False,
         )
         x = lbann.Relu(x)
     x = lbann.ChannelwiseFullyConnected(
         x,
         weights=self.weights[-1],
         output_channel_dims=self.output_dim,
         bias=False,
     )
     return x
Example #17
 def decode(self, x):
     x = lbann.Reshape(x, dims=utils.str_list([-1, self.output_dim]))
     for i in range(len(self.hidden_dims)):
         x = lbann.ChannelwiseFullyConnected(
             x,
             weights=self.weights[-i - 1],
             output_channel_dims=self.hidden_dims[-i - 1],
             transpose=True,
             bias=False,
         )
         x = lbann.Relu(x)
     x = lbann.ChannelwiseFullyConnected(
         x,
         weights=self.weights[0],
         output_channel_dims=self.input_dim,
         transpose=True,
         bias=False,
     )
     return x
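
Examples #16 and #17 form a tied-weight autoencoder: encode applies self.weights in order, and decode walks the same list in reverse with transpose=True. A sketch of how such a shared list might be created, assuming input_dim, hidden_dims, and output_dim are the module attributes used above:

    dims = [input_dim] + list(hidden_dims) + [output_dim]
    # One Weights object per encoder matrix; decode reuses each one transposed.
    weights = [
        lbann.Weights(initializer=lbann.GlorotNormalInitializer(),
                      name=f'layer{i}_matrix')
        for i in range(len(dims) - 1)
    ]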
Example #18
def transition_layer(current_block_num,
                     cumulative_layer_num,
                     parent_node,
                     num_output_channels
                     ):
    batch_normalization_node = standard_batchnorm(parent_node)
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer BatchNormalization. cumulative_layer_num={n}'.format(
        b=current_block_num,  n=cumulative_layer_num))

    relu_node = lbann.Relu(batch_normalization_node)
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Relu. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    convolution_node = lbann.Convolution(
        relu_node,
        conv_dims_i=1,
        conv_pads_i=0,
        conv_strides_i=1,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_output_channels
    )
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Convolution. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    # 2x2 average pool, stride 2
    pooling_node = lbann.Pooling(
        convolution_node,
        num_dims=2,
        pool_dims_i=2,
        pool_mode='average',
        pool_pads_i=0,
        pool_strides_i=2
    )
    cumulative_layer_num += 1
    log('dense_block={b} > transition_layer Pooling. cumulative_layer_num={n}'.format(
        b=current_block_num, n=cumulative_layer_num))

    return pooling_node, cumulative_layer_num
Example #19
def initial_layer(cumulative_layer_num,
                  images_node,
                  num_initial_channels
                  ):
    # 7x7 conv, stride 2
    convolution_node = lbann.Convolution(
        images_node,
        conv_dims_i=7,
        conv_pads_i=3,
        conv_strides_i=2,
        has_bias=False,
        num_dims=2,
        num_output_channels=num_initial_channels
    )
    cumulative_layer_num += 1
    log('initial_layer Convolution. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    batch_normalization_node = standard_batchnorm(convolution_node)
    cumulative_layer_num += 1
    log('initial_layer BatchNormalization. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    relu_node = lbann.Relu(batch_normalization_node)
    cumulative_layer_num += 1
    log('initial_layer Relu. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    # 3x3 max pool, stride 2
    pooling_node = lbann.Pooling(
        relu_node,
        num_dims=2,
        pool_dims_i=3,
        pool_mode='max',
        pool_pads_i=1,
        pool_strides_i=2
        )
    cumulative_layer_num += 1
    log('initial_layer Pooling. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    return pooling_node, cumulative_layer_num
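
The stem halves the spatial resolution twice: once in the stride-2 convolution and once in the stride-2 pool. The standard output-size formula makes this concrete; the 224x224 input below is an assumption for illustration:

    def out_size(in_size, kernel, pad, stride):
        # floor((in + 2*pad - kernel) / stride) + 1
        return (in_size + 2 * pad - kernel) // stride + 1

    after_conv = out_size(224, 7, 3, 2)         # 112
    after_pool = out_size(after_conv, 3, 1, 2)  # 56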
Example #20
 def forward(self, x):
     self.instance += 1
     x = self.fc(x)
     x = lbann.EntrywiseBatchNormalization(
         x,
         weights=[self.bn_weights[0], self.bn_weights[1]],
         decay=0.9,
         epsilon=1e-5,
         name='{0}_bn_instance{1}'.format(self.name, self.instance),
         data_layout=self.data_layout)
     x = lbann.EntrywiseScaleBias(
         x,
         weights=self.bn_weights[2],
         name='{0}_bn_scalebias_instance{1}'.format(self.name,
                                                    self.instance),
         data_layout=self.data_layout)
     return lbann.Relu(x,
                       name='{0}_relu_instance{1}'.format(
                           self.name, self.instance),
                       data_layout=self.data_layout)
Example #21
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               dataset=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of classes as targets (default: None)
        dataset (str): Preset data set to use. Either a dataset parameter has to be
                       supplied or all of num_vertices, node_features, and
                       num_classes have to be supplied. (default: None)
        kernel_type (str): Graph Kernel to use in model. Expected one of
                            GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None the model description,
                          GPU usage, training_output, and timer is reported.
                          (default: None)
        num_epochs (int): Number of epochs to run (default: 1)
    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
                       presets, and graph kernels.
    '''

    assert num_vertices != dataset  # Ensure at least one of the values is set

    if dataset is not None:
        assert num_vertices is None

        if dataset == 'MNIST':
            num_vertices = 75
            num_classes = 10
            node_features = 1

        elif dataset == 'PROTEINS':
            num_vertices = 100
            num_classes = 2
            node_features = 3
        else:
            raise Exception("Unkown Dataset")

    assert num_vertices is not None
    assert num_classes is not None
    assert node_features is not None

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(target_mode='classification')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes )
    # Input should have at least two children since the target is classification

    data = lbann_Graph_Data(input_, num_vertices, node_features, num_classes)

    feature_matrix = data.x
    adj_matrix = data.adj
    target = data.y

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    if kernel_type == 'GIN':
        x = GINConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GCN':
        x = GCNConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'Graph':
        x = GraphConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GatedGraph':
        x = GATConvLayer(feature_matrix, adj_matrix)
    else:
        raise ValueError(
            'Invalid Graph kernel specifier "{}" received. Expected one of:\
                    GIN, GCN, Graph or GatedGraph'.format(kernel_type))

    out_channel = x.shape[1]
    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    x = x.get_mat(out_channel)

    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # X is now a vector with output_channel dimensions

    x = lbann.Reshape(x, dims=str_list([out_channel]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription(
        )  #Prints initial Model after Setup
        training_output = lbann.CallbackPrint(
            interval=1,
            print_global_stat_only=False)  #Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
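
Since the presets fill in the graph dimensions, the simplest invocation passes only a dataset name. An illustrative call, assuming the surrounding module provides the graph kernel layers and helpers used above:

    # MNIST preset: 75 vertices, 1 node feature, 10 classes; default callbacks.
    model = make_model(dataset='MNIST', kernel_type='GCN', num_epochs=5)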
Example #22
def make_model(NUM_NODES,
               NUM_EDGES,
               NUM_NODES_FEATURES,
               NUM_EDGE_FEATURES,
               EMBEDDING_DIM,
               EDGE_EMBEDDING_DIM,
               NUM_OUT_FEATURES,
               NUM_EPOCHS,
               NUM_GROUPS=0):
    """ Creates an LBANN model for the OGB-LSC PPQM4M Dataset

	Args:
		NUM_NODES (int): The number of nodes in the largest graph in the dataset (51 for LSC-PPQM4M)
  	NUM_EDGES (int): The number of edges in the largest graph in the dataset (118 for LSC-PPQM4M)
  	NUM_NODES_FEATURES (int): The dimensionality of the input node features vector (9 for LSC-PPQM4M)
  	NUM_EDGE_FEATURES (int): The dimensionality of the input edge feature vectors (3 for LSC-PPQM4M)
  	EMBEDDING_DIM (int): The embedding dimensionality of the node feature vector
  	EDGE_EMBEDDING_DIM (int): The embedding dimensionality of the edge feature vector
  	NUM_OUT_FEATURES (int): The dimensionality of the node feature vectors after graph convolutions
  	NUM_EPOCHS (int): The number of epochs to train the network
  	NUM_GROUPS (int): The number of channel groups for distconv channelwise  fully connected layer (default : 0)
	Returns:
		(Model): lbann model object
		"""
    in_channel = EMBEDDING_DIM
    out_channel = NUM_OUT_FEATURES
    output_dimension = 1

    _input = lbann.Input(data_field='samples')
    node_feature_mat, neighbor_feature_mat, edge_feature_mat, edge_indices, target = \
        graph_data_splitter(_input,
                            NUM_NODES,
                            NUM_EDGES,
                            NUM_NODES_FEATURES,
                            NUM_EDGE_FEATURES,
                            EMBEDDING_DIM,
                            EDGE_EMBEDDING_DIM)

    x = NNConvLayer(node_feature_mat, neighbor_feature_mat, edge_feature_mat,
                    edge_indices, in_channel, out_channel, EDGE_EMBEDDING_DIM,
                    NUM_NODES, NUM_EDGES, NUM_GROUPS)

    for i, num_neurons in enumerate([256, 128, 32, 8], 1):
        x = lbann.FullyConnected(x,
                                 num_neurons=num_neurons,
                                 name="hidden_layer_{}".format(i))
        x = lbann.Relu(x, name='hidden_layer_{}_activation'.format(i))
    x = lbann.FullyConnected(x, num_neurons=output_dimension, name="output")

    loss = lbann.MeanAbsoluteError(x, target)

    layers = lbann.traverse_layer_graph(_input)
    training_output = lbann.CallbackPrint(interval=1,
                                          print_global_stat_only=False)
    gpu_usage = lbann.CallbackGPUMemoryUsage()
    timer = lbann.CallbackTimer()

    callbacks = [training_output, gpu_usage, timer]
    model = lbann.Model(NUM_EPOCHS,
                        layers=layers,
                        objective_function=loss,
                        callbacks=callbacks)
    return model
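
The docstring fixes the LSC-PPQM4M sizes, so a call sketch follows directly from it; the embedding and output widths below are illustrative choices, not values from the example:

    model = make_model(NUM_NODES=51,
                       NUM_EDGES=118,
                       NUM_NODES_FEATURES=9,
                       NUM_EDGE_FEATURES=3,
                       EMBEDDING_DIM=100,      # illustrative
                       EDGE_EMBEDDING_DIM=16,  # illustrative
                       NUM_OUT_FEATURES=32,    # illustrative
                       NUM_EPOCHS=10)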
Example #23
File: lenet.py Project: oyamay/lbann
# Input data
input_ = lbann.Input(target_mode='classification')
images = lbann.Identity(input_)
labels = lbann.Identity(input_)

# LeNet
x = lbann.Convolution(images,
                      num_dims=2,
                      num_output_channels=6,
                      num_groups=1,
                      conv_dims_i=5,
                      conv_strides_i=1,
                      conv_dilations_i=1,
                      has_bias=True)
x = lbann.Relu(x)
x = lbann.Pooling(x,
                  num_dims=2,
                  pool_dims_i=2,
                  pool_strides_i=2,
                  pool_mode="max")
x = lbann.Convolution(x,
                      num_dims=2,
                      num_output_channels=16,
                      num_groups=1,
                      conv_dims_i=5,
                      conv_strides_i=1,
                      conv_dilations_i=1,
                      has_bias=True)
x = lbann.Relu(x)
x = lbann.Pooling(x,
                  num_dims=2,
                  pool_dims_i=2,
                  pool_strides_i=2,
                  pool_mode="max")
Example #24
args = parser.parse_args()

# Start of layers

# Construct layer graph
input_ = lbann.Input(name='data')
image = lbann.Split(input_, name='image')
dummy = lbann.Dummy(input_, name='dummy')

# Encoder
encode1 = lbann.FullyConnected(image,
                               name="encode1",
                               num_neurons=1000,
                               has_bias=True)

encode1neuron = lbann.Relu(encode1, name="encode1neuron")

encode2 = lbann.FullyConnected(encode1neuron,
                               name="encode2",
                               num_neurons=500,
                               has_bias=True)

encode2neuron = lbann.Relu(encode2, name="encode2neuron")

encode3 = lbann.FullyConnected(encode2neuron,
                               name="encode3",
                               num_neurons=250,
                               has_bias=True)

encode3neuron = lbann.Relu(encode3, name="encode3neuron")
Example #25
    print("Dataset must be cifar10 or imagenet. Try again.")
    exit()

# Construct layer graph
input_ = lbann.Input(name='input')
image = lbann.Identity(input_, name='images')
dummy = lbann.Dummy(input_, name='labels')

# Encoder
encode1 = lbann.FullyConnected(image,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

dropout1 = lbann.Dropout(relu1,
                         name="dropout1",
                         data_layout="model_parallel",
                         keep_prob=0.8)

decode1 = lbann.FullyConnected(dropout1,
                               name="decode1",
                               data_layout="model_parallel",
                               hint_layer=image,
                               has_bias=True)

reconstruction = lbann.Sigmoid(decode1,
                               name="reconstruction",
                               data_layout="model_parallel")
Example #26
# Start of layers

# Construct layer graph
input_ = lbann.Input(name='data')
finetunedata = lbann.Split(input_, name='finetunedata')
label = lbann.Split(input_, name='label')

# Encoder
encode1 = lbann.FullyConnected(finetunedata,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=2000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

encode2 = lbann.FullyConnected(relu1,
                               name="encode2",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)

relu2 = lbann.Relu(encode2, name="relu2", data_layout="model_parallel")

encode3 = lbann.FullyConnected(relu2,
                               name="encode3",
                               data_layout="model_parallel",
                               num_neurons=500,
                               has_bias=True)
Example #27
def densenet(statistics_group_size, version, cumulative_layer_num,
             images_node):
    if version == 121:
        growth_rate = 32  # k in the paper
        layers_per_block = (6, 12, 24, 16)
        num_initial_features = 64
    elif version == 161:
        growth_rate = 48  # k in the paper
        layers_per_block = (96, 48, 36, 24)
        num_initial_features = 96
    else:
        raise Exception('Invalid version={v}.'.format(v=version))
    batch_norm_size = 4

    parent_node, cumulative_layer_num = initial_layer(statistics_group_size,
                                                      cumulative_layer_num,
                                                      images_node,
                                                      num_initial_features)
    num_features = num_initial_features
    # Start counting dense blocks at 1.
    for current_block_num, num_layers in enumerate(layers_per_block, 1):
        parent_nodes, cumulative_layer_num = dense_block(
            statistics_group_size,
            cumulative_layer_num,
            parent_node,
            batch_norm_size=batch_norm_size,
            current_block_num=current_block_num,
            growth_rate=growth_rate,
            num_layers=num_layers,
            num_initial_channels=num_initial_features)
        # num_features += num_layers * growth_rate
        for node in parent_nodes[1:]:
            num_features += node.num_output_channels
        parent_node = lbann.Concatenation(parent_nodes)
        cumulative_layer_num += 1
        log('densenet Concatenation. cumulative_layer_num={n}'.format(
            n=cumulative_layer_num))
        if current_block_num != len(layers_per_block):
            parent_node, cumulative_layer_num = transition_layer(
                statistics_group_size,
                current_block_num,
                cumulative_layer_num,
                parent_node,
                # In Python 3, this is integer division.
                num_output_channels=num_features // 2,
            )
            num_features //= 2

    batch_normalization_node = standard_batchnorm(statistics_group_size,
                                                  parent_node)
    cumulative_layer_num += 1
    log('densenet BatchNormalization. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    relu_node = lbann.Relu(batch_normalization_node)
    cumulative_layer_num += 1
    log('densenet Relu. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    probs = classification_layer(cumulative_layer_num, relu_node)
    return probs
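
The num_features bookkeeping (add num_layers * growth_rate per dense block, halve at each transition) can be checked on its own. A quick sketch for version 121, using only the constants from the function above:

    growth_rate, layers_per_block, num_features = 32, (6, 12, 24, 16), 64
    for i, num_layers in enumerate(layers_per_block, 1):
        num_features += num_layers * growth_rate
        if i != len(layers_per_block):
            num_features //= 2
    print(num_features)  # 1024 channels enter the final BatchNormalization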
Example #28
    def forward(self, x, memory, src_mask=None, tgt_mask=None):
        """Apply Transformer decoder layer.

        Args:
            x (lbann.Layer): Sequence of input vectors.
            memory (lbann.Layer): Sequence of vectors produced by
                Transformer encoder stack.
            src_mask (lbann.Layer, optional): Attention mask for
                second attention module (attends to both `x` and
                `memory`).
            tgt_mask (lbann.Layer, optional): Attention mask for first
                attention module (attends only to `x`).

        Returns:
            lbann.Layer: Sequence of output vectors.

        """
        self.instance += 1
        name = f'{self.name}_instance{self.instance}'

        # Self-attention with residual connection
        y = self.attention1(x, x, x, mask=tgt_mask)
        if self.dropout_prob > 0:
            y = lbann.Dropout(
                y,
                keep_prob=1 - self.dropout_prob,
                name=f'{name}_drop1',
            )
        z = lbann.Sum(x, y, name=f'{name}_sum1')
        z = lbann.InstanceNorm(z, name=f'{name}_norm1')
        x = z

        # Attention on encoder output with residual connection
        y = self.attention2(x, memory, memory, mask=src_mask)
        if self.dropout_prob > 0:
            y = lbann.Dropout(
                y,
                keep_prob=1 - self.dropout_prob,
                name=f'{name}_drop2',
            )
        z = lbann.Sum(x, y, name=f'{name}_sum2')
        z = lbann.InstanceNorm(z, name=f'{name}_norm2')
        x = z

        # Feedforward network with residual connection
        y = lbann.ChannelwiseFullyConnected(
            x,
            weights=self.fc1_weights,
            output_channel_dims=[self.feedforward_dim],
            name=f'{name}_fc1',
        )
        y = lbann.Relu(y, name=f'{name}_relu1')
        if self.dropout_prob > 0:
            y = lbann.Dropout(
                y,
                keep_prob=1 - self.dropout_prob,
                name=f'{name}_drop3',
            )
        y = lbann.ChannelwiseFullyConnected(
            y,
            weights=self.fc2_weights,
            output_channel_dims=[self.embed_dim],
            name=f'{name}_fc2',
        )
        if self.dropout_prob > 0:
            y = lbann.Dropout(
                y,
                keep_prob=1 - self.dropout_prob,
                name=f'{name}_drop4',
            )
        z = lbann.Sum(x, y, name=f'{name}_sum3')
        z = lbann.InstanceNorm(z, name=f'{name}_norm3')
        return z
Example #29
# Start of layers

# Construct layer graph
input_ = lbann.Input(name='input', target_mode="reconstruction")
data = lbann.Split(input_, name='data')
dummy = lbann.Dummy(input_, name='dummy')

# Encoder
encode1 = lbann.FullyConnected(data,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=2000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

encode2 = lbann.FullyConnected(relu1,
                               name="encode2",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)

relu2 = lbann.Relu(encode2, name="relu2", data_layout="model_parallel")

encode3 = lbann.FullyConnected(relu2,
                               name="encode3",
                               data_layout="model_parallel",
                               num_neurons=500,
                               has_bias=True)
Example #30
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of classes as targets (default: None)

        kernel_type (str): Graph Kernel to use in model. Expected one of
                            GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None the model description,
                          GPU usage, training_output, and timer is reported.
                          (default: None)
        num_epochs (int): Number of epochs to run (default: 1)
    Returns:
        (lbann.Model) : A model object with the supplied callbacks, dataset
                               presets, and graph kernels.
    '''

    num_vertices = 100
    num_classes = 2
    node_feature_size = 3
    max_edges = 415

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(data_field='samples')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes )

    data = Graph_Data_Parser(input_, num_vertices, node_feature_size,
                             max_edges, num_classes)

    feature_matrix = data['node_features']
    source_indices = data['source_indices']
    target_indices = data['target_indices']
    target = data['target']

    #----------------------------------
    # Select Graph Convolution
    #----------------------------------

    output_channels = 16
    graph_kernel_op = None
    if kernel_type == 'GIN':
        graph_kernel_op = GINConvLayer
    elif kernel_type == 'GCN':
        graph_kernel_op = GCNConvLayer
    elif kernel_type == 'Graph':
        graph_kernel_op = GraphConvLayer
    elif kernel_type == 'GatedGraph':
        graph_kernel_op = GATConvLayer
    else:
        raise ValueError(
            'Invalid Graph kernel specifier "{}" received. Expected one of:\
                    GIN, GCN, Graph or GatedGraph'.format(kernel_type))
    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    x = graph_kernel_op(feature_matrix, source_indices, target_indices,
                        num_vertices, max_edges, node_feature_size,
                        output_channels)
    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")

    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # X is now a vector with output_channel dimensions

    x = lbann.Reshape(x, dims=str_list([output_channels]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription(
        )  #Prints initial Model after Setup
        training_output = lbann.CallbackPrint(
            interval=1,
            print_global_stat_only=False)  #Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
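
Unlike example #21, this version hardcodes the graph dimensions internally (100 vertices, 3 node features, 2 classes), so a minimal invocation only chooses the kernel and epoch count:

    model = make_model(kernel_type='GCN', num_epochs=1)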