Example #1
def PytorchLinear(x,
                  input_shape,
                  hidden_size,
                  weights=[],
                  name="",
                  return_dims=False):
    """Apply a linear layer over the last dimension of x, as in torch.nn.Linear."""
    # Inputs with more than two dimensions are flattened to 2-D so the
    # channel-wise layer can treat the last dimension as features.
    need_reshape = len(input_shape) > 2
    if need_reshape:
        new_in_shape = (np.prod(input_shape[:-1]), input_shape[-1])
        x = lbann.Reshape(x, dims=str_list(new_in_shape))

    if len(input_shape) == 1:
        y = lbann.FullyConnected(x,
                                 num_neurons=hidden_size,
                                 weights=weights,
                                 name=name)
    else:
        y = lbann.ChannelwiseFullyConnected(x,
                                            output_channel_dims=[hidden_size],
                                            weights=weights,
                                            name=name)

    if need_reshape:
        new_out_shape = input_shape[:-1] + (hidden_size, )
        y = lbann.Reshape(y, dims=str_list(new_out_shape))
    else:
        new_out_shape = (input_shape[0], hidden_size)

    if return_dims:
        return y, new_out_shape
    return y
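A minimal usage sketch, assuming `np` is NumPy and `str_list` comes from `lbann.util`; the shapes and names below are illustrative:

import numpy as np
import lbann
from lbann.util import str_list

# Hypothetical 3-D activation of shape (8, 3, 16).
x = lbann.Reshape(lbann.Input(data_field='samples'),
                  dims=str_list([8, 3, 16]))

# Linear map over the last dimension: (8, 3, 16) -> (8, 3, 32).
y, out_shape = PytorchLinear(x, (8, 3, 16), 32,
                             name="proj", return_dims=True)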
Example #2
def classification_layer(cumulative_layer_num,
                         parent_node):
    # 7x7 global average pool
    pooling_node = lbann.Pooling(
        parent_node,
        num_dims=2,
        pool_dims_i=7,
        pool_mode='average',
        pool_pads_i=1,
        pool_strides_i=1
    )
    cumulative_layer_num += 1
    log('classification_layer Pooling. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    fully_connected_node = lbann.FullyConnected(
        pooling_node,
        num_neurons=1000,
        has_bias=False
    )
    cumulative_layer_num += 1
    log('classification_layer FullyConnected. cumulative_layer_num={n}'.format(
        n=cumulative_layer_num))

    probabilities = lbann.Softmax(fully_connected_node)
    return probabilities
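A hedged usage sketch; `last_conv_node` stands in for the final convolutional output of the surrounding DenseNet script:

import lbann

# Stand-in for the output of the convolutional stack.
last_conv_node = lbann.Relu(lbann.Input(data_field='samples'))

cumulative_layer_num = 2  # illustrative count of layers built so far
probs = classification_layer(cumulative_layer_num, last_conv_node)
# Note: ints are passed by value, so the increments inside
# classification_layer are not visible to the caller.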
Example #3
 def forward(self, x):
     self.instance += 1
     name = '{0}_instance{1}'.format(self.name, self.instance)
     y = lbann.FullyConnected(x,
                              weights=self.weights,
                              name=(name+'_fc' if self.activation else name),
                              data_layout=self.data_layout,
                              num_neurons=self.size,
                              has_bias=self.bias)
     if self.activation:
         return self.activation(y,
                                name=name+'_activation',
                                data_layout=self.data_layout)
     else:
         return y
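This `forward` appears to be the body of LBANN's `FullyConnectedModule`; if so, typical usage looks like the sketch below (sizes are illustrative):

import lbann
from lbann.modules import FullyConnectedModule

x = lbann.Input(data_field='samples')
fc = FullyConnectedModule(size=64, activation=lbann.Relu)
y = fc(x)  # each call increments self.instance, so layer names stay unique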
Example #4
File: combo.py Project: benson31/lbann
 def forward(self, x):
     x_slice = lbann.Slice(x,
                           axis=0,
                           slice_points="0 921 4750 8579",
                           name='inp_slice')
     gene = self.geneT(lbann.Identity(x_slice))
     drug1 = self.drug1T(lbann.Identity(x_slice))
     drug2 = self.drug2T(lbann.Identity(x_slice))
     concat = self.concatT(
         lbann.Concatenation([gene, drug1, drug2],
                             name=self.name + 'concat'))
     response_fc = lbann.FullyConnected(concat,
                                        num_neurons=1,
                                        has_bias=True)
     return response_fc
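The three `Identity` children consume successive segments of `x_slice` in creation order, so `gene` covers features 0-920, `drug1` 921-4749, and `drug2` 4750-8578. A minimal sketch of that `Slice` behavior with illustrative sizes:

import lbann

x = lbann.Input(data_field='samples')        # assume a length-10 vector
s = lbann.Slice(x, axis=0, slice_points="0 4 7 10")
a = lbann.Identity(s)  # elements 0..3
b = lbann.Identity(s)  # elements 4..6
c = lbann.Identity(s)  # elements 7..9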
Example #5
    def _test_o2l_layer_Gemm(self, hasBias):
        M, N, K = (100, 200, 300)

        lbannFC = lbann.FullyConnected(lbann.Input(),
                                       num_neurons=N,
                                       has_bias=hasBias)

        inputShapes = {"x": [M, K]}
        paramShapes = {"W": [N, K]}
        if hasBias:
            paramShapes["b"] = [N]

        node = onnx.helper.make_node("Gemm",
                                     inputs=["x", "W"] +
                                     (["b"] if hasBias else []),
                                     outputs=["y"],
                                     transB=1)
        onnxFC = convertOnnxNode(node, inputShapes,
                                 paramShapes).fully_connected

        self._assertFields(lbannFC, onnxFC)
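The test hinges on ONNX `Gemm` with `transB=1` computing `y = x @ W.T (+ b)`, which matches `FullyConnected` with `num_neurons=N`. A library-independent NumPy check of the shape identity:

import numpy as np

M, N, K = 100, 200, 300
x = np.random.rand(M, K)
W = np.random.rand(N, K)  # transB=1: stored [N, K], used transposed
b = np.random.rand(N)

y = x @ W.T + b           # (M, K) @ (K, N) -> (M, N)
assert y.shape == (M, N)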
Example #6
 def forward_generator(self, z, ps=None):
     x = lbann.FullyConnected(z,
                              num_neurons=np.prod(self.outc_dims),
                              has_bias=True,
                              device=self.g_device)
     x = lbann.Reshape(x,
                       dims=list2str(self.outc_dims),
                       name='gen_zin_reshape',
                       device=self.g_device)
     x = lbann.Relu(self.g_convT[0](x),
                    name='g_relu0',
                    parallel_strategy=ps,
                    device=self.g_device)
     x = lbann.Relu(self.g_convT[1](x),
                    name='g_relu1',
                    parallel_strategy=ps,
                    device=self.g_device)
     x = lbann.Relu(self.g_convT[2](x),
                    name='g_relu2',
                    parallel_strategy=ps,
                    device=self.g_device)
     return self.g_convT3(x)
Example #7
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               dataset=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of target classes (default: None)
        dataset (str): Preset dataset to use. Either the dataset parameter or
                       all of num_vertices, node_features, and num_classes
                       must be supplied. (default: None)
        kernel_type (str): Graph kernel to use in the model. Expected one of
                           GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None, the model
                          description, GPU usage, training output, and timer
                          are reported. (default: None)
        num_epochs (int): Number of epochs to run (default: 1)
    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
                       presets, and graph kernel.
    '''

    assert num_vertices != dataset  # Ensure at least one of the values is set

    if dataset is not None:
        assert num_vertices is None

        if dataset == 'MNIST':
            num_vertices = 75
            num_classes = 10
            node_features = 1

        elif dataset == 'PROTEINS':
            num_vertices = 100
            num_classes = 2
            node_features = 3
        else:
            raise Exception("Unknown Dataset")

    assert num_vertices is not None
    assert num_classes is not None
    assert node_features is not None

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(target_mode='classification')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes)
    # The input should have at least two children, since the target is classification

    data = lbann_Graph_Data(input_, num_vertices, node_features, num_classes)

    feature_matrix = data.x
    adj_matrix = data.adj
    target = data.y

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    if kernel_type == 'GIN':
        x = GINConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GCN':
        x = GCNConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'Graph':
        x = GraphConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GatedGraph':
        x = GATConvLayer(feature_matrix, adj_matrix)
    else:
        raise ValueError(
            'Invalid graph kernel specifier "{}" received. Expected one of: '
            'GIN, GCN, Graph, or GatedGraph'.format(kernel_type))

    out_channel = x.shape[1]
    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    x = x.get_mat(out_channel)

    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # X is now a vector with output_channel dimensions

    x = lbann.Reshape(x, dims=str_list([out_channel]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription(
        )  # Prints initial model after setup
        training_output = lbann.CallbackPrint(
            interval=1,
            print_global_stat_only=False)  # Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
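A hedged invocation sketch; the trainer and optimizer shown are typical companions, not part of this example:

import lbann

model = make_model(dataset='MNIST', kernel_type='GIN', num_epochs=10)
opt = lbann.SGD(learn_rate=0.01)             # optimizer choice is illustrative
trainer = lbann.Trainer(mini_batch_size=64)  # batch size is illustrative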
Example #8
File: lenet.py Project: oyamay/lbann
x = lbann.Pooling(x,
                  num_dims=2,
                  pool_dims_i=2,
                  pool_strides_i=2,
                  pool_mode="max")
x = lbann.Convolution(x,
                      num_dims=2,
                      num_output_channels=16,
                      num_groups=1,
                      conv_dims_i=5,
                      conv_strides_i=1,
                      conv_dilations_i=1,
                      has_bias=True)
x = lbann.Relu(x)
x = lbann.Pooling(x,
                  num_dims=2,
                  pool_dims_i=2,
                  pool_strides_i=2,
                  pool_mode="max")
x = lbann.FullyConnected(x, num_neurons=120, has_bias=True)
x = lbann.Relu(x)
x = lbann.FullyConnected(x, num_neurons=84, has_bias=True)
x = lbann.Relu(x)
x = lbann.FullyConnected(x, num_neurons=10, has_bias=True)
probs = lbann.Softmax(x)

# Loss function and accuracy
loss = lbann.CrossEntropy(probs, labels)
acc = lbann.CategoricalAccuracy(probs, labels)

# ----------------------------------
# Setup experiment
# ----------------------------------

# Setup model
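The excerpt stops at the experiment setup. A hedged sketch of the usual continuation, assuming hyperparameter values and an `images` input layer defined earlier in the file:

mini_batch_size = 64  # assumed value
num_epochs = 20       # assumed value
model = lbann.Model(num_epochs,
                    layers=lbann.traverse_layer_graph([images, labels]),
                    objective_function=loss,
                    metrics=[lbann.Metric(acc, name='accuracy', unit='%')],
                    callbacks=[lbann.CallbackPrintModelDescription(),
                               lbann.CallbackPrint(),
                               lbann.CallbackTimer()])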
Example #9
File: vae_mnist.py Project: benson31/lbann
    help='Data reader options: "numpy_npz_int16", or "mnist" '
         '(default: data_reader_mnist.prototext)')
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()

# Start of layers

# Construct layer graph
input_ = lbann.Input(name='data')
image = lbann.Split(input_, name='image')
dummy = lbann.Dummy(input_, name='dummy')

# Encoder
encode1 = lbann.FullyConnected(image,
                               name="encode1",
                               num_neurons=1000,
                               has_bias=True)

encode1neuron = lbann.Relu(encode1, name="encode1neuron")

encode2 = lbann.FullyConnected(encode1neuron,
                               name="encode2",
                               num_neurons=500,
                               has_bias=True)

encode2neuron = lbann.Relu(encode2, name="encode2neuron")

encode3 = lbann.FullyConnected(encode2neuron,
                               name="encode3",
                               num_neurons=250,
                               has_bias=True)
Example #10
    'scheduler job name (default: data_readers/data_reader_candle_pilot1.prototext)'
)
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()

# Start of layers

# Construct layer graph
input_ = lbann.Input(name='input', target_mode="reconstruction")
data = lbann.Split(input_, name='data')
dummy = lbann.Dummy(input_, name='dummy')

# Encoder
encode1 = lbann.FullyConnected(data,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=2000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

encode2 = lbann.FullyConnected(relu1,
                               name="encode2",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)

relu2 = lbann.Relu(encode2, name="relu2", data_layout="model_parallel")

encode3 = lbann.FullyConnected(relu2,
                               name="encode3",
Example #11
    num_labels = 1000
elif dataset == 'cifar10':
    num_labels = 10
else:
    print("Dataset must be cifar10 or imagenet. Try again.")
    exit()

# Construct layer graph
input_ = lbann.Input(name='input')
image = lbann.Identity(input_, name='images')
dummy = lbann.Dummy(input_, name='labels')

# Encoder
encode1 = lbann.FullyConnected(image,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

dropout1 = lbann.Dropout(relu1,
                         name="dropout1",
                         data_layout="model_parallel",
                         keep_prob=0.8)

decode1 = lbann.FullyConnected(dropout1,
                               name="decode1",
                               data_layout="model_parallel",
                               hint_layer=image,
                               has_bias=True)
Example #12
    'scheduler job name (default: data_readers/data_reader_candle_pilot1.prototext)'
)
lbann.contrib.args.add_optimizer_arguments(parser, default_learning_rate=0.1)
args = parser.parse_args()

# Start of layers

# Construct layer graph
input_ = lbann.Input(name='data')
finetunedata = lbann.Split(input_, name='finetunedata')
label = lbann.Split(input_, name='label')

# Encoder
encode1 = lbann.FullyConnected(finetunedata,
                               name="encode1",
                               data_layout="model_parallel",
                               num_neurons=2000,
                               has_bias=True)

relu1 = lbann.Relu(encode1, name="relu1", data_layout="model_parallel")

encode2 = lbann.FullyConnected(relu1,
                               name="encode2",
                               data_layout="model_parallel",
                               num_neurons=1000,
                               has_bias=True)

relu2 = lbann.Relu(encode2, name="relu2", data_layout="model_parallel")

encode3 = lbann.FullyConnected(relu2,
                               name="encode3",
Example #13
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of target classes (default: None)

        kernel_type (str): Graph kernel to use in the model. Expected one of
                           GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None, the model
                          description, GPU usage, training output, and timer
                          are reported. (default: None)
        num_epochs (int): Number of epochs to run (default: 1)
    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
                       presets, and graph kernel.
    '''

    num_vertices = 100
    num_classes = 2
    node_feature_size = 3
    max_edges = 415

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(data_field='samples')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes )

    data = Graph_Data_Parser(input_, num_vertices, node_feature_size,
                             max_edges, num_classes)

    feature_matrix = data['node_features']
    source_indices = data['source_indices']
    target_indices = data['target_indices']
    target = data['target']

    #----------------------------------
    # Select Graph Convolution
    #----------------------------------

    output_channels = 16
    graph_kernel_op = None
    if kernel_type == 'GIN':
        graph_kernel_op = GINConvLayer
    elif kernel_type == 'GCN':
        graph_kernel_op = GCNConvLayer
    elif kernel_type == 'Graph':
        graph_kernel_op = GraphConvLayer
    elif kernel_type == 'GatedGraph':
        graph_kernel_op = GATConvLayer
    else:
        raise ValueError(
            'Invalid graph kernel specifier "{}" received. Expected one of: '
            'GIN, GCN, Graph, or GatedGraph'.format(kernel_type))
    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    x = graph_kernel_op(feature_matrix, source_indices, target_indices,
                        num_vertices, max_edges, node_feature_size,
                        output_channels)
    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")

    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # X is now a vector with output_channel dimensions

    x = lbann.Reshape(x, dims=str_list([output_channels]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription(
        )  # Prints initial model after setup
        training_output = lbann.CallbackPrint(
            interval=1,
            print_global_stat_only=False)  # Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
Example #14
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of target classes (default: None)
        kernel_type (str): Graph kernel to use in the model. Expected one of
                           GCN or Graph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None, the model
                          description, GPU usage, training output, and timer
                          are reported. (default: None)
        num_epochs (int): Number of epochs to run (default: 1)
    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
                       presets, and graph kernel.
    '''

    num_vertices = 100
    num_classes = 2
    node_features = 3

    assert num_vertices is not None
    assert num_classes is not None
    assert node_features is not None

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(data_field='samples')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes)
    # The input should have at least two children, since the target is classification

    sample_dims = num_vertices * node_features + (num_vertices**2) + num_classes
    graph_dims = num_vertices * node_features + (num_vertices**2)
    feature_matrix_size = num_vertices * node_features

    graph_input = lbann.Slice(input_,
                              axis=0,
                              slice_points=str_list([
                                  0, feature_matrix_size, graph_dims,
                                  sample_dims
                              ]),
                              name="Graph_Input")

    feature_matrix = lbann.Reshape(graph_input,
                                   dims=str_list([num_vertices,
                                                  node_features]),
                                   name="Node_features")

    adj_matrix = lbann.Reshape(graph_input,
                               dims=str_list([num_vertices, num_vertices]),
                               name="Adj_Mat")

    target = lbann.Identity(graph_input, name="Target")
    target = lbann.Reshape(target, dims=str(num_classes))

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    if kernel_type == 'GCN':
        x = DGCN_layer(feature_matrix, adj_matrix, node_features)
    elif kernel_type == 'Graph':
        x = DGraph_Layer(feature_matrix, adj_matrix, node_features)
    else:
        raise ValueError(
            'Invalid graph kernel specifier "{}" received. Expected one of: '
            'GCN or Graph'.format(kernel_type))
    out_channel = 256
    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    # x is now a vector with out_channel dimensions
    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    x = lbann.Reshape(x, dims=str_list([out_channel]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=256, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)
    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription(
        )  # Prints initial model after setup
        training_output = lbann.CallbackPrint(
            interval=1,
            print_global_stat_only=False)  # Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]
    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
Example #15
                          pooling_layer=pool1.name)

deconv1 = lbann.Deconvolution(unpool1,
                              name="deconv1",
                              num_dims=2,
                              num_output_channels=3,
                              conv_dims='3 3',
                              conv_pads='0 0',
                              conv_strides='1 1',
                              has_bias=True,
                              has_vectors=True)

relu6 = lbann.Relu(deconv1, name="relu6")

decode1 = lbann.FullyConnected(relu6,
                               name="decode1",
                               hint_layer=image,
                               has_bias=True)

reconstruction = lbann.Sigmoid(decode1, name="reconstruction")

# Reconstruction
mean_squared_error = lbann.MeanSquaredError([reconstruction, image],
                                            name="mean_squared_error")

layer_term = lbann.LayerTerm(mean_squared_error)
scale_factor = lbann.L2WeightRegularization(scale=0.0005)
obj = lbann.ObjectiveFunction([layer_term, scale_factor])

metrics = [lbann.Metric(mean_squared_error, name=mean_squared_error.name)]

img_strategy = lbann.TrackSampleIDsStrategy(input_layer_name=input_.name,
Example #16
def make_model(NUM_NODES,
               NUM_EDGES,
               NUM_NODES_FEATURES,
               NUM_EDGE_FEATURES,
               EMBEDDING_DIM,
               EDGE_EMBEDDING_DIM,
               NUM_OUT_FEATURES,
               NUM_EPOCHS,
               NUM_GROUPS=0):
    """ Creates an LBANN model for the OGB-LSC PPQM4M Dataset

	Args:
		NUM_NODES (int): The number of nodes in the largest graph in the dataset (51 for LSC-PPQM4M)
  	NUM_EDGES (int): The number of edges in the largest graph in the dataset (118 for LSC-PPQM4M)
  	NUM_NODES_FEATURES (int): The dimensionality of the input node features vector (9 for LSC-PPQM4M)
  	NUM_EDGE_FEATURES (int): The dimensionality of the input edge feature vectors (3 for LSC-PPQM4M)
  	EMBEDDING_DIM (int): The embedding dimensionality of the node feature vector
  	EDGE_EMBEDDING_DIM (int): The embedding dimensionality of the edge feature vector
  	NUM_OUT_FEATURES (int): The dimensionality of the node feature vectors after graph convolutions
  	NUM_EPOCHS (int): The number of epochs to train the network
  	NUM_GROUPS (int): The number of channel groups for distconv channelwise  fully connected layer (default : 0)
	Returns:
		(Model): lbann model object
		"""
    in_channel = EMBEDDING_DIM
    out_channel = NUM_OUT_FEATURES
    output_dimension = 1

    _input = lbann.Input(data_field='samples')
    node_feature_mat, neighbor_feature_mat, edge_feature_mat, edge_indices, target = \
        graph_data_splitter(_input,
                            NUM_NODES,
                            NUM_EDGES,
                            NUM_NODES_FEATURES,
                            NUM_EDGE_FEATURES,
                            EMBEDDING_DIM,
                            EDGE_EMBEDDING_DIM)

    x = NNConvLayer(node_feature_mat, neighbor_feature_mat, edge_feature_mat,
                    edge_indices, in_channel, out_channel, EDGE_EMBEDDING_DIM,
                    NUM_NODES, NUM_EDGES, NUM_GROUPS)

    for i, num_neurons in enumerate([256, 128, 32, 8], 1):
        x = lbann.FullyConnected(x,
                                 num_neurons=num_neurons,
                                 name="hidden_layer_{}".format(i))
        x = lbann.Relu(x, name='hidden_layer_{}_activation'.format(i))
    x = lbann.FullyConnected(x, num_neurons=output_dimension, name="output")

    loss = lbann.MeanAbsoluteError(x, target)

    layers = lbann.traverse_layer_graph(_input)
    training_output = lbann.CallbackPrint(interval=1,
                                          print_global_stat_only=False)
    gpu_usage = lbann.CallbackGPUMemoryUsage()
    timer = lbann.CallbackTimer()

    callbacks = [training_output, gpu_usage, timer]
    model = lbann.Model(NUM_EPOCHS,
                        layers=layers,
                        objective_function=loss,
                        callbacks=callbacks)
    return model
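A usage sketch with the LSC-PPQM4M sizes quoted in the docstring; the embedding and output dimensions are illustrative:

model = make_model(NUM_NODES=51,
                   NUM_EDGES=118,
                   NUM_NODES_FEATURES=9,
                   NUM_EDGE_FEATURES=3,
                   EMBEDDING_DIM=100,      # illustrative
                   EDGE_EMBEDDING_DIM=16,  # illustrative
                   NUM_OUT_FEATURES=32,    # illustrative
                   NUM_EPOCHS=100)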
Example #17
# Input data
images = lbann.Input(data_field='samples')
labels = lbann.Input(data_field='labels')
has_bias = False
x = images

if args.model == "mlp":
    for i, num_neurons in enumerate([1000, 1000, num_classes]):
        if i:
            x = lbann.Relu(x)

        x = lbann.FullyConnected(
            x,
            num_neurons=num_neurons,
            has_bias=has_bias,
            name="ip{}".format(i + 1),
            weights=[
                lbann.Weights(initializer=lbann.LeCunNormalInitializer())
            ])

elif args.model == "cnn":
    for i, num_channels in enumerate([20, 50]):
        x = lbann.Convolution(x,
                              num_dims=2,
                              num_output_channels=num_channels,
                              conv_dims_i=5,
                              conv_pads_i=0,
                              conv_strides_i=1,
                              has_bias=has_bias,
                              name="conv{}".format(i + 1))
        x = lbann.Relu(x)