def set_up_experiment(args, input_, probs, labels):
    """Assemble the LBANN model, data reader, and optimizer for a run.

    Args:
        args: Parsed command-line arguments. Reads ``mini_batch_size``,
            ``num_epochs``, ``data_reader``, ``optimizer``,
            ``optimizer_learning_rate``, and ``prototext``.
        input_: Input layer at the root of the layer graph.
        probs: Softmax output layer of the network.
        labels: Label layer.

    Returns:
        tuple: ``(model, data_reader_proto, optimizer)``.
    """
    # Objective: cross-entropy plus L2 weight decay over every weight
    # reachable from the layer graph.
    cross_entropy = lbann.CrossEntropy([probs, labels])
    layers = list(lbann.traverse_layer_graph(input_))
    weights = set()
    for layer in layers:
        weights.update(layer.weights)
    # scale = weight decay
    l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)
    objective_function = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Metrics and callbacks: top-1/top-5 accuracy, step-wise LR drops.
    top1 = lbann.CategoricalAccuracy([probs, labels])
    top5 = lbann.TopKCategoricalAccuracy([probs, labels], k=5)
    metrics = [
        lbann.Metric(top1, name='top-1 accuracy', unit='%'),
        lbann.Metric(top5, name='top-5 accuracy', unit='%'),
    ]
    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackTimer(),
        lbann.CallbackDropFixedLearningRate(drop_epoch=[30, 60], amt=0.1),
    ]
    model = lbann.Model(args.mini_batch_size,
                        args.num_epochs,
                        layers=layers,
                        weights=weights,
                        objective_function=objective_function,
                        metrics=metrics,
                        callbacks=callbacks)

    # Data reader configuration is parsed from a prototext file.
    # NOTE(review): `txtf` is presumably `google.protobuf.text_format`
    # imported at module level -- confirm against the file header.
    data_reader_proto = lbann.lbann_pb2.LbannPB()
    with open(args.data_reader, 'r') as f:
        txtf.Merge(f.read(), data_reader_proto)
    data_reader_proto = data_reader_proto.data_reader

    # Optimizer: explicit SGD configuration, or whatever the contrib
    # helper derives from the remaining arguments.
    if args.optimizer == 'sgd':
        print('Creating sgd optimizer')
        optimizer = lbann.optimizer.SGD(
            learn_rate=args.optimizer_learning_rate,
            momentum=0.9,
            nesterov=True)
    else:
        optimizer = lbann.contrib.args.create_optimizer(args)

    # Optionally export the whole experiment to prototext.
    if args.prototext:
        lbann.proto.save_prototext(args.prototext,
                                   model=model,
                                   optimizer=optimizer,
                                   data_reader=data_reader_proto)

    return model, data_reader_proto, optimizer
def construct_model():
    """Build a small conv + fully-connected classifier.

    Slices the flat input sample into a data block and a ground-truth
    label, runs two 3x3 convolutions and three FC layers with dropout,
    and trains against a cross-entropy objective.

    NOTE(review): relies on module-level ``num_classes``, ``dims``, and
    ``str_list`` -- confirm they are defined earlier in this file.

    Returns:
        lbann.Model: Model with accuracy metric and print/timer callbacks.
    """
    import lbann
    import lbann.modules

    fc = lbann.modules.FullyConnectedModule
    conv = lbann.modules.Convolution2dModule

    # Reusable module instances (each instance owns its weights).
    conv1 = conv(20, 3, stride=1, padding=1, name='conv1')
    conv2 = conv(20, 3, stride=1, padding=1, name='conv2')
    fc1 = fc(100, name='fc1')
    fc2 = fc(20, name='fc2')
    fc3 = fc(num_classes, name='fc3')

    # Layer graph: split the sample into data and label parts.
    input_tensor = lbann.Input(name='inp_tensor', target_mode='classification')
    inp_slice = lbann.Slice(input_tensor,
                            axis=0,
                            slice_points=str_list([0, dims - 1, dims]),
                            name='inp_slice')
    xdata = lbann.Identity(inp_slice)
    ylabel = lbann.Identity(inp_slice, name='gt_y')

    # NHWC to NCHW
    x = lbann.Reshape(xdata, dims='14 13 13')
    x = conv2(conv1(x))
    x = lbann.Reshape(x, dims='3380')
    x = lbann.Dropout(lbann.Relu(fc1(x)), keep_prob=0.5)
    x = lbann.Dropout(fc2(x), keep_prob=0.5)
    pred = lbann.Softmax(fc3(x))

    gt_label = lbann.OneHot(ylabel, size=num_classes)
    loss = lbann.CrossEntropy([pred, gt_label], name='loss')
    acc = lbann.CategoricalAccuracy([pred, gt_label])

    layers = list(lbann.traverse_layer_graph(input_tensor))

    # Objective function over every weight in the graph.
    weights = set()
    for layer in layers:
        weights.update(layer.weights)
    obj = lbann.ObjectiveFunction(loss)

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    num_epochs = 10
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=[lbann.Metric(acc, name='accuracy', unit='%')],
                       objective_function=obj,
                       callbacks=callbacks)
def set_up_experiment(args, input_, probs, labels):
    """Assemble trainer, model, ImageNet data reader, and optimizer.

    Args:
        args: Parsed command-line arguments. Reads ``num_epochs``,
            ``num_classes``, ``optimizer``, ``optimizer_learning_rate``,
            and ``mini_batch_size``.
        input_: Input layer at the root of the layer graph.
        probs: Softmax output layer of the network.
        labels: Label layer.

    Returns:
        tuple: ``(trainer, model, data_reader, optimizer)``.
    """
    # Objective: cross-entropy plus L2 weight decay, applied only to
    # convolution and fully-connected weights.
    cross_entropy = lbann.CrossEntropy([probs, labels])
    layers = list(lbann.traverse_layer_graph(input_))
    l2_reg_weights = set()
    for layer in layers:
        if type(layer) == lbann.Convolution or type(layer) == lbann.FullyConnected:
            l2_reg_weights.update(layer.weights)
    # scale = weight decay
    l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=1e-4)
    objective_function = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Metrics and callbacks: top-1/top-5 accuracy, step-wise LR drops.
    top1 = lbann.CategoricalAccuracy([probs, labels])
    top5 = lbann.TopKCategoricalAccuracy([probs, labels], k=5)
    metrics = [
        lbann.Metric(top1, name='top-1 accuracy', unit='%'),
        lbann.Metric(top5, name='top-5 accuracy', unit='%'),
    ]
    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackTimer(),
        lbann.CallbackDropFixedLearningRate(drop_epoch=[30, 60], amt=0.1),
    ]
    model = lbann.Model(args.num_epochs,
                        layers=layers,
                        objective_function=objective_function,
                        metrics=metrics,
                        callbacks=callbacks)

    # Data reader comes from the project's ImageNet helper.
    data_reader = data.imagenet.make_data_reader(num_classes=args.num_classes)

    # Optimizer: explicit SGD configuration, or the contrib helper.
    if args.optimizer == 'sgd':
        print('Creating sgd optimizer')
        optimizer = lbann.optimizer.SGD(
            learn_rate=args.optimizer_learning_rate,
            momentum=0.9,
            nesterov=True)
    else:
        optimizer = lbann.contrib.args.create_optimizer(args)

    # Trainer owns the mini-batch size.
    trainer = lbann.Trainer(mini_batch_size=args.mini_batch_size)

    return trainer, model, data_reader, optimizer
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               dataset=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of classes as targets (default: None)
        dataset (str): Preset data set to use. Either a dataset parameter
            has to be supplied or all of num_vertices, node_features, and
            num_classes have to be supplied. (default: None)
        kernel_type (str): Graph Kernel to use in model. Expected one of
            GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None the model
            description, GPU usage, training_output, and timer is reported.
            (default: None)
        num_epochs (int): Number of epochs to run (default: 1)

    Raises:
        ValueError: If ``kernel_type`` is not a recognized kernel.
        Exception: If ``dataset`` is not a known preset.

    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
            presets, and graph kernels.
    '''
    # Either a preset dataset or explicit dimensions must be given.
    assert num_vertices != dataset  # Ensure at least one of the values is set
    if dataset is not None:
        assert num_vertices is None
        if dataset == 'MNIST':
            num_vertices = 75
            num_classes = 10
            node_features = 1
        elif dataset == 'PROTEINS':
            num_vertices = 100
            num_classes = 2
            node_features = 3
        else:
            raise Exception("Unkown Dataset")

    assert num_vertices is not None
    assert num_classes is not None
    assert node_features is not None

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------
    input_ = lbann.Input(target_mode='classification')

    # Input dimensions should be
    # (num_vertices * node_features + num_vertices^2 + num_classes).
    # Input should have at least two children since the target is
    # classification.
    data = lbann_Graph_Data(input_, num_vertices, node_features, num_classes)
    feature_matrix = data.x
    adj_matrix = data.adj
    target = data.y

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------
    if kernel_type == 'GIN':
        x = GINConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GCN':
        x = GCNConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'Graph':
        x = GraphConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GatedGraph':
        # NOTE(review): 'GatedGraph' dispatches to GATConvLayer -- confirm
        # this mapping is intentional.
        x = GATConvLayer(feature_matrix, adj_matrix)
    else:
        # BUG FIX: the ValueError used to be constructed but never raised,
        # so an invalid kernel fell through to a NameError on `x` below.
        raise ValueError(
            'Invalid Graph kernel specifier "{}" recieved. Expected one of: '
            'GIN,GCN,Graph or GatedGraph'.format(kernel_type))

    out_channel = x.shape[1]

    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------
    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    x = x.get_mat(out_channel)
    # Mean over vertices: (1 x V) @ (V x C) -> (1 x C).
    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # X is now a vector with out_channel dimensions
    x = lbann.Reshape(x, dims=str_list([out_channel]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------
    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        # Default callbacks: model description, training progress,
        # GPU memory usage, and timing.
        print_model = lbann.CallbackPrintModelDescription()
        training_output = lbann.CallbackPrint(interval=1,
                                              print_global_stat_only=False)
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
def setup(data_reader_file,
          name='classifier',
          num_labels=200,
          mini_batch_size=128,
          num_epochs=1000,
          learning_rate=0.1,
          bn_statistics_group_size=2,
          fc_data_layout='model_parallel',
          warmup_epochs=50,
          learning_rate_drop_interval=50,
          learning_rate_drop_factor=0.25,
          checkpoint_interval=None):
    """Set up the image-classification experiment.

    Args:
        data_reader_file (str): Prototext file describing the data reader.
        name (str): Prefix for the classifier FC layer name.
        num_labels (int): Number of target classes.
        mini_batch_size (int): Mini-batch size (used to scale the warmup LR).
        num_epochs (int): Number of training epochs.
        learning_rate (float): Base SGD learning rate.
        bn_statistics_group_size (int): Process-group size for batchnorm
            statistics.
        fc_data_layout (str): Data layout for the classifier FC layer.
        warmup_epochs (int): Epochs of linear LR warmup (falsy disables).
        learning_rate_drop_interval (int): Epochs between LR drops.
        learning_rate_drop_factor (float): Multiplicative LR drop amount
            (falsy disables the drop schedule).
        checkpoint_interval (int): Epochs between checkpoints
            (None/0 disables checkpointing).

    Returns:
        tuple: ``(model, data_reader_proto, optimizer)``.
    """
    # Setup input data
    input = lbann.Input(target_mode='classification')
    images = lbann.Identity(input)
    labels = lbann.Identity(input)

    # Classification network
    head_cnn = modules.ResNet(bn_statistics_group_size=bn_statistics_group_size)
    class_fc = lbann.modules.FullyConnectedModule(num_labels,
                                                  activation=lbann.Softmax,
                                                  name=f'{name}_fc',
                                                  data_layout=fc_data_layout)
    x = head_cnn(images)
    probs = class_fc(x)

    # Setup objective function: cross-entropy + L2 weight decay on
    # convolution and fully-connected weights only.
    cross_entropy = lbann.CrossEntropy([probs, labels])
    l2_reg_weights = set()
    for l in lbann.traverse_layer_graph(input):
        if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
            l2_reg_weights.update(l.weights)
    l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=0.0002)
    obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Setup model
    metrics = [lbann.Metric(lbann.CategoricalAccuracy([probs, labels]),
                            name='accuracy',
                            unit='%')]
    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
    if checkpoint_interval:
        # BUG FIX: the checkpoint frequency was hard-coded to 5 epochs,
        # silently ignoring the `checkpoint_interval` argument.
        callbacks.append(
            lbann.CallbackCheckpoint(
                checkpoint_dir='ckpt',
                checkpoint_epochs=checkpoint_interval
            )
        )

    # Learning rate schedules
    if warmup_epochs:
        # Linearly grow to an LR scaled by batch size (base batch = 128).
        callbacks.append(
            lbann.CallbackLinearGrowthLearningRate(
                target=learning_rate * mini_batch_size / 128,
                num_epochs=warmup_epochs
            )
        )
    if learning_rate_drop_factor:
        callbacks.append(
            lbann.CallbackDropFixedLearningRate(
                drop_epoch=list(range(0, num_epochs, learning_rate_drop_interval)),
                amt=learning_rate_drop_factor)
        )

    # Construct model
    model = lbann.Model(num_epochs,
                        layers=lbann.traverse_layer_graph(input),
                        objective_function=obj,
                        metrics=metrics,
                        callbacks=callbacks)

    # Setup optimizer
    # Alternative considered:
    # opt = lbann.Adam(learn_rate=learning_rate, beta1=0.9, beta2=0.999, eps=1e-8)
    opt = lbann.SGD(learn_rate=learning_rate, momentum=0.9)

    # Load data reader from prototext
    data_reader_proto = lbann.lbann_pb2.LbannPB()
    with open(data_reader_file, 'r') as f:
        google.protobuf.text_format.Merge(f.read(), data_reader_proto)
    data_reader_proto = data_reader_proto.data_reader
    # Point any Python data-reader modules at this file's directory.
    for reader_proto in data_reader_proto.reader:
        reader_proto.python.module_dir = os.path.dirname(os.path.realpath(__file__))

    # Return experiment objects
    return model, data_reader_proto, opt
x = lbann.Relu(x) x = lbann.Pooling(x, num_dims=2, pool_dims_i=2, pool_strides_i=2, pool_mode="max") x = lbann.FullyConnected(x, num_neurons=120, has_bias=True) x = lbann.Relu(x) x = lbann.FullyConnected(x, num_neurons=84, has_bias=True) x = lbann.Relu(x) x = lbann.FullyConnected(x, num_neurons=10, has_bias=True) probs = lbann.Softmax(x) # Loss function and accuracy loss = lbann.CrossEntropy(probs, labels) acc = lbann.CategoricalAccuracy(probs, labels) # ---------------------------------- # Setup experiment # ---------------------------------- # Setup model mini_batch_size = 64 num_epochs = 20 model = lbann.Model(num_epochs, layers=lbann.traverse_layer_graph(input_), objective_function=loss, metrics=[lbann.Metric(acc, name='accuracy', unit='%')], callbacks=[ lbann.CallbackPrintModelDescription(), lbann.CallbackPrint(),
# Classifier head: ReLU -> 2-way fully-connected -> softmax.
# NOTE(review): `encode5`, `label`, and `input_` are defined earlier in this
# file, outside this chunk -- presumably the encoder stack and input layers.
relu5 = lbann.Relu(encode5, name="relu5", data_layout="model_parallel")
ip2 = lbann.FullyConnected(relu5,
                           name="ip2",
                           data_layout="model_parallel",
                           num_neurons=2,
                           has_bias=True)
prob = lbann.Softmax(ip2, name="prob", data_layout="model_parallel")
cross_entropy = lbann.CrossEntropy([prob, label],
                                   name="cross_entropy",
                                   data_layout="model_parallel")
categorical_accuracy = lbann.CategoricalAccuracy([prob, label],
                                                 name="categorical_accuracy",
                                                 data_layout="model_parallel")
layer_list = list(lbann.traverse_layer_graph(input_))

# Set up objective function
layer_term = lbann.LayerTerm(cross_entropy)
obj = lbann.ObjectiveFunction(layer_term)

# Metrics
metrics = [lbann.Metric(categorical_accuracy, name="accuracy")]

# Callbacks
callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

# Setup Model
help='exported prototext file', metavar='FILE') args = parser.parse_args() # Due to a data reader limitation, the actual model realization must be # hardcoded to 1000 labels for ImageNet. imagenet_labels = 1000 # Construct layer graph input = lbann.Input() images = lbann.Identity(input) labels = lbann.Identity(input) preds = lbann.models.AlexNet(imagenet_labels)(images) probs = lbann.Softmax(preds) cross_entropy = lbann.CrossEntropy([probs, labels]) top1 = lbann.CategoricalAccuracy([probs, labels]) top5 = lbann.TopKCategoricalAccuracy([probs, labels], k=5) layers = list(lbann.traverse_layer_graph(input)) # Setup objective function weights = set() for l in layers: weights.update(l.weights) l2_reg = lbann.L2WeightRegularization(weights=weights, scale=5e-4) obj = lbann.ObjectiveFunction([cross_entropy, l2_reg]) # Setup model metrics = [ lbann.Metric(top1, name='top-1 accuracy', unit='%'), lbann.Metric(top5, name='top-5 accuracy', unit='%') ]
def setup(num_patches=3,
          mini_batch_size=512,
          num_epochs=75,
          learning_rate=0.005,
          bn_statistics_group_size=2,
          fc_data_layout='model_parallel',
          warmup=True,
          checkpoint_interval=None):
    """Set up the Siamese patch-classification experiment.

    Args:
        num_patches (int): Number of image patches per sample.
        mini_batch_size (int): Mini-batch size (used to scale the warmup LR).
        num_epochs (int): Number of training epochs.
        learning_rate (float): Base SGD learning rate.
        bn_statistics_group_size (int): Process-group size for batchnorm
            statistics.
        fc_data_layout (str): Data layout for the classification FC layers.
        warmup (bool): Whether to apply 5 epochs of linear LR warmup.
        checkpoint_interval (int): Epochs between checkpoints
            (None/0 disables checkpointing).

    Returns:
        tuple: ``(model, data_reader, optimizer)``.
    """
    # Data dimensions
    patch_dims = patch_generator.patch_dims
    num_labels = patch_generator.num_labels(num_patches)

    # Extract tensors from the data sample: `num_patches` flattened
    # patches followed by a label block.
    input = lbann.Input()
    slice_points = [0]
    # Hoisted out of the loop: patch size is loop-invariant.
    patch_size = functools.reduce(operator.mul, patch_dims)
    for _ in range(num_patches):
        slice_points.append(slice_points[-1] + patch_size)
    slice_points.append(slice_points[-1] + num_labels)

    sample = lbann.Slice(input, slice_points=str_list(slice_points))
    patches = [
        lbann.Reshape(sample, dims=str_list(patch_dims))
        for _ in range(num_patches)
    ]
    labels = lbann.Identity(sample)

    # Siamese network: one shared CNN head applied to every patch.
    head_cnn = modules.ResNet(
        bn_statistics_group_size=bn_statistics_group_size)
    heads = [head_cnn(patch) for patch in patches]
    heads_concat = lbann.Concatenation(heads)

    # Classification network
    class_fc1 = modules.FcBnRelu(
        4096,
        statistics_group_size=bn_statistics_group_size,
        name='siamese_class_fc1',
        data_layout=fc_data_layout)
    class_fc2 = modules.FcBnRelu(
        4096,
        statistics_group_size=bn_statistics_group_size,
        name='siamese_class_fc2',
        data_layout=fc_data_layout)
    class_fc3 = lbann.modules.FullyConnectedModule(num_labels,
                                                   activation=lbann.Softmax,
                                                   name='siamese_class_fc3',
                                                   data_layout=fc_data_layout)
    x = class_fc1(heads_concat)
    x = class_fc2(x)
    probs = class_fc3(x)

    # Setup objective function: cross-entropy + L2 weight decay on
    # convolution and fully-connected weights only.
    cross_entropy = lbann.CrossEntropy([probs, labels])
    l2_reg_weights = set()
    for l in lbann.traverse_layer_graph(input):
        if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
            l2_reg_weights.update(l.weights)
    l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=0.0002)
    obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Setup model
    metrics = [
        lbann.Metric(lbann.CategoricalAccuracy([probs, labels]),
                     name='accuracy',
                     unit='%')
    ]
    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
    if checkpoint_interval:
        # BUG FIX: the checkpoint frequency was hard-coded to 5 epochs,
        # silently ignoring the `checkpoint_interval` argument.
        callbacks.append(
            lbann.CallbackCheckpoint(checkpoint_dir='ckpt',
                                     checkpoint_epochs=checkpoint_interval))

    # Learning rate schedules
    if warmup:
        # 5-epoch linear warmup to an LR scaled by batch size (base 128).
        callbacks.append(
            lbann.CallbackLinearGrowthLearningRate(
                target=learning_rate * mini_batch_size / 128, num_epochs=5))
    callbacks.append(
        lbann.CallbackDropFixedLearningRate(
            drop_epoch=list(range(0, 100, 15)), amt=0.25))

    # Construct model
    model = lbann.Model(num_epochs,
                        layers=lbann.traverse_layer_graph(input),
                        objective_function=obj,
                        metrics=metrics,
                        callbacks=callbacks)

    # Setup optimizer
    # Alternative considered:
    # opt = lbann.Adam(learn_rate=learning_rate, beta1=0.9, beta2=0.999, eps=1e-8)
    opt = lbann.SGD(learn_rate=learning_rate, momentum=0.9)

    # Setup data reader
    data_reader = make_data_reader(num_patches)

    # Return experiment objects
    return model, data_reader, opt
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph
            (default: None -> 100)
        node_features (int): Number of features per node
            (default: None -> 3)
        num_classes (int): Number of classes as targets
            (default: None -> 2)
        kernel_type (str): Graph Kernel to use in model. Expected one of
            GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None the model
            description, GPU usage, training_output, and timer is reported.
            (default: None)
        num_epochs (int): Number of epochs to run (default: 1)

    Raises:
        ValueError: If ``kernel_type`` is not a recognized kernel.

    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
            presets, and graph kernels.
    '''
    # BUG FIX: these values were unconditionally hard-coded, silently
    # ignoring the caller's arguments; the old constants are kept as
    # defaults so existing callers are unaffected.
    if num_vertices is None:
        num_vertices = 100
    if num_classes is None:
        num_classes = 2
    node_feature_size = 3 if node_features is None else node_features
    max_edges = 415  # Maximum number of edges per graph in the dataset.

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------
    input_ = lbann.Input(data_field='samples')

    # Input dimensions should be
    # (num_vertices * node_features + num_vertices^2 + num_classes).
    data = Graph_Data_Parser(input_, num_vertices, node_feature_size,
                             max_edges, num_classes)
    feature_matrix = data['node_features']
    source_indices = data['source_indices']
    target_indices = data['target_indices']
    target = data['target']

    #----------------------------------
    # Select Graph Convolution
    #----------------------------------
    output_channels = 16
    if kernel_type == 'GIN':
        graph_kernel_op = GINConvLayer
    elif kernel_type == 'GCN':
        graph_kernel_op = GCNConvLayer
    elif kernel_type == 'Graph':
        graph_kernel_op = GraphConvLayer
    elif kernel_type == 'GatedGraph':
        # NOTE(review): 'GatedGraph' dispatches to GATConvLayer -- confirm
        # this mapping is intentional.
        graph_kernel_op = GATConvLayer
    else:
        raise ValueError(
            'Invalid Graph kernel specifier "{}" recieved. Expected one of: '
            'GIN,GCN,Graph or GatedGraph'.format(kernel_type))

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------
    x = graph_kernel_op(feature_matrix, source_indices, target_indices,
                        num_vertices, max_edges, node_feature_size,
                        output_channels)

    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------
    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    # Mean over vertices: (1 x V) @ (V x C) -> (1 x C).
    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # X is now a vector with output_channels dimensions
    x = lbann.Reshape(x, dims=str_list([output_channels]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------
    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        # Default callbacks: model description, training progress,
        # GPU memory usage, and timing.
        print_model = lbann.CallbackPrintModelDescription()
        training_output = lbann.CallbackPrint(interval=1,
                                              print_global_stat_only=False)
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
num_labels, bn_statistics_group_size=args.bn_statistics_group_size) else: # Some other Wide ResNet. resnet = resnet_variant_dict[args.resnet]( num_labels, bn_statistics_group_size=args.bn_statistics_group_size, width=args.width) # Construct layer graph input_ = lbann.Input(name='input') images = lbann.Identity(input_, name='images') labels = lbann.Identity(input_, name='labels') preds = resnet(images) probs = lbann.Softmax(preds) cross_entropy = lbann.CrossEntropy(probs, labels) top1 = lbann.CategoricalAccuracy(probs, labels, name='louise') top5 = lbann.TopKCategoricalAccuracy(probs, labels, k=5) layer_list = list(lbann.traverse_layer_graph(input_)) # Setup objective function l2_reg_weights = set() for l in layer_list: if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected: l2_reg_weights.update(l.weights) l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=1e-4) obj = lbann.ObjectiveFunction([cross_entropy, l2_reg]) # Setup model metrics = [ lbann.Metric(top1, name='top-1 accuracy', unit='%'), lbann.Metric(top5, name='top-5 accuracy', unit='%')
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the dense Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph
            (default: None -> 100)
        node_features (int): Number of features per node
            (default: None -> 3)
        num_classes (int): Number of classes as targets
            (default: None -> 2)
        kernel_type (str): Graph Kernel to use in model. Expected one of
            GCN, or Graph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None the model
            description, GPU usage, training_output, and timer is reported.
            (default: None)
        num_epochs (int): Number of epochs to run (default: 1)

    Raises:
        ValueError: If ``kernel_type`` is not a recognized kernel.

    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
            presets, and graph kernels.
    '''
    # BUG FIX: these values were unconditionally hard-coded, silently
    # ignoring the caller's arguments; the old constants are kept as
    # defaults so existing callers are unaffected.
    if num_vertices is None:
        num_vertices = 100
    if num_classes is None:
        num_classes = 2
    if node_features is None:
        node_features = 3

    assert num_vertices is not None
    assert num_classes is not None
    assert node_features is not None

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------
    input_ = lbann.Input(data_field='samples')

    # Input dimensions should be
    # (num_vertices * node_features + num_vertices^2 + num_classes).
    # Input should have at least two children since the target is
    # classification.
    sample_dims = num_vertices * node_features + (num_vertices**2) + num_classes
    graph_dims = num_vertices * node_features + (num_vertices**2)
    feature_matrix_size = num_vertices * node_features

    # Slice the flat sample into node features, adjacency matrix, and target.
    graph_input = lbann.Slice(input_,
                              axis=0,
                              slice_points=str_list([
                                  0, feature_matrix_size, graph_dims,
                                  sample_dims
                              ]),
                              name="Graph_Input")
    feature_matrix = lbann.Reshape(graph_input,
                                   dims=str_list([num_vertices, node_features]),
                                   name="Node_features")
    adj_matrix = lbann.Reshape(graph_input,
                               dims=str_list([num_vertices, num_vertices]),
                               name="Adj_Mat")
    target = lbann.Identity(graph_input, name="Target")
    target = lbann.Reshape(target, dims=str(num_classes))

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------
    if kernel_type == 'GCN':
        x = DGCN_layer(feature_matrix, adj_matrix, node_features)
    elif kernel_type == 'Graph':
        x = DGraph_Layer(feature_matrix, adj_matrix, node_features)
    else:
        # BUG FIX: the ValueError used to be constructed but never raised,
        # so an invalid kernel fell through to a NameError on `x` below.
        raise ValueError(
            'Invalid Graph kernel specifier "{}" recieved. Expected one of: '
            'GCN or Graph'.format(kernel_type))

    out_channel = 256  # Output width of the graph kernel layers above.

    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------
    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    # Mean over vertices: (1 x V) @ (V x C) -> (1 x C).
    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # X is now a vector with out_channel dimensions
    x = lbann.Reshape(x, dims=str_list([out_channel]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=256, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------
    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        # Default callbacks: model description, training progress,
        # GPU memory usage, and timing.
        print_model = lbann.CallbackPrintModelDescription()
        training_output = lbann.CallbackPrint(interval=1,
                                              print_global_stat_only=False)
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model