def make_model(
    motif_size,
    walk_length,
    num_vertices,
    embed_dim,
    learn_rate,
    num_epochs,
    embeddings_dir,
):

    # Layer graph
    input_ = lbann.Slice(
        lbann.Input(data_field='samples'),
        slice_points=str_list([0, motif_size, motif_size+walk_length]),
    )
    motif_indices = lbann.Identity(input_)
    walk_indices = lbann.Identity(input_)
    gan = model.gan.CommunityGAN(
        num_vertices,
        motif_size,
        embed_dim,
        learn_rate,
    )
    loss, real_disc_prob, fake_disc_prob, gen_prob = gan(
        motif_indices,
        motif_size,
        walk_indices,
        walk_length,
    )

    # Metrics
    metrics = [
        lbann.Metric(real_disc_prob, name='D(real)'),
        lbann.Metric(fake_disc_prob, name='D(fake)'),
        lbann.Metric(gen_prob, name='G'),
    ]

    # Callbacks
    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackTimer(),
        lbann.CallbackDumpWeights(directory=embeddings_dir,
                                  epoch_interval=num_epochs),
    ]

    # Perform computation at double precision
    for l in lbann.traverse_layer_graph(input_):
        l.datatype = lbann.DataType.DOUBLE
        for w in l.weights:
            w.datatype = lbann.DataType.DOUBLE

    # Construct model
    return lbann.Model(
        num_epochs,
        layers=lbann.traverse_layer_graph(input_),
        objective_function=loss,
        metrics=metrics,
        callbacks=callbacks,
    )
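# --- Illustrative usage sketch (not part of the original code) ---
# Assumption: the graph/embedding sizes below are hypothetical values chosen
# purely for demonstration; the returned model still needs a data reader,
# optimizer, and trainer before it can be run with LBANN.
example_model = make_model(motif_size=4,
                           walk_length=20,
                           num_vertices=10000,
                           embed_dim=128,
                           learn_rate=0.01,
                           num_epochs=100,
                           embeddings_dir='embeddings')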
def set_up_experiment(args, input_, probs, labels):
    # Set up objective function
    cross_entropy = lbann.CrossEntropy([probs, labels])
    layers = list(lbann.traverse_layer_graph(input_))
    weights = set()
    for l in layers:
        weights.update(l.weights)
    # scale = weight decay
    l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)
    objective_function = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Set up model
    top1 = lbann.CategoricalAccuracy([probs, labels])
    top5 = lbann.TopKCategoricalAccuracy([probs, labels], k=5)
    metrics = [lbann.Metric(top1, name='top-1 accuracy', unit='%'),
               lbann.Metric(top5, name='top-5 accuracy', unit='%')]
    callbacks = [lbann.CallbackPrint(),
                 lbann.CallbackTimer(),
                 lbann.CallbackDropFixedLearningRate(
                     drop_epoch=[30, 60], amt=0.1)]
    model = lbann.Model(args.mini_batch_size,
                        args.num_epochs,
                        layers=layers,
                        weights=weights,
                        objective_function=objective_function,
                        metrics=metrics,
                        callbacks=callbacks)

    # Load data reader from prototext
    data_reader_proto = lbann.lbann_pb2.LbannPB()
    with open(args.data_reader, 'r') as f:
        txtf.Merge(f.read(), data_reader_proto)
    data_reader_proto = data_reader_proto.data_reader

    # Set up optimizer
    if args.optimizer == 'sgd':
        print('Creating sgd optimizer')
        optimizer = lbann.optimizer.SGD(
            learn_rate=args.optimizer_learning_rate,
            momentum=0.9,
            nesterov=True
        )
    else:
        optimizer = lbann.contrib.args.create_optimizer(args)

    # Save prototext to args.prototext
    if args.prototext:
        lbann.proto.save_prototext(args.prototext,
                                   model=model,
                                   optimizer=optimizer,
                                   data_reader=data_reader_proto)

    return model, data_reader_proto, optimizer
def set_up_experiment(args, input_, probs, labels):
    # Set up objective function
    cross_entropy = lbann.CrossEntropy([probs, labels])
    layers = list(lbann.traverse_layer_graph(input_))
    l2_reg_weights = set()
    for l in layers:
        if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
            l2_reg_weights.update(l.weights)
    # scale = weight decay
    l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=1e-4)
    objective_function = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Set up model
    top1 = lbann.CategoricalAccuracy([probs, labels])
    top5 = lbann.TopKCategoricalAccuracy([probs, labels], k=5)
    metrics = [
        lbann.Metric(top1, name='top-1 accuracy', unit='%'),
        lbann.Metric(top5, name='top-5 accuracy', unit='%')
    ]
    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackTimer(),
        lbann.CallbackDropFixedLearningRate(drop_epoch=[30, 60], amt=0.1)
    ]
    model = lbann.Model(args.num_epochs,
                        layers=layers,
                        objective_function=objective_function,
                        metrics=metrics,
                        callbacks=callbacks)

    # Set up data reader
    data_reader = data.imagenet.make_data_reader(num_classes=args.num_classes)

    # Set up optimizer
    if args.optimizer == 'sgd':
        print('Creating sgd optimizer')
        optimizer = lbann.optimizer.SGD(
            learn_rate=args.optimizer_learning_rate,
            momentum=0.9,
            nesterov=True)
    else:
        optimizer = lbann.contrib.args.create_optimizer(args)

    # Setup trainer
    trainer = lbann.Trainer(mini_batch_size=args.mini_batch_size)

    return trainer, model, data_reader, optimizer
def construct_model():
    """Construct LBANN model.

    Pilot1 Combo model
    """
    import lbann

    # Layer graph
    data = lbann.Input(data_field='samples')
    responses = lbann.Input(data_field='responses')

    pred = combo.Combo()(data)
    mse = lbann.MeanSquaredError([responses, pred])

    SS_res = lbann.Reduction(lbann.Square(lbann.Subtract(responses, pred)),
                             mode='sum')

    # SS_tot = var(x) = mean((x-mean(x))^2)
    mini_batch_size = lbann.MiniBatchSize()
    mean = lbann.Divide(lbann.BatchwiseReduceSum(responses), mini_batch_size)
    SS_tot = lbann.Divide(
        lbann.BatchwiseReduceSum(lbann.Square(lbann.Subtract(responses, mean))),
        mini_batch_size)
    eps = lbann.Constant(value=1e-07, hint_layer=SS_tot)
    r2 = lbann.Subtract(lbann.Constant(value=1, num_neurons='1'),
                        lbann.Divide(SS_res, lbann.Add(SS_tot, eps)))

    metrics = [lbann.Metric(mse, name='mse')]
    metrics.append(lbann.Metric(r2, name='r2'))

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    # Construct model
    num_epochs = 100
    layers = list(lbann.traverse_layer_graph([data, responses]))
    return lbann.Model(num_epochs,
                       layers=layers,
                       metrics=metrics,
                       objective_function=mse,
                       callbacks=callbacks)
def construct_model():
    """Model description
    """
    import lbann
    import lbann.modules

    fc = lbann.modules.FullyConnectedModule
    conv = lbann.modules.Convolution2dModule

    conv1 = conv(20, 3, stride=1, padding=1, name='conv1')
    conv2 = conv(20, 3, stride=1, padding=1, name='conv2')
    fc1 = fc(100, name='fc1')
    fc2 = fc(20, name='fc2')
    fc3 = fc(num_classes, name='fc3')

    # Layer graph
    input = lbann.Input(name='inp_tensor', target_mode='classification')
    inp_slice = lbann.Slice(input,
                            axis=0,
                            slice_points=str_list([0, dims - 1, dims]),
                            name='inp_slice')
    xdata = lbann.Identity(inp_slice)
    ylabel = lbann.Identity(inp_slice, name='gt_y')

    # NHWC to NCHW
    x = lbann.Reshape(xdata, dims='14 13 13')
    x = conv2(conv1(x))
    x = lbann.Reshape(x, dims='3380')
    x = lbann.Dropout(lbann.Relu(fc1(x)), keep_prob=0.5)
    x = lbann.Dropout(fc2(x), keep_prob=0.5)
    pred = lbann.Softmax(fc3(x))
    gt_label = lbann.OneHot(ylabel, size=num_classes)
    loss = lbann.CrossEntropy([pred, gt_label], name='loss')
    acc = lbann.CategoricalAccuracy([pred, gt_label])

    layers = list(lbann.traverse_layer_graph(input))

    # Setup objective function
    weights = set()
    for l in layers:
        weights.update(l.weights)
    obj = lbann.ObjectiveFunction(loss)

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    # Construct model
    num_epochs = 10
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=[lbann.Metric(acc, name='accuracy', unit='%')],
                       objective_function=obj,
                       callbacks=callbacks)
def make_model(
    data_dim,
    latent_dim,
    num_epochs,
):

    # Layer graph
    data = lbann.Input(data_field='samples')
    autoencoder = model.autoencoder.FullyConnectedAutoencoder(
        data_dim,
        latent_dim,
    )
    reconstructed = autoencoder(data)
    loss = lbann.MeanSquaredError(data, reconstructed)

    # Metrics
    metrics = [
        lbann.Metric(loss, name='loss'),
    ]

    # Callbacks
    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackTimer(),
        lbann.CallbackDumpWeights(directory='weights',
                                  epoch_interval=num_epochs),
    ]

    # Construct model
    return lbann.Model(
        num_epochs,
        layers=lbann.traverse_layer_graph(loss),
        objective_function=loss,
        metrics=metrics,
        callbacks=callbacks,
    )
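# --- Illustrative usage sketch (not part of the original code) ---
# Assumption: the data/latent dimensions and epoch count are hypothetical;
# the autoencoder model returned here would still need a data reader,
# optimizer, and trainer before it can be launched with LBANN.
example_model = make_model(data_dim=784, latent_dim=16, num_epochs=10)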
def construct_model(num_epochs, mcr, spectral_loss, save_batch_interval):
    """Construct LBANN model.
    """
    import lbann

    # Layer graph
    input = lbann.Input(target_mode='N/A', name='inp_img')

    ### Create expected labels for real and fake data (with label flipping = 0.01)
    prob_flip = 0.01
    label_flip_rand = lbann.Uniform(min=0, max=1, neuron_dims='1')
    label_flip_prob = lbann.Constant(value=prob_flip, num_neurons='1')
    ones = lbann.GreaterEqual(label_flip_rand, label_flip_prob, name='is_real')
    zeros = lbann.LogicalNot(ones, name='is_fake')
    gen_ones = lbann.Constant(value=1.0, num_neurons='1')  ## All ones: no flip. Input for training Generator.

    #==============================================
    ### Implement GAN
    ## Create the noise vector
    z = lbann.Reshape(lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims="64", name='noise_vec'),
                      dims='1 64')
    ## Creating the GAN object and implementing forward pass for both networks ###
    d1_real, d1_fake, d_adv, gen_img, img = ExaGAN.CosmoGAN(mcr)(input, z, mcr)

    #==============================================
    ### Compute quantities for adding to Loss and Metrics
    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, ones], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zeros], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, gen_ones], name='d_adv_bce')
    #img_loss = lbann.MeanSquaredError([gen_img,img])
    #l1_loss = lbann.L1Norm(lbann.WeightedSum([gen_img,img], scaling_factors="1 -1"))

    #==============================================
    ### Set up source and destination layers
    layers = list(lbann.traverse_layer_graph(input))
    weights = set()
    src_layers, dst_layers = [], []
    for l in layers:
        if (l.weights and "disc1" in l.name and "instance1" in l.name):
            src_layers.append(l.name)
        # freeze weights in disc2, analogous to discrim.trainable=False in Keras
        if (l.weights and "disc2" in l.name):
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)
    #l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)

    #==============================================
    ### Define Loss and Metrics
    # Define loss (objective function)
    loss_list = [d1_real_bce, d1_fake_bce, d_adv_bce]  ## Usual GAN loss function
    # loss_list = [d1_real_bce, d1_fake_bce]  ## skipping adversarial loss for G for testing spectral loss

    if spectral_loss:
        dft_gen_img = lbann.DFTAbs(gen_img)
        dft_img = lbann.StopGradient(lbann.DFTAbs(img))
        spec_loss = lbann.Log(lbann.MeanSquaredError(dft_gen_img, dft_img))
        loss_list.append(lbann.LayerTerm(spec_loss, scale=8.0))

    loss = lbann.ObjectiveFunction(loss_list)

    # Define metrics
    metrics = [lbann.Metric(d1_real_bce, name='d_real'),
               lbann.Metric(d1_fake_bce, name='d_fake'),
               lbann.Metric(d_adv_bce, name='gen_adv')]
    if spectral_loss:
        metrics.append(lbann.Metric(spec_loss, name='spec_loss'))

    #==============================================
    ### Define callbacks list
    callbacks_list = []
    dump_outputs = True
    save_model = False
    print_model = False

    callbacks_list.append(lbann.CallbackPrint())
    callbacks_list.append(lbann.CallbackTimer())
    callbacks_list.append(lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),
                                                       destination_layers=list2str(dst_layers),
                                                       batch_interval=1))
    if dump_outputs:
        #callbacks_list.append(lbann.CallbackDumpOutputs(layers='inp_img gen_img_instance1_activation',
        #                                                execution_modes='train validation',
        #                                                directory='dump_outs',
        #                                                batch_interval=save_batch_interval,
        #                                                format='npy'))
        callbacks_list.append(lbann.CallbackDumpOutputs(layers='gen_img_instance1_activation',
                                                        execution_modes='train validation',
                                                        directory='dump_outs',
                                                        batch_interval=save_batch_interval,
                                                        format='npy'))
    if save_model:
        callbacks_list.append(lbann.CallbackSaveModel(dir='models'))
    if print_model:
        callbacks_list.append(lbann.CallbackPrintModelDescription())

    ### Construct model
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=metrics,
                       objective_function=loss,
                       callbacks=callbacks_list)
        depth_splits_pooling_id = 5 - (math.log2(args.depth_groups) - 2)
        if pooling_id == depth_splits_pooling_id:
            parallel_strategy = dict(parallel_strategy.items())
            parallel_strategy['depth_splits'] = 1
    elif layer_name == 'Dropout':
        dropout_id += 1
        if dropout_id == args.gather_dropout_id:
            break

    layer.parallel_strategy = parallel_strategy

# Set up model
metrics = [lbann.Metric(mse, name='MSE', unit='')]
callbacks = [
    lbann.CallbackPrint(),
    lbann.CallbackTimer(),
    lbann.CallbackGPUMemoryUsage(),
    lbann.CallbackDumpOutputs(
        directory='dump_acts/',
        layers=' '.join([preds.name, secrets.name]),
        execution_modes='test'),
    lbann.CallbackProfiler(skip_init=True)
]
# # TODO: Use polynomial learning rate decay (https://github.com/LLNL/lbann/issues/1581)
# callbacks.append(lbann.CallbackPolyLearningRate(
#     power=1.0,
#     num_epochs=100,
#     end_lr=1e-7))
model = lbann.Model(
    epochs=args.num_epochs,
def construct_model(run_args):
    """Construct LBANN model.

    Initial model for ATOM molecular VAE
    """
    import lbann

    pad_index = run_args.pad_index
    assert pad_index is not None

    sequence_length = run_args.sequence_length
    assert sequence_length is not None, 'should be training seq len + bos + eos'

    print("sequence length is {}, which is training sequence len + bos + eos".format(sequence_length))
    data_layout = "data_parallel"

    # Layer graph
    input_ = lbann.Input(data_field='samples', name='inp_data')
    # Note: the input is assumed to come from the encoder script's concatenation of input SMILES + z
    inp_slice = lbann.Slice(input_,
                            axis=0,
                            slice_points=str_list([0, sequence_length, sequence_length+run_args.z_dim]),
                            name='inp_slice')
    inp_smile = lbann.Identity(inp_slice, name='inp_smile')
    z = lbann.Identity(inp_slice, name='z')

    wae_loss = []
    input_feature_dims = sequence_length

    embedding_size = run_args.embedding_dim
    dictionary_size = run_args.num_embeddings
    assert embedding_size is not None
    assert dictionary_size is not None

    save_output = True if run_args.dump_outputs_dir else False
    print("save output? ", save_output, "out dir ", run_args.dump_outputs_dir)

    # Uncomment below for random sampling
    #z = lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims=str(run_args.z_dim))
    x = lbann.Slice(inp_smile, slice_points=str_list([0, input_feature_dims]))
    x = lbann.Identity(x)
    waemodel = molwae.MolWAE(input_feature_dims,
                             dictionary_size,
                             embedding_size,
                             pad_index,
                             run_args.z_dim,
                             save_output=save_output)
    x_emb = lbann.Embedding(
        x,
        num_embeddings=waemodel.dictionary_size,
        embedding_dim=waemodel.embedding_size,
        name='emb',
        weights=waemodel.emb_weights
    )

    pred, arg_max = waemodel.forward_decoder(x_emb, z)

    recon = waemodel.compute_loss(x, pred)
    wae_loss.append(recon)

    layers = list(lbann.traverse_layer_graph(input_))

    # Setup objective function
    weights = set()
    for l in layers:
        weights.update(l.weights)

    #l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)
    #wae_loss.append(l2_reg)
    print("LEN wae loss ", len(wae_loss))

    obj = lbann.ObjectiveFunction(wae_loss)

    # Initialize check metric callback
    metrics = [lbann.Metric(recon, name='recon')]

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    # Dump output (activation) for post processing
    pred_tensor = lbann.Concatenation(arg_max, name='pred_tensor')
    conc_out = lbann.Concatenation([input_, pred_tensor], name='conc_out')
    callbacks.append(
        lbann.CallbackDumpOutputs(batch_interval=run_args.dump_outputs_interval,
                                  execution_modes='test',
                                  directory=run_args.dump_outputs_dir,
                                  layers=f'{conc_out.name}'))

    # Construct model
    return lbann.Model(run_args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
def setup(data_reader_file,
          name='classifier',
          num_labels=200,
          mini_batch_size=128,
          num_epochs=1000,
          learning_rate=0.1,
          bn_statistics_group_size=2,
          fc_data_layout='model_parallel',
          warmup_epochs=50,
          learning_rate_drop_interval=50,
          learning_rate_drop_factor=0.25,
          checkpoint_interval=None):

    # Setup input data
    input = lbann.Input(target_mode='classification')
    images = lbann.Identity(input)
    labels = lbann.Identity(input)

    # Classification network
    head_cnn = modules.ResNet(bn_statistics_group_size=bn_statistics_group_size)
    class_fc = lbann.modules.FullyConnectedModule(num_labels,
                                                  activation=lbann.Softmax,
                                                  name=f'{name}_fc',
                                                  data_layout=fc_data_layout)
    x = head_cnn(images)
    probs = class_fc(x)

    # Setup objective function
    cross_entropy = lbann.CrossEntropy([probs, labels])
    l2_reg_weights = set()
    for l in lbann.traverse_layer_graph(input):
        if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
            l2_reg_weights.update(l.weights)
    l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=0.0002)
    obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Setup model
    metrics = [lbann.Metric(lbann.CategoricalAccuracy([probs, labels]),
                            name='accuracy',
                            unit='%')]
    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
    if checkpoint_interval:
        callbacks.append(
            lbann.CallbackCheckpoint(
                checkpoint_dir='ckpt',
                checkpoint_epochs=5
            )
        )

    # Learning rate schedules
    if warmup_epochs:
        callbacks.append(
            lbann.CallbackLinearGrowthLearningRate(
                target=learning_rate * mini_batch_size / 128,
                num_epochs=warmup_epochs
            )
        )
    if learning_rate_drop_factor:
        callbacks.append(
            lbann.CallbackDropFixedLearningRate(
                drop_epoch=list(range(0, num_epochs, learning_rate_drop_interval)),
                amt=learning_rate_drop_factor)
        )

    # Construct model
    model = lbann.Model(num_epochs,
                        layers=lbann.traverse_layer_graph(input),
                        objective_function=obj,
                        metrics=metrics,
                        callbacks=callbacks)

    # Setup optimizer
    # opt = lbann.Adam(learn_rate=learning_rate, beta1=0.9, beta2=0.999, eps=1e-8)
    opt = lbann.SGD(learn_rate=learning_rate, momentum=0.9)

    # Load data reader from prototext
    data_reader_proto = lbann.lbann_pb2.LbannPB()
    with open(data_reader_file, 'r') as f:
        google.protobuf.text_format.Merge(f.read(), data_reader_proto)
    data_reader_proto = data_reader_proto.data_reader
    for reader_proto in data_reader_proto.reader:
        reader_proto.python.module_dir = os.path.dirname(os.path.realpath(__file__))

    # Return experiment objects
    return model, data_reader_proto, opt
def construct_model(run_args):
    """Construct LBANN model.

    Initial model for ATOM molecular VAE
    """
    import lbann

    pad_index = run_args.pad_index
    assert pad_index is not None

    sequence_length = run_args.sequence_length
    assert sequence_length is not None

    print("sequence length is {}".format(sequence_length))
    data_layout = "data_parallel"

    # Layer graph
    input_ = lbann.Identity(lbann.Input(name='inp', target_mode="N/A"),
                            name='inp1')
    vae_loss = []
    input_feature_dims = sequence_length

    embedding_size = run_args.embedding_dim
    dictionary_size = run_args.num_embeddings
    assert embedding_size is not None
    assert dictionary_size is not None

    kl, recon = molvae.MolVAE(input_feature_dims,
                              dictionary_size,
                              embedding_size,
                              pad_index)(input_)

    vae_loss.append(kl)
    vae_loss.append(recon)
    print("LEN vae loss ", len(vae_loss))

    layers = list(lbann.traverse_layer_graph(input_))

    # Setup objective function
    weights = set()
    for l in layers:
        weights.update(l.weights)
    l2_reg = lbann.L2WeightRegularization(weights=weights, scale=5e-4)
    obj = lbann.ObjectiveFunction(vae_loss)

    # Initialize check metric callback
    metrics = [
        lbann.Metric(kl, name='kl_loss'),
        lbann.Metric(recon, name='recon')
    ]

    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

    if run_args.dump_weights_interval > 0:
        callbacks.append(
            lbann.CallbackDumpWeights(
                directory=run_args.dump_weights_dir,
                epoch_interval=run_args.dump_weights_interval))

    if run_args.ltfb:
        send_name = ('' if run_args.weights_to_send == 'All'
                     else run_args.weights_to_send)  # hack for Merlin empty string
        weights_to_ex = [w.name for w in weights if send_name in w.name]
        print("LTFB Weights to exchange ", weights_to_ex)
        callbacks.append(
            lbann.CallbackLTFB(batch_interval=run_args.ltfb_batch_interval,
                               metric='recon',
                               weights=list2str(weights_to_ex),
                               low_score_wins=True,
                               exchange_hyperparameters=True))

    if run_args.warmup:
        callbacks.append(
            lbann.CallbackLinearGrowthLearningRate(
                target=run_args.lr / 512 * run_args.batch_size,
                num_epochs=5))

    # Construct model
    return lbann.Model(run_args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
parallel_strategy = get_parallel_strategy_args(
    sample_groups=args.mini_batch_size,
    depth_groups=args.depth_groups)

# Construct layer graph
input = lbann.Input(target_mode='label_reconstruction')
volume = lbann.Identity(input)
output = UNet3D()(volume)
segmentation = lbann.Identity(input)

ce = lbann.CrossEntropy([output, segmentation], use_labels=True)
obj = lbann.ObjectiveFunction([ce])

layers = list(lbann.traverse_layer_graph(input))
for l in layers:
    l.parallel_strategy = parallel_strategy

# Setup model
metrics = [lbann.Metric(ce, name='CE', unit='')]
callbacks = [
    lbann.CallbackPrint(),
    lbann.CallbackTimer(),
    lbann.CallbackGPUMemoryUsage(),
    lbann.CallbackProfiler(skip_init=True),
]
# # TODO: Use polynomial learning rate decay (https://github.com/LLNL/lbann/issues/1581)
# callbacks.append(
#     lbann.CallbackPolyLearningRate(
#         power=1.0,
#         num_epochs=100,
#         end_lr=1e-5))
model = lbann.Model(
    epochs=args.num_epochs,
    layers=layers,
cross_entropy = lbann.CrossEntropy([prob, label],
                                   name="cross_entropy",
                                   data_layout="model_parallel")
categorical_accuracy = lbann.CategoricalAccuracy([prob, label],
                                                 name="categorical_accuracy",
                                                 data_layout="model_parallel")

layer_list = list(lbann.traverse_layer_graph(input_))

# Set up objective function
layer_term = lbann.LayerTerm(cross_entropy)
obj = lbann.ObjectiveFunction(layer_term)

# Metrics
metrics = [lbann.Metric(categorical_accuracy, name="accuracy")]

# Callbacks
callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

# Setup Model
model = lbann.Model(args.num_epochs,
                    layers=layer_list,
                    objective_function=obj,
                    metrics=metrics,
                    callbacks=callbacks,
                    summary_dir=".")

# Setup optimizer
opt = lbann.contrib.args.create_optimizer(args)
def setup(num_patches=3,
          mini_batch_size=512,
          num_epochs=75,
          learning_rate=0.005,
          bn_statistics_group_size=2,
          fc_data_layout='model_parallel',
          warmup=True,
          checkpoint_interval=None):

    # Data dimensions
    patch_dims = patch_generator.patch_dims
    num_labels = patch_generator.num_labels(num_patches)

    # Extract tensors from data sample
    input = lbann.Input()
    slice_points = [0]
    for _ in range(num_patches):
        patch_size = functools.reduce(operator.mul, patch_dims)
        slice_points.append(slice_points[-1] + patch_size)
    slice_points.append(slice_points[-1] + num_labels)
    sample = lbann.Slice(input, slice_points=str_list(slice_points))
    patches = [
        lbann.Reshape(sample, dims=str_list(patch_dims))
        for _ in range(num_patches)
    ]
    labels = lbann.Identity(sample)

    # Siamese network
    head_cnn = modules.ResNet(
        bn_statistics_group_size=bn_statistics_group_size)
    heads = [head_cnn(patch) for patch in patches]
    heads_concat = lbann.Concatenation(heads)

    # Classification network
    class_fc1 = modules.FcBnRelu(
        4096,
        statistics_group_size=bn_statistics_group_size,
        name='siamese_class_fc1',
        data_layout=fc_data_layout)
    class_fc2 = modules.FcBnRelu(
        4096,
        statistics_group_size=bn_statistics_group_size,
        name='siamese_class_fc2',
        data_layout=fc_data_layout)
    class_fc3 = lbann.modules.FullyConnectedModule(num_labels,
                                                   activation=lbann.Softmax,
                                                   name='siamese_class_fc3',
                                                   data_layout=fc_data_layout)
    x = class_fc1(heads_concat)
    x = class_fc2(x)
    probs = class_fc3(x)

    # Setup objective function
    cross_entropy = lbann.CrossEntropy([probs, labels])
    l2_reg_weights = set()
    for l in lbann.traverse_layer_graph(input):
        if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
            l2_reg_weights.update(l.weights)
    l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=0.0002)
    obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])

    # Setup model
    metrics = [
        lbann.Metric(lbann.CategoricalAccuracy([probs, labels]),
                     name='accuracy',
                     unit='%')
    ]
    callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]
    if checkpoint_interval:
        callbacks.append(
            lbann.CallbackCheckpoint(checkpoint_dir='ckpt',
                                     checkpoint_epochs=5))

    # Learning rate schedules
    if warmup:
        callbacks.append(
            lbann.CallbackLinearGrowthLearningRate(
                target=learning_rate * mini_batch_size / 128,
                num_epochs=5))
    callbacks.append(
        lbann.CallbackDropFixedLearningRate(drop_epoch=list(range(0, 100, 15)),
                                            amt=0.25))

    # Construct model
    model = lbann.Model(num_epochs,
                        layers=lbann.traverse_layer_graph(input),
                        objective_function=obj,
                        metrics=metrics,
                        callbacks=callbacks)

    # Setup optimizer
    opt = lbann.SGD(learn_rate=learning_rate, momentum=0.9)
    # opt = lbann.Adam(learn_rate=learning_rate, beta1=0.9, beta2=0.999, eps=1e-8)

    # Setup data reader
    data_reader = make_data_reader(num_patches)

    # Return experiment objects
    return model, data_reader, opt
def construct_model():
    """Construct LBANN model.

    JAG Wasserstein autoencoder model
    """
    import lbann

    # Layer graph
    input = lbann.Input(target_mode='N/A', name='inp_data')
    # data is 64*64*4 images + 15 scalar + 5 param
    inp_slice = lbann.Slice(input,
                            axis=0,
                            slice_points="0 16399 16404",
                            name='inp_slice')
    gt_y = lbann.Identity(inp_slice, name='gt_y')
    gt_x = lbann.Identity(inp_slice, name='gt_x')  # param not used

    zero = lbann.Constant(value=0.0, num_neurons='1', name='zero')
    one = lbann.Constant(value=1.0, num_neurons='1', name='one')

    y_dim = 16399  # image+scalar shape
    z_dim = 20     # latent space dim

    z = lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims="20")
    d1_real, d1_fake, d_adv, pred_y = jag_models.WAE(z_dim, y_dim)(z, gt_y)

    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, one], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zero], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, one], name='d_adv_bce')

    img_loss = lbann.MeanSquaredError([pred_y, gt_y])
    rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y, gt_y], scaling_factors="1 -1"))

    layers = list(lbann.traverse_layer_graph(input))

    # Setup objective function
    weights = set()
    src_layers = []
    dst_layers = []
    for l in layers:
        if (l.weights and "disc0" in l.name and "instance1" in l.name):
            src_layers.append(l.name)
        # freeze weights in disc2
        if (l.weights and "disc1" in l.name):
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)
    l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)
    d_adv_bce = lbann.LayerTerm(d_adv_bce, scale=0.01)
    obj = lbann.ObjectiveFunction(
        [d1_real_bce, d1_fake_bce, d_adv_bce, img_loss, rec_error, l2_reg])

    # Initialize check metric callback
    metrics = [lbann.Metric(img_loss, name='recon_error')]

    callbacks = [lbann.CallbackPrint(),
                 lbann.CallbackTimer(),
                 lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),
                                              destination_layers=list2str(dst_layers),
                                              batch_interval=2)]

    # Construct model
    num_epochs = 100
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=metrics,
                       objective_function=obj,
                       callbacks=callbacks)
name="bin_cross_entropy_sum", mode="sum") mean_squared_error = lbann.MeanSquaredError([reconstruction, image], name="mean_squared_error") layer_list = list(lbann.traverse_layer_graph(input_)) # Set up objective function layer_term1 = lbann.LayerTerm(bin_cross_entropy) layer_term2 = lbann.LayerTerm(kldiv) l2_reg = lbann.L2WeightRegularization(scale=0.0005) obj = lbann.ObjectiveFunction([layer_term1, layer_term2, l2_reg]) # Metrics metrics = [lbann.Metric(mean_squared_error, name="mean squared error")] # Callbacks callbacks = [ lbann.CallbackPrint(), lbann.CallbackTimer(), lbann.CallbackSaveImages(layers="image reconstruction", image_format="jpg") ] # Setup Model model = lbann.Model(args.num_epochs, layers=layer_list, objective_function=obj, metrics=metrics, callbacks=callbacks, summary_dir=".")
        lbann.ValueInitializer(values=str_list(np_x.flatten())),
    ),
    dims=str_list(np_x.shape),
)
lbann_y = FFTShift()(lbann_x, dims)
lbann_scales = lbann.WeightsLayer(
    weights=lbann.Weights(
        lbann.ValueInitializer(values=str_list(np_scales)),
        optimizer=lbann.NoOptimizer(),
    ),
    dims=str_list(np_scales.shape),
)
lbann_z = lbann.MatMul(lbann.Reshape(lbann_y, dims=str_list([1, -1])),
                       lbann.Reshape(lbann_scales, dims=str_list([-1, 1])))

# Construct LBANN model with metric checking and gradient checking
metric = lbann.Metric(lbann_z, name='metric')
callbacks = [
    lbann.CallbackCheckMetric(
        metric=metric.name,
        lower_bound=np_z - tol,
        upper_bound=np_z + tol,
        error_on_failure=True,
        execution_modes='test',
    ),
    lbann.CallbackCheckGradients(error_on_failure=True),
]
model = lbann.Model(
    epochs=0,
    layers=lbann.traverse_layer_graph([input_, lbann_x]),
    objective_function=lbann_z,
    metrics=metric,
tensor_ops_mode = lbann.ConvTensorOpsMode.NO_TENSOR_OPS
for l in layers:
    if type(l) == lbann.Convolution:
        l.conv_tensor_op_mode = tensor_ops_mode

# Setup objective function
l2_reg_weights = set()
for l in layers:
    if type(l) == lbann.Convolution or type(l) == lbann.FullyConnected:
        l2_reg_weights.update(l.weights)
l2_reg = lbann.L2WeightRegularization(weights=l2_reg_weights, scale=1e-4)
obj = lbann.ObjectiveFunction([cross_entropy, l2_reg])

# Setup model
metrics = [
    lbann.Metric(top1, name='top-1 accuracy', unit='%'),
    lbann.Metric(top5, name='top-5 accuracy', unit='%')
]
callbacks = [
    lbann.CallbackPrint(),
    lbann.CallbackTimer(),
    lbann.CallbackDropFixedLearningRate(drop_epoch=[30, 60, 80], amt=0.1)
]
if args.warmup:
    callbacks.append(
        lbann.CallbackLinearGrowthLearningRate(
            target=0.1 * args.mini_batch_size / 256,
            num_epochs=5))
model = lbann.Model(args.num_epochs,
                    layers=layers,
                    objective_function=obj,
def construct_model():
    """Construct LBANN model.

    ExaGAN model
    """
    import lbann

    # Layer graph
    input = lbann.Input(target_mode='N/A', name='inp_img')

    # label flipping
    label_flip_rand = lbann.Uniform(min=0, max=1, neuron_dims='1')
    label_flip_prob = lbann.Constant(value=0.01, num_neurons='1')
    one = lbann.GreaterEqual(label_flip_rand, label_flip_prob, name='is_real')
    zero = lbann.LogicalNot(one, name='is_fake')

    z = lbann.Reshape(lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims="64", name='noise_vec'),
                      dims='1 64')
    d1_real, d1_fake, d_adv, gen_img = ExaGAN.CosmoGAN()(input, z)

    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, one], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zero], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, one], name='d_adv_bce')

    layers = list(lbann.traverse_layer_graph(input))

    # Setup objective function
    weights = set()
    src_layers = []
    dst_layers = []
    for l in layers:
        if (l.weights and "disc1" in l.name and "instance1" in l.name):
            src_layers.append(l.name)
        # freeze weights in disc2, analogous to discrim.trainable=False in Keras
        if (l.weights and "disc2" in l.name):
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)
    #l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)
    obj = lbann.ObjectiveFunction([d1_real_bce, d1_fake_bce, d_adv_bce])

    # Initialize check metric callback
    metrics = [lbann.Metric(d1_real_bce, name='d_real'),
               lbann.Metric(d1_fake_bce, name='d_fake'),
               lbann.Metric(d_adv_bce, name='gen')]

    callbacks = [lbann.CallbackPrint(),
                 lbann.CallbackTimer(),
                 # Uncomment to dump output for plotting and further statistical analysis
                 #lbann.CallbackDumpOutputs(layers='inp_img gen_img_instance1_activation',
                 #                          execution_modes='train validation',
                 #                          directory='dump_outs',
                 #                          batch_interval=100,
                 #                          format='npy'),
                 lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),
                                              destination_layers=list2str(dst_layers),
                                              batch_interval=2)]

    # Construct model
    num_epochs = 20
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=metrics,
                       objective_function=obj,
                       callbacks=callbacks)
name="reconstruction", data_layout="model_parallel") dropout2 = lbann.Dropout(reconstruction, name="dropout2", data_layout="model_parallel", keep_prob=0.8) # Reconstruction mean_squared_error = lbann.MeanSquaredError([dropout2, image], name="mean_squared_error") layer_term = lbann.LayerTerm(mean_squared_error) obj = lbann.ObjectiveFunction(layer_term) metrics = [lbann.Metric(mean_squared_error, name=mean_squared_error.name)] img_strategy = lbann.TrackSampleIDsStrategy(input_layer_name=input_.name, num_tracked_images=10) summarize_images = lbann.CallbackSummarizeImages( selection_strategy=img_strategy, image_source_layer_name=reconstruction.name, epoch_interval=1) # Dump original image from input layer one time summarize_input_layer = lbann.CallbackSummarizeImages( selection_strategy=img_strategy, image_source_layer_name=input_.name, epoch_interval=10000)
# Loss function and accuracy
loss = lbann.CrossEntropy(probs, labels)
acc = lbann.CategoricalAccuracy(probs, labels)

# ----------------------------------
# Setup experiment
# ----------------------------------

# Setup model
mini_batch_size = 64
num_epochs = 20
model = lbann.Model(num_epochs,
                    layers=lbann.traverse_layer_graph(input_),
                    objective_function=loss,
                    metrics=[lbann.Metric(acc, name='accuracy', unit='%')],
                    callbacks=[
                        lbann.CallbackPrintModelDescription(),
                        lbann.CallbackPrint(),
                        lbann.CallbackTimer()
                    ])

# Setup optimizer
opt = lbann.SGD(learn_rate=0.01, momentum=0.9)

# Setup data reader
data_reader = data.mnist.make_data_reader()

# Setup trainer
trainer = lbann.Trainer(mini_batch_size=mini_batch_size)
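# --- Illustrative launch sketch (not part of the original script) ---
# Assumption: the LBANN Python front-end's `lbann.run` launcher is available
# and the `trainer`, `model`, `data_reader`, and `opt` objects defined above
# are in scope; the job name is a hypothetical example.
if __name__ == '__main__':
    lbann.run(trainer, model, data_reader, opt, job_name='lbann_example')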
def construct_jag_wae_model(ydim, zdim, mcf, useCNN, dump_models,
                            ltfb_batch_interval, num_epochs):
    """Construct LBANN model.

    JAG Wasserstein autoencoder model
    """

    # Layer graph
    input = lbann.Input(data_field='samples', name='inp_data')
    # data is 64*64*4 images + 15 scalar + 5 param
    #inp_slice = lbann.Slice(input, axis=0, slice_points="0 16399 16404", name='inp_slice')
    inp_slice = lbann.Slice(input,
                            axis=0,
                            slice_points=str_list([0, ydim, ydim + 5]),
                            name='inp_slice')
    gt_y = lbann.Identity(inp_slice, name='gt_y')
    gt_x = lbann.Identity(inp_slice, name='gt_x')  # param not used

    zero = lbann.Constant(value=0.0, num_neurons='1', name='zero')
    one = lbann.Constant(value=1.0, num_neurons='1', name='one')

    z_dim = 20  # latent space dim

    z = lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims="20")
    model = macc_network_architectures.MACCWAE(zdim, ydim, cf=mcf, use_CNN=useCNN)
    d1_real, d1_fake, d_adv, pred_y = model(z, gt_y)

    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, one], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zero], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, one], name='d_adv_bce')
    img_loss = lbann.MeanSquaredError([pred_y, gt_y])
    rec_error = lbann.L2Norm2(
        lbann.WeightedSum([pred_y, gt_y], scaling_factors="1 -1"))

    layers = list(lbann.traverse_layer_graph(input))

    # Setup objective function
    weights = set()
    src_layers = []
    dst_layers = []
    for l in layers:
        if (l.weights and "disc0" in l.name and "instance1" in l.name):
            src_layers.append(l.name)
        # freeze weights in disc2
        if (l.weights and "disc1" in l.name):
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)
    l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)
    d_adv_bce = lbann.LayerTerm(d_adv_bce, scale=0.01)
    obj = lbann.ObjectiveFunction(
        [d1_real_bce, d1_fake_bce, d_adv_bce, img_loss, rec_error, l2_reg])

    # Initialize check metric callback
    metrics = [lbann.Metric(img_loss, name='recon_error')]
    #pred_y = macc_models.MACCWAE.pred_y_name

    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackTimer(),
        lbann.CallbackPrintModelDescription(),
        lbann.CallbackSaveModel(dir=dump_models),
        lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),
                                     destination_layers=list2str(dst_layers),
                                     batch_interval=2)
    ]

    if ltfb_batch_interval > 0:
        callbacks.append(
            lbann.CallbackLTFB(batch_interval=ltfb_batch_interval,
                               metric='recon_error',
                               low_score_wins=True,
                               exchange_hyperparameters=True))

    # Construct model
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=metrics,
                       objective_function=obj,
                       callbacks=callbacks)
def make_model(num_vertices=None,
               node_features=None,
               num_classes=None,
               dataset=None,
               kernel_type='GCN',
               callbacks=None,
               num_epochs=1):
    '''Construct a model DAG using one of the Graph Kernels

    Args:
        num_vertices (int): Number of vertices of each graph (default: None)
        node_features (int): Number of features per node (default: None)
        num_classes (int): Number of classes as targets (default: None)
        dataset (str): Preset data set to use. Either a dataset parameter has
                       to be supplied or all of num_vertices, node_features,
                       and num_classes have to be supplied. (default: None)
        kernel_type (str): Graph kernel to use in model. Expected one of
                           GCN, GIN, Graph, or GatedGraph (default: GCN)
        callbacks (list): Callbacks for the model. If set to None, the model
                          description, GPU usage, training output, and timer
                          are reported. (default: None)
        num_epochs (int): Number of epochs to run (default: 1)

    Returns:
        (lbann.Model): A model object with the supplied callbacks, dataset
                       presets, and graph kernels.
    '''

    assert num_vertices != dataset  # Ensure at least one of the values is set

    if dataset is not None:
        assert num_vertices is None

        if dataset == 'MNIST':
            num_vertices = 75
            num_classes = 10
            node_features = 1
        elif dataset == 'PROTEINS':
            num_vertices = 100
            num_classes = 2
            node_features = 3
        else:
            raise Exception("Unknown dataset")

    assert num_vertices is not None
    assert num_classes is not None
    assert node_features is not None

    #----------------------------------
    # Reshape and Slice Input Tensor
    #----------------------------------

    input_ = lbann.Input(target_mode='classification')

    # Input dimensions should be (num_vertices * node_features + num_vertices^2 + num_classes)
    # Input should have at least two children since the target is classification

    data = lbann_Graph_Data(input_, num_vertices, node_features, num_classes)

    feature_matrix = data.x
    adj_matrix = data.adj
    target = data.y

    #----------------------------------
    # Perform Graph Convolution
    #----------------------------------

    if kernel_type == 'GIN':
        x = GINConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GCN':
        x = GCNConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'Graph':
        x = GraphConvLayer(feature_matrix, adj_matrix)
    elif kernel_type == 'GatedGraph':
        x = GATConvLayer(feature_matrix, adj_matrix)
    else:
        raise ValueError('Invalid graph kernel specifier "{}" received. '
                         'Expected one of: GIN, GCN, Graph or GatedGraph'.format(kernel_type))

    out_channel = x.shape[1]

    #----------------------------------
    # Apply Reduction on Node Features
    #----------------------------------

    average_vector = lbann.Constant(value=1 / num_vertices,
                                    num_neurons=str_list([1, num_vertices]),
                                    name="Average_Vector")
    x = x.get_mat(out_channel)

    x = lbann.MatMul(average_vector, x, name="Node_Feature_Reduction")

    # x is now a vector with out_channel dimensions

    x = lbann.Reshape(x, dims=str_list([out_channel]), name="Squeeze")
    x = lbann.FullyConnected(x, num_neurons=64, name="hidden_layer_1")
    x = lbann.Relu(x, name="hidden_layer_1_activation")
    x = lbann.FullyConnected(x,
                             num_neurons=num_classes,
                             name="Output_Fully_Connected")

    #----------------------------------
    # Loss Function and Accuracy
    #----------------------------------

    probs = lbann.Softmax(x, name="Softmax")
    loss = lbann.CrossEntropy(probs, target, name="Cross_Entropy_Loss")
    accuracy = lbann.CategoricalAccuracy(probs, target, name="Accuracy")

    layers = lbann.traverse_layer_graph(input_)

    if callbacks is None:
        print_model = lbann.CallbackPrintModelDescription()  # Prints initial model after setup
        training_output = lbann.CallbackPrint(interval=1,
                                              print_global_stat_only=False)  # Prints training progress
        gpu_usage = lbann.CallbackGPUMemoryUsage()
        timer = lbann.CallbackTimer()
        callbacks = [print_model, training_output, gpu_usage, timer]
    else:
        if isinstance(callbacks, list):
            callbacks = callbacks

    metrics = [lbann.Metric(accuracy, name='accuracy', unit="%")]

    model = lbann.Model(num_epochs,
                        layers=layers,
                        objective_function=loss,
                        metrics=metrics,
                        callbacks=callbacks)
    return model
def construct_macc_surrogate_model(xdim, ydim, zdim, wae_mcf, surrogate_mcf,
                                   lambda_cyc, useCNN, dump_models,
                                   pretrained_dir, ltfb_batch_interval,
                                   num_epochs):
    """Construct MACC surrogate model.

    See https://arxiv.org/pdf/1912.08113.pdf for the model architecture and other details.
    """

    # Layer graph
    input = lbann.Input(data_field='samples', name='inp_data')
    # data is 64*64*4 images + 15 scalar + 5 param
    inp_slice = lbann.Slice(input,
                            axis=0,
                            slice_points=str_list([0, ydim, ydim + xdim]),
                            name='inp_slice')
    gt_y = lbann.Identity(inp_slice, name='gt_y')
    gt_x = lbann.Identity(inp_slice, name='gt_x')  # param not used

    zero = lbann.Constant(value=0.0, num_neurons='1', name='zero')
    one = lbann.Constant(value=1.0, num_neurons='1', name='one')

    z = lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims="20")
    wae = macc_network_architectures.MACCWAE(zdim,
                                             ydim,
                                             cf=wae_mcf,
                                             use_CNN=useCNN)  # pretrained, freeze
    inv = macc_network_architectures.MACCInverse(xdim, cf=surrogate_mcf)
    fwd = macc_network_architectures.MACCForward(zdim, cf=surrogate_mcf)

    y_pred_fwd = wae.encoder(gt_y)

    param_pred_ = wae.encoder(gt_y)
    input_fake = inv(param_pred_)

    output_cyc = fwd(input_fake)
    y_image_re2 = wae.decoder(output_cyc)

    '''**** Train cycleGAN input params <--> latent space of (images, scalars) ****'''
    output_fake = fwd(gt_x)
    y_image_re = wae.decoder(output_fake)

    param_pred2_ = wae.encoder(y_image_re)
    input_cyc = inv(param_pred2_)

    L_l2_x = lbann.MeanSquaredError(input_fake, gt_x)
    L_cyc_x = lbann.MeanSquaredError(input_cyc, gt_x)
    L_l2_y = lbann.MeanSquaredError(output_fake, y_pred_fwd)
    L_cyc_y = lbann.MeanSquaredError(output_cyc, y_pred_fwd)

    # @todo slice here to separate scalar from image
    img_sca_loss = lbann.MeanSquaredError(y_image_re, gt_y)
    # L_cyc = L_cyc_y + L_cyc_x
    L_cyc = lbann.Add(L_cyc_y, L_cyc_x)

    # loss_gen0 = L_l2_y + lambda_cyc*L_cyc
    loss_gen0 = lbann.WeightedSum([L_l2_y, L_cyc],
                                  scaling_factors=f'1 {lambda_cyc}')
    # loss_gen1 = L_l2_x + lambda_cyc*L_cyc_y
    loss_gen1 = lbann.WeightedSum([L_l2_x, L_cyc_y],
                                  scaling_factors=f'1 {lambda_cyc}')

    layers = list(lbann.traverse_layer_graph(input))
    weights = set()
    # Freeze appropriate (pretrained) weights
    pretrained_models = ["wae"]  # add macc?
    for l in layers:
        for idx in range(len(pretrained_models)):
            if (l.weights and pretrained_models[idx] in l.name):
                for w in range(len(l.weights)):
                    l.weights[w].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)

    l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)
    #d_adv_bce = lbann.LayerTerm(d_adv_bce, scale=0.01)

    # Setup objective function
    obj = lbann.ObjectiveFunction([loss_gen0, loss_gen1, l2_reg])

    # Initialize check metric callback
    metrics = [
        lbann.Metric(img_sca_loss, name='fw_loss'),
        lbann.Metric(L_l2_x, name='inverse loss'),
        lbann.Metric(L_cyc_y, name='output cycle loss'),
        lbann.Metric(L_cyc_x, name='param cycle loss')
    ]

    callbacks = [
        lbann.CallbackPrint(),
        lbann.CallbackSaveModel(dir=dump_models),
        lbann.CallbackLoadModel(dirs=str(pretrained_dir)),
        lbann.CallbackTimer()
    ]

    if ltfb_batch_interval > 0:
        callbacks.append(
            lbann.CallbackLTFB(batch_interval=ltfb_batch_interval,
                               metric='fw_loss',
                               low_score_wins=True,
                               exchange_hyperparameters=True))

    # Construct model
    return lbann.Model(num_epochs,
                       weights=weights,
                       layers=layers,
                       metrics=metrics,
                       objective_function=obj,
                       callbacks=callbacks)
def construct_model(run_args):
    """Construct LBANN model.

    Initial model for ATOM molecular VAE
    """
    import lbann

    print("Dump model dir ", run_args.dump_model_dir)
    assert run_args.dump_model_dir, "evaluate script assumes a pretrained WAE model"

    pad_index = run_args.pad_index
    assert pad_index is not None

    sequence_length = run_args.sequence_length
    assert sequence_length is not None

    print("sequence length is {}".format(sequence_length))
    data_layout = "data_parallel"

    # Layer graph
    input_ = lbann.Identity(lbann.Input(name='inp', data_field='samples'),
                            name='inp1')
    wae_loss = []
    input_feature_dims = sequence_length

    embedding_size = run_args.embedding_dim
    dictionary_size = run_args.num_embeddings
    assert embedding_size is not None
    assert dictionary_size is not None

    save_output = True if run_args.dump_outputs_dir else False
    print("save output? ", save_output, "out dir ", run_args.dump_outputs_dir)

    z = lbann.Gaussian(mean=0.0, stdev=1.0, neuron_dims=run_args.z_dim)
    waemodel = molwae.MolWAE(input_feature_dims,
                             dictionary_size,
                             embedding_size,
                             pad_index,
                             run_args.z_dim,
                             save_output)

    recon, d1_real, d1_fake, d_adv, arg_max = waemodel(input_, z)

    zero = lbann.Constant(value=0.0, num_neurons='1', name='zero')
    one = lbann.Constant(value=1.0, num_neurons='1', name='one')

    d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real, one], name='d1_real_bce')
    d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake, zero], name='d1_fake_bce')
    d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv, one], name='d_adv_bce')

    wae_loss.append(recon)

    layers = list(lbann.traverse_layer_graph(input_))

    # Setup objective function
    weights = set()
    src_layers = []
    dst_layers = []
    for l in layers:
        if (l.weights and "disc0" in l.name and "instance1" in l.name):
            src_layers.append(l.name)
        # freeze weights in disc2
        if (l.weights and "disc1" in l.name):
            dst_layers.append(l.name)
            for idx in range(len(l.weights)):
                l.weights[idx].optimizer = lbann.NoOptimizer()
        weights.update(l.weights)

    l2_weights = [
        w for w in weights if not isinstance(w.optimizer, lbann.NoOptimizer)
    ]
    l2_reg = lbann.L2WeightRegularization(weights=l2_weights, scale=1e-4)

    wae_loss.append(d1_real_bce)
    wae_loss.append(d_adv_bce)
    wae_loss.append(d1_fake_bce)
    wae_loss.append(l2_reg)
    print("LEN wae loss ", len(wae_loss))

    obj = lbann.ObjectiveFunction(wae_loss)

    # Initialize check metric callback
    metrics = [
        lbann.Metric(d_adv_bce, name='adv_loss'),
        lbann.Metric(recon, name='recon')
    ]

    callbacks = [
        lbann.CallbackPrint(),
        #lbann.CallbackStepLearningRate(step=10, amt=0.5),
        lbann.CallbackTimer()
    ]

    callbacks.append(
        lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),
                                     destination_layers=list2str(dst_layers),
                                     batch_interval=2))

    # Dump output (activation) for post processing
    if run_args.dump_outputs_dir:
        pred_tensor = lbann.Concatenation(arg_max, name='pred_tensor')
        callbacks.append(
            lbann.CallbackDumpOutputs(
                batch_interval=run_args.dump_outputs_interval,
                execution_modes='test',
                directory=run_args.dump_outputs_dir,
                layers=f'inp pred_tensor {waemodel.q_mu.name}'))

    # Construct model
    return lbann.Model(run_args.num_epochs,
                       weights=weights,
                       layers=layers,
                       objective_function=obj,
                       metrics=metrics,
                       callbacks=callbacks)
pearson_r_sqrt = lbann.Sqrt(pearson_r_mult,
                            name="pearson_r_sqrt",
                            data_layout="model_parallel")

pearson_r = lbann.Divide([pearson_r_cov, pearson_r_sqrt],
                         name="pearson_r",
                         data_layout="model_parallel")

layer_list = list(lbann.traverse_layer_graph(input_))

# Set up objective function
layer_term = lbann.LayerTerm(mean_squared_error)
obj = lbann.ObjectiveFunction(layer_term)

# Metrics
metrics = [lbann.Metric(pearson_r, name="Pearson correlation")]

# Callbacks
callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

# Setup Model
model = lbann.Model(args.num_epochs,
                    layers=layer_list,
                    objective_function=obj,
                    metrics=metrics,
                    callbacks=callbacks,
                    summary_dir=".")

# Setup optimizer
opt = lbann.contrib.args.create_optimizer(args)
pearson_r_cov = lbann.Covariance([pred, responses], name="pearson_r_cov")
pearson_r_var1 = lbann.Variance(responses, name="pearson_r_var1")
pearson_r_var2 = lbann.Variance(pred, name="pearson_r_var2")
pearson_r_mult = lbann.Multiply([pearson_r_var1, pearson_r_var2],
                                name="pearson_r_mult")
pearson_r_sqrt = lbann.Sqrt(pearson_r_mult, name="pearson_r_sqrt")
eps = lbann.Constant(value=1e-07, hint_layer=pearson_r_sqrt)
pearson_r = lbann.Divide([pearson_r_cov, lbann.Add(pearson_r_sqrt, eps)],
                         name="pearson_r")

metrics = [lbann.Metric(mse, name='mse')]
metrics.append(lbann.Metric(pearson_r, name='pearson_r'))

callbacks = [lbann.CallbackPrint(), lbann.CallbackTimer()]

layers = list(lbann.traverse_layer_graph([images, responses]))
model = lbann.Model(args.num_epochs,
                    layers=layers,
                    metrics=metrics,
                    objective_function=mse,
                    callbacks=callbacks)

# Load data reader from prototext
data_reader_proto = lbann.lbann_pb2.LbannPB()
with open(data_reader_prototext, 'r') as f:
    txtf.Merge(f.read(), data_reader_proto)
def gen_layers(latent_dim, number_of_atoms):
    '''Generates the model for the 3D Convolutional Auto Encoder.

       Returns the Directed Acyclic Graph (DAG) that the LBANN model will run on.
    '''

    input_ = lbann.Input(target_mode="reconstruction")
    tensors = lbann.Identity(input_)

    tensors = lbann.Reshape(tensors, dims="11 32 32 32", name="Sample")
    # Input tensor shape is (number_of_atoms)x32x32x32

    # Encoder
    x = lbann.Identity(tensors)
    for i in range(4):
        out_channels = latent_dim // (2**(3 - i))

        x = lbann.Convolution(x,
                              num_dims=3,
                              num_output_channels=out_channels,
                              num_groups=1,
                              conv_dims_i=4,
                              conv_strides_i=2,
                              conv_dilations_i=1,
                              conv_pads_i=1,
                              has_bias=True,
                              name="Conv_{0}".format(i))

        x = lbann.BatchNormalization(x, name="Batch_NORM_{0}".format(i + 1))
        x = lbann.LeakyRelu(x, name="Conv_{0}_Activation".format(i + 1))

    # Shape: (latent_dim)x2x2x2
    encoded = lbann.Convolution(x,
                                num_dims=3,
                                num_output_channels=latent_dim,
                                num_groups=1,
                                conv_dims_i=2,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                conv_pads_i=0,
                                has_bias=True,
                                name="encoded")

    # Shape: (latent_dim)x1x1x1

    # Decoder
    x = lbann.Deconvolution(encoded,
                            num_dims=3,
                            num_output_channels=number_of_atoms * 16,
                            num_groups=1,
                            conv_dims_i=4,
                            conv_pads_i=0,
                            conv_strides_i=2,
                            conv_dilations_i=1,
                            has_bias=True,
                            name="Deconv_1")
    x = lbann.BatchNormalization(x, name="BN_D1")
    x = lbann.Tanh(x, name="Deconv_1_Activation")

    for i in range(3):
        out_channels = number_of_atoms * (2**(2 - i))

        x = lbann.Deconvolution(x,
                                num_dims=3,
                                num_output_channels=out_channels,
                                num_groups=1,
                                conv_dims_i=4,
                                conv_pads_i=1,
                                conv_strides_i=2,
                                conv_dilations_i=1,
                                has_bias=True,
                                name="Deconv_{0}".format(i + 2))
        x = lbann.BatchNormalization(x, name="BN_D{0}".format(i + 2))

        if i != 2:  # Skip the last activation layer because we want to dump the outputs
            x = lbann.Tanh(x, name="Deconv_{0}_Activation".format(i + 2))

    decoded = lbann.Tanh(x, name="decoded")

    img_loss = lbann.MeanSquaredError([decoded, tensors])

    metrics = [lbann.Metric(img_loss, name='recon_error')]

    # ----------------------------------
    # Set up DAG
    # ----------------------------------

    layers = lbann.traverse_layer_graph(input_)  # Generate model DAG
    return layers, img_loss, metrics
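# --- Illustrative usage sketch (not part of the original code) ---
# Assumption: hypothetical latent/atom sizes; shows how the DAG, loss, and
# metrics returned by gen_layers might be assembled into an LBANN model.
layers, img_loss, metrics = gen_layers(latent_dim=64, number_of_atoms=11)
example_model = lbann.Model(epochs=10,
                            layers=layers,
                            objective_function=img_loss,
                            metrics=metrics,
                            callbacks=[lbann.CallbackPrint(),
                                       lbann.CallbackTimer()])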
walk_embeddings = lbann.Identity(embeddings_slice)

# Skip-Gram objective function
positive_loss = model.skip_gram.positive_samples_loss(
    walk_length,
    lbann.Identity(walk_embeddings),
    lbann.Identity(walk_embeddings),
    scale_decay=0.8,
)
negative_loss = model.skip_gram.negative_samples_loss(
    walk_embeddings,
    negative_samples_embeddings,
)
obj.append(positive_loss)
obj.append(lbann.WeightedSum(negative_loss, scaling_factors='2'))
metrics.append(lbann.Metric(positive_loss, name='positive loss'))
metrics.append(lbann.Metric(negative_loss, name='negative loss'))

# Perform computation at double precision
for l in lbann.traverse_layer_graph(input_):
    l.datatype = lbann.DataType.DOUBLE
    for w in l.weights:
        w.datatype = lbann.DataType.DOUBLE

# ----------------------------------
# Run LBANN
# ----------------------------------

# Create optimizer
opt = lbann.SGD(learn_rate=args.learning_rate)
# Construct layer graph
# ----------------------------------

# Input data
input = lbann.Input(io_buffer='partitioned', target_mode='regression')
universes = lbann.Identity(input)
secrets = lbann.Identity(input)

# CosmoFlow
x = CosmoFlow(args.output_size, args.input_width).forward(universes)

# Loss function
loss = lbann.MeanSquaredError([x, secrets])

# Metrics
metrics = [lbann.Metric(loss, name="MSE", unit="")]

# Callbacks
callbacks = [
    lbann.CallbackPrint(),
    lbann.CallbackTimer(),
    lbann.CallbackPolyLearningRate(
        power=1.0,
        num_epochs=100,  # TODO: Warn if args.epochs < 100
    ),
    lbann.CallbackGPUMemoryUsage(),
    lbann.CallbackDumpOutputs(directory="dump_acts/",
                              layers="cosmoflow_module1_fc3_instance1 layer3",
                              execution_modes="test"),
    lbann.CallbackProfiler(skip_init=True)
]