Code example #1
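This snippet prints the model summary, registers per-event plotting callbacks for ten validation events, saves the module configuration, and sets the hyperparameters of the object condensation loss.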
train.keras_model.summary()  # summary() prints directly and returns None, so no print() needed

nbatch = 100000  # placeholder: the original value was lost in extraction; this is an upper limit on vertices per batch

verbosity = 2
import os
# assumed import: the plotting callback ships with HGCalML; the module path may differ between checkouts
from plotting_callbacks import plotEventDuringTraining

samplepath = train.val_data.getSamplePath(train.val_data.samples[0])
callbacks = []
for i in range(10):
    plotoutdir = train.outputDir + "/event_" + str(i + 2)
    os.makedirs(plotoutdir, exist_ok=True)
    callbacks.append(
        plotEventDuringTraining(outputfile=plotoutdir + "/sn",
                                samplefile=samplepath,
                                after_n_batches=4000,
                                batchsize=100000,
                                on_epoch_end=False,
                                use_event=2 + i))

from configSaver import copyModules
copyModules(train.outputDir)  # copy the module sources to the output dir for reproducibility

from betaLosses import config as loss_config

# hyperparameters of the object condensation loss
loss_config.energy_loss_weight = 0.0001
loss_config.use_energy_weights = False
loss_config.q_min = 0.5                  # minimum charge per vertex in the condensation potential
loss_config.no_beta_norm = False
loss_config.potential_scaling = 1.
loss_config.s_b = 1.                     # background (noise) suppression strength
loss_config.position_loss_weight = 0.00001
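
Both examples assume a train object created earlier in the script. Below is a minimal sketch of that setup, assuming DeepJetCore's training_base API as used by HGCalML; my_model and the learning rate value are placeholders, not taken from the original.

from DeepJetCore.training.training_base import training_base

learningrate = 1e-4  # placeholder value
train = training_base(testrun=False, resumeSilently=True, renewtokens=False)

if not train.modelSet():  # skip model construction when resuming from a checkpoint
    train.setModel(my_model)  # my_model: hypothetical function building the keras model
    train.compileModel(learningrate=learningrate, loss=None)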
Code example #2
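This variant plots events 6 to 9, publishes the plots to a web area via an scp path, and then starts the training with a cyclic learning-rate schedule.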
samplepath = train.val_data.getSamplePath(train.val_data.samples[0])
print("using sample for plotting ",samplepath)
callbacks = []

import os
# assumed imports, as in example #1; module paths may differ between checkouts
from plotting_callbacks import plotEventDuringTraining
from clr_callback import CyclicLR

# scp target used to publish the plots; the user@host part appears redacted in the source and is kept as-is
publishpath = '[email protected]:/eos/home-j/jkiesele/www/HGCalML_trainings/' + os.path.basename(os.path.normpath(train.outputDir))
for ev in range(6, 10):
    plotoutdir = train.outputDir + "/event_" + str(ev)
    os.makedirs(plotoutdir, exist_ok=True)
    callbacks.append(
        plotEventDuringTraining(
            outputfile=plotoutdir + "/sn",
            samplefile=samplepath,
            after_n_batches=200,
            batchsize=100000,
            on_epoch_end=False,
            publish=publishpath + "_event_" + str(ev),
            use_event=ev)
    )
    
# learningrate, nbatch and verbosity are assumed to be defined earlier in the script
model, history = train.trainModel(nepochs=1,
                                  run_eagerly=True,
                                  batchsize=nbatch,
                                  batchsize_use_sum_of_squares=False,
                                  checkperiod=1,  # saves a checkpoint model every N epochs
                                  verbose=verbosity,
                                  backup_after_batches=100,
                                  additional_callbacks=callbacks +
                                  [CyclicLR(base_lr=learningrate,
                                            max_lr=learningrate * 5.,
                                            step_size=100)])  # step_size is illustrative; the original snippet breaks off mid-call
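
The cyclic schedule sweeps the learning rate between base_lr and max_lr over step_size batches, which can help convergence compared to a fixed rate; run_eagerly=True disables graph compilation so the custom loss is easier to debug, at a noticeable cost in speed.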