Code Example #1
from training_base import training_base
from Losses import loss_NLL

# training_base also does all the command-line parsing
train = training_base(testrun=False)

if not train.modelSet():
    from DeepJet_models_ResNet import resnet_model

    train.setModel(resnet_model)

    train.compileModel(learningrate=0.0004,
                       loss=['categorical_crossentropy', loss_NLL],
                       metrics=['accuracy'],
                   loss_weights=[1., 1e-15])  # second (NLL) term effectively disabled

model, history = train.trainModel(nepochs=50,
                                  batchsize=10000,
                                  stop_patience=300,
                                  lr_factor=0.5,
                                  lr_patience=10,
                                  lr_epsilon=0.0001,
                                  lr_cooldown=2,
                                  lr_minimum=0.0001,
                                  maxqsize=100)
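
loss_NLL comes from DeepJet's Losses module; the snippet does not show its body. As a hedged sketch of the general idea only, a Gaussian negative log-likelihood loss for a network that emits (mean, sigma) pairs could look like this (the output layout and the name gaussian_nll_sketch are assumptions, not the repository's code):

import keras.backend as K

def gaussian_nll_sketch(y_true, y_pred):
    # Assumed layout: column 0 is the predicted mean, column 1 the
    # predicted uncertainty. This illustrates an NLL-style loss; it is
    # not the actual Losses.loss_NLL implementation.
    mu = y_pred[:, 0:1]
    sigma = K.maximum(y_pred[:, 1:2], K.epsilon())  # keep sigma positive
    # Negative log-likelihood of y_true under N(mu, sigma^2),
    # dropping the constant 0.5*log(2*pi) term.
    return K.mean(K.log(sigma) + 0.5 * K.square((y_true - mu) / sigma),
                  axis=-1)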
Code Example #2
import setGPU  # auto-selects a free GPU on import
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'

from training_base import training_base
from Losses import loss_NLL
import sys

class MyClass(object):
    # Bare attribute container standing in for parsed command-line
    # arguments (definition assumed; not shown in the original snippet).
    pass

args = MyClass()
args.inputDataCollection = '/cms-sc17/convert_20170717_ak8_deepDoubleB_db_sv_train_val/dataCollection.dc'
args.outputDir = 'train_deep_sv_removals_test'

TrainBool = True
EvalBool = True

# training_base also does all the command-line parsing
train = training_base(testrun=False, args=args)

if TrainBool:
    if not train.modelSet():
        from DeepJet_models_removals import deep_model_removal_sv as model
        from DeepJet_models_removals import Slicer1D  # custom layer used by the model (assumed)

        train.setModel(model)

        train.compileModel(learningrate=0.001,
                           loss=['categorical_crossentropy'],
                           metrics=['accuracy'])

        model, history, callbacks = train.trainModel(nepochs=2)  # further keyword arguments omitted in the source
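
Slicer1D is imported alongside the model but its definition is not shown. Judging by the module name (DeepJet_models_removals), it plausibly slices the feature axis so selected inputs can be removed; a hedged sketch of such a layer follows (the name Slicer1DSketch and everything below are assumptions, not the repository's code):

from keras.layers import Layer

class Slicer1DSketch(Layer):
    # Keeps features [start:stop) along the last axis; illustrative only.
    def __init__(self, start, stop, **kwargs):
        super(Slicer1DSketch, self).__init__(**kwargs)
        self.start = start
        self.stop = stop

    def call(self, inputs):
        return inputs[..., self.start:self.stop]

    def compute_output_shape(self, input_shape):
        return input_shape[:-1] + (self.stop - self.start,)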
Code Example #3
from training_base import training_base
from MultiDataCollection import MultiDataCollection
from pdb import set_trace

# training_base also does all the command-line parsing
train = training_base(testrun=False, collection_class=MultiDataCollection)
print('Inited')
sizes = train.train_data.sizes
# normalise because the samples have different sizes
norm = float(sizes[2]) / sizes[1]
train.train_data.setFlags([[1, 0], [0, norm], [0, 1]])
train.train_data.addYs([[0], [1], [0]])

# pull one batch from the generator and drop into the debugger to inspect it
evt = next(train.train_data.generator())
set_trace()
train.val_data.setFlags([[1, 0], [0, norm], [0, 1]])
train.val_data.addYs([[0], [1], [0]])

if not train.modelSet():
    from models import dense_model_gradientReversal
    print('Setting model')
    train.setModel(dense_model_gradientReversal, dropoutRate=0.1)

    train.compileModel(
        learningrate=0.003,
        loss=['categorical_crossentropy', 'binary_crossentropy'],
        #loss_weights=[1., 0.000000000001],
        metrics=['accuracy'])

model, history = train.trainModel(nepochs=50,
                                  batchsize=5000)  # further keyword arguments omitted in the source
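
The model name dense_model_gradientReversal points to a domain-adaptation setup: a gradient reversal layer is the identity on the forward pass but multiplies the gradient by a negative constant on the backward pass, so the adversarial head trains against the shared features. The repository's layer is not shown; below is a hedged TensorFlow/Keras sketch of the technique (the class name and the hp_lambda knob are assumptions):

import tensorflow as tf
from keras.layers import Layer

class GradientReversalSketch(Layer):
    # Forward pass: identity. Backward pass: gradient scaled by -hp_lambda.
    def __init__(self, hp_lambda=1.0, **kwargs):
        super(GradientReversalSketch, self).__init__(**kwargs)
        self.hp_lambda = hp_lambda

    def call(self, inputs):
        @tf.custom_gradient
        def _reverse(x):
            def grad(dy):
                return -self.hp_lambda * dy
            return tf.identity(x), grad
        return _reverse(inputs)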
Code Example #4
from training_base import training_base
from Losses import loss_NLL

# training_base also does all the command-line parsing
train = training_base(testrun=True)

from models import convolutional_model_broad_map_reg

train.setModel(convolutional_model_broad_map_reg, dropoutRate=0.1)

train.compileModel(learningrate=0.005,
                   loss=['categorical_crossentropy', loss_NLL],
                   metrics=['accuracy'])

model, history = train.trainModel(nepochs=5,
                                  batchsize=250,
                                  stop_patience=300,
                                  lr_factor=0.5,
                                  lr_patience=10,
                                  lr_epsilon=0.0001,
                                  lr_cooldown=2,
                                  lr_minimum=0.0001,
                                  maxqsize=10)
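
The stop_patience and lr_* keywords these scripts pass to trainModel mirror standard Keras callbacks. How training_base wires them up internally is not shown here; under that assumption, the plain-Keras equivalent of the settings above would be roughly:

from keras.callbacks import EarlyStopping, ReduceLROnPlateau

callbacks = [
    EarlyStopping(monitor='val_loss', patience=300),       # stop_patience
    ReduceLROnPlateau(monitor='val_loss',
                      factor=0.5,       # lr_factor
                      patience=10,      # lr_patience
                      epsilon=0.0001,   # lr_epsilon (min_delta in newer Keras)
                      cooldown=2,       # lr_cooldown
                      min_lr=0.0001),   # lr_minimum
]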
Code Example #5
from DeepJet_models_final import conv_model_final as trainingModel
from training_base import training_base
from eval_funcs import loadModel, makeRoc, _byteify, makeLossPlot, makeComparisonPlots, makeMetricPlots

# dayinfo, opts, trainDataCollection, testDataCollection, sampleDatasets_pf_cpf_sv,
# removedVars, TrainBool and MyClass are defined earlier in the full script
# (not shown in this snippet).
trainDir = dayinfo + "_train" + opts.n
inputTrainDataCollection = trainDataCollection
inputTestDataCollection = testDataCollection
inputDataset = sampleDatasets_pf_cpf_sv

if TrainBool:
    args = MyClass()
    args.inputDataCollection = inputTrainDataCollection
    args.outputDir = trainDir

    #also does all the parsing
    train = training_base(splittrainandtest=0.9, testrun=False, args=args)
    if not train.modelSet():
        train.setModel(trainingModel, inputDataset, removedVars)

        train.compileModel(learningrate=0.001,
                           # other losses: categorical_crossentropy,
                           # kullback_leibler_divergence, and more at
                           # https://keras.io/losses/
                           loss=['binary_crossentropy'],
                           metrics=['accuracy', 'binary_accuracy', 'MSE', 'MSLE'],
                           loss_weights=[1.])

        model, history, callbacks = train.trainModel(nepochs=1,
                                                     batchsize=1024,
                                                     stop_patience=1000,
                                                     lr_factor=0.7)  # further keyword arguments omitted in the source
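
The snippet ends before the evaluation step, but the makeRoc import above signals what comes after training: scoring the test collection and drawing a ROC curve. The repository helper itself is not shown, so this is only a generic, hedged sketch of the technique with scikit-learn (the placeholder arrays stand in for real labels and model.predict() scores):

import numpy as np
from sklearn.metrics import roc_curve, auc

# Placeholder data for illustration; in the real script y_true would be
# the test labels and y_score the model's predicted signal probabilities.
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=1000)
y_score = np.clip(0.5 * y_true + rng.rand(1000) * 0.7, 0.0, 1.0)

fpr, tpr, _ = roc_curve(y_true, y_score)
print('AUC = %.3f' % auc(fpr, tpr))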