Code example #1
def GetTrees():
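    # Build a training_base from the given tree-file list and print the
    # parsed input/output locations and command-line arguments for inspection.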
    train = training_base(
        testrun=False,
        renewtokens=False,
        pythonconsole=True,
        treefiles=
        '/home/ahill/output_directory/regress_results/train/treefiles.txt')
    print(train.inputData)
    print(train.outputDir)
    print(train.args)
    return train
Code example #2
                      activation='softmax',
                      kernel_initializer='lecun_uniform',
                      name='ID_pred')(x)
    predictE = Dense(1,
                     activation='linear',
                     kernel_initializer='zeros',
                     name='pred_E')(x)

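    # Two output heads: ID classification (softmax) and energy regression (linear).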
    predictions = [predictID, predictE]

    model = Model(inputs=Inputs, outputs=predictions)
    return model


# also does all the parsing
train = training_base(testrun=False)

if not train.modelSet():
    train.setModel(twoDimModel, dropoutRate=0.05, momentum=0.9)

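    # One loss per output head; loss_weights balances classification against regression.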
    train.compileModel(learningrate=0.0125,
                       loss=['categorical_crossentropy', 'mean_squared_error'],
                       metrics=['accuracy'],
                       loss_weights=[.05, 1.])

print(train.keras_model.summary())

model, history = train.trainModel(nepochs=200,
                                  batchsize=800,
                                  stop_patience=300,
                                  lr_factor=0.5,
Code example #3
import os

os.environ['DECORRELATE'] = "True"
from DeepJetCore.training.training_base import training_base
from Losses import loss_NLL, loss_meansquared, loss_kldiv, global_loss_list
from DeepJetCore.modeltools import fixLayersContaining, printLayerInfosAndWeights
from Layers import global_layers_list
from Metrics import global_metrics_list

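# Collect the custom losses, layers and metrics in one dict so Keras can
# resolve them when reloading a saved model.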
custom_objects_list = {}
custom_objects_list.update(global_loss_list)
custom_objects_list.update(global_layers_list)
custom_objects_list.update(global_metrics_list)

#also does all the parsing
train = training_base(testrun=True, renewtokens=False)

trainedModel = '/data/shared/BumbleB/DDBfull100/training/KERAS_check_best_model.h5'

if not train.modelSet():
    from models import model_DeepDoubleXReference as trainingModel

    train.setModel(trainingModel,
                   datasets=['db', 'pf', 'cpf'],
                   removedVars=None)

    train.compileModel(learningrate=0.001,
                       loss=[loss_kldiv],
                       metrics=['accuracy'],
                       loss_weights=[1.])
Code example #4
File: hgcal_denoising.py Project: mverzett/DeepHGCal
def masked_mean_square(truth, prediction):
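    # Mean squared error weighted by a per-cell mask: masked-out cells contribute no loss.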
    truth_data = truth[0]
    mask = truth[1]
    return K.mean(mask * K.square(truth_data - prediction), axis=-1)


inputs = []
shapes = [(13, 13, 55, 26)]
for s in shapes:
    inputs.append(keras.layers.Input(shape=s))

model = denoising_model(inputs, 5, 2)

#also does all the parsing
train = training_base(testrun=False, resumeSilently=True)
train.setModel(denoising_model, dropoutRate=0.05, momentum=0.9)
train.compileModel(learningrate=0.0020, loss=[keras.losses.mean_squared_error])

print(train.keras_model.summary())
#train.train_data.maxFilesOpen=4
#exit()

model, history = train.trainModel(nepochs=10,
                                  batchsize=batch_size,
                                  stop_patience=300,
                                  lr_factor=0.3,
                                  lr_patience=-6,
                                  lr_epsilon=0.001,
                                  lr_cooldown=8,
                                  lr_minimum=0.000001,
Code example #5
    x = Conv2D(8, (4, 4), activation='relu', padding='same')(x)
    x = Conv2D(8, (4, 4), activation='relu', padding='same')(x)
    x = Conv2D(8, (4, 4), activation='relu', padding='same')(x)
    x = Conv2D(8, (4, 4), strides=(2, 2), activation='relu', padding='valid')(x)
    x = Conv2D(4, (4, 4), strides=(2, 2), activation='relu', padding='valid')(x)
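    # The strided convolutions downsample the feature map before the dense regression head.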
    x = Flatten()(x)
    x = Dense(32, activation='relu')(x)
    
    x = Dense(nregressions, activation=None)(x)
    
    predictions = [x]
    return Model(inputs=Inputs, outputs=predictions)



train = training_base(testrun=False, resumeSilently=False, renewtokens=True)


if not train.modelSet():  # allows resuming a stopped/killed training; only sets the model if it cannot be loaded from a previous snapshot

    #for regression use the regression model
    train.setModel(my_model, otheroption=1)
    
    #for regression use a different loss, e.g. mean_squared_error
    train.compileModel(learningrate=0.003,
                       loss='categorical_crossentropy')

print(train.keras_model.summary())


model, history = train.trainModel(nepochs=10,
Code example #6
from DeepJetCore.training.training_base import training_base
from Losses import global_loss_list
from DeepJetCore.MultiDataCollection import MultiDataCollection
from pdb import set_trace

#also does all the parsing
train = training_base(testrun=False, collection_class=MultiDataCollection)
print('Inited')
sizes = train.train_data.sizes
norm = float(sizes[2])/sizes[1] #normalization because samples have different sizes
train.train_data.setFlags([[1,0], [0,norm], [0,1]])
train.train_data.addYs([[[0,0]], [[1,0]], [[0,1]]])

train.val_data.setFlags([[1,0], [0,norm], [0,1]])
train.val_data.addYs([[[0,0]], [[1,0]], [[0,1]]])
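# The validation collection gets the same flags and extra targets as the training data.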

if not train.modelSet():
    from models import dense_model_moments
    print('Setting model')
    train.setModel(dense_model_moments, dropoutRate=0.1)
    
    train.compileModel(
        learningrate=0.003,
        loss=['categorical_crossentropy', global_loss_list['nd_4moment_loss']],
        loss_weights=[1., 0.0000001],
        #metrics=['accuracy'],
    )

Code example #7
import os

from DeepJetCore.training.training_base import training_base
from eval_funcs import loadModel, makeRoc, _byteify, makeLossPlot, makeComparisonPlots

trainDir = opts.n
inputTrainDataCollection = opts.i
inputTestDataCollection = opts.i
inputDataset = sampleDatasets_pf_cpf_sv

if TrainBool:
    args = MyClass()
    args.inputDataCollection = inputTrainDataCollection
    args.outputDir = trainDir
    # Count GPUs by parsing `nvidia-smi -L`; os.system only returns the exit status.
    args.multi_gpu = len(
        [x for x in os.popen("nvidia-smi -L").read().split("\n") if "GPU" in x])
    print("nGPU:", args.multi_gpu)

    train = training_base(splittrainandtest=0.9, testrun=False, parser=args)
    #train=training_base(splittrainandtest=0.9,testrun=False, args=args)
    if not train.modelSet():
        train.setModel(
            trainingModel,
            #num_classes=5, #num_regclasses=5,
            datasets=inputDataset,
        )
        if multi:
            loss = 'categorical_crossentropy'
            metric = 'categorical_accuracy'
        else:
            metric = 'binary_accuracy'
            loss = 'binary_crossentropy'
        train.compileModel(learningrate=0.001,
                           loss=[loss],
Code example #8
File: nf_pca.py Project: shahrukhqasim/HGCalML
    return RobustModel(inputs=Inputs,
                       outputs=[
                           pred_beta, pred_ccoords, pred_energy, pred_pos,
                           pred_time, pred_id, rs
                       ] + backgatheredids + backgathered_coords)


parser = ArgumentParser('Run the training')
parser.add_argument("-b", help="betascale", default=1., type=float)
parser.add_argument("-q", help="qmin", default=1., type=float)
parser.add_argument("-a", help="averaging strength", default=0.1, type=float)
parser.add_argument("-d", help="kalpha damp", default=0., type=float)

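# training_base takes over this parser, so the extra flags are exposed as train.args.b/.q/.a/.d below.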
train = training_base(parser=parser,
                      testrun=False,
                      resumeSilently=True,
                      renewtokens=False)

if not train.modelSet():

    print('>>>>>>>>>>>>>\nsetting parameters to \nbeta_loss_scale',
          train.args.b)
    print('q_min', train.args.q)
    print('use_average_cc_pos', train.args.a)
    print('kalpha_damping_strength', train.args.d)
    print('<<<<<<<<<<<<<')

    train.setModel(gravnet_model,
                   beta_loss_scale=train.args.b,
                   q_min=train.args.q,
                   use_average_cc_pos=train.args.a,
Code example #9
    x = LeakyReLU()(x)
    x = Conv2D(16, (1, 1), padding='same')(x)
    x = LeakyReLU()(x)
    #x = Conv2D(16,(8,8),padding='same')(x)
    #x = LeakyReLU()(x)
    #x = Conv2D(16,(8,8),padding='same')(x)
    #x = LeakyReLU()(x)
    x = Conv2D(1, (4, 4), padding='same')(x)
    x = LeakyReLU()(x)

    #use multiply with scalar to avoid the 'fed and fetched' problem
    return Model(inputs=Inputs, outputs=[x] + feed_forward, name='generator')


train = training_base(testrun=False,
                      testrun_fraction=0.05,
                      resumeSilently=False,
                      renewtokens=True)

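# Register the generator and discriminator models for the adversarial training loop.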
train.setGANModel(g_model, d_model)

#for regression use a different loss, e.g. mean_squared_error instead of categorical_crossentropy
#add some loss scaling factors here
train.compileModel(
    learningrate=0.0003,
    print_models=True,
    discr_loss_weights=None,  #[2.],
    gan_loss_weights=None)  #[1.])

train.trainGAN_exp(nepochs=1,
                   batchsize=500,
                   verbose=1,
Code example #10
File: diTau_reference.py Project: dntaylor/DeepJet
from Losses import loss_NLL, loss_meansquared
from DeepJetCore.modeltools import fixLayersContaining, printLayerInfosAndWeights

import subprocess

import tensorflow as tf
from keras import backend as k

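# Cap TensorFlow GPU memory use: allocate on demand, at most 60% of the card.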
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.6
k.tensorflow_backend.set_session(tf.Session(config=config))

#train=training_base(testrun=False,renewtokens=True,useAFS=False,useweights=True)
train = training_base(testrun=False,
                      renewtokens=True,
                      useAFS=False,
                      useweights=False)

if not train.modelSet():
    from models import model_diTauReference as trainingModel
    #from models import model_diTauDense as trainingModel

    datasets = ['global', 'cpf', 'npf', 'sv']
    #datasets = ['global']

    train.setModel(
        trainingModel,
        datasets=datasets,
        dropoutRate=0.1,
        momentum=0.9,
        batchnorm=False,
Code example #11
import os

from eval_functions import loadModel

inputDataset = sampleDatasets_pf_cpf_sv

if True:
    args = MyClass()
    args.inputDataCollection = opts.i
    args.outputDir = opts.o

    multi_gpu = len([
        x for x in os.popen("nvidia-smi -L").read().split("\n") if "GPU" in x
    ])
    print("nGPU:", multi_gpu)

    train = training_base(splittrainandtest=0.9,
                          testrun=False,
                          resumeSilently=opts.resume,
                          parser=args)
    #train=training_base(splittrainandtest=0.9,testrun=False, args=args)
    if not train.modelSet():
        train.setModel(
            trainingModel,
            #num_classes=5, #num_regclasses=5,
            datasets=inputDataset,
            multi_gpu=multi_gpu)
        if opts.multi:
            loss = 'categorical_crossentropy'
            accuracy = 'categorical_accuracy'
        else:
            loss = 'binary_crossentropy'
            accuracy = 'binary_accuracy'
Code example #12
def GetTrees():
    train = training_base(
        testrun=False,
        renewtokens=False,
        pythonconsole=True,
        treefiles=
        '/home/ahill/DeepLearning/CMSSW_10_2_0_pre5/src/DeepML/regress_results/train/treefiles.txt')
    print(train.inputData)
    print(train.outputDir)
    print(train.args)
    return train
Code example #13
File: Train.py Project: rsyarif/DeepJet
        metrics = [acc_reg, mass_kldiv_q, mass_kldiv_h, loss_disc, loss_adv]
    elif opts.decor and opts.loss == 'loss_kldiv':
        loss = loss_kldiv
        metrics = [acc_kldiv, mass_kldiv_q, mass_kldiv_h, loss_disc_kldiv]
    elif opts.decor and opts.loss == 'loss_kldiv_3class':
        loss = loss_kldiv_3class
        metrics = [acc_kldiv]
    elif opts.decor and opts.loss == 'loss_jsdiv':
        loss = loss_jsdiv
        metrics = [acc_kldiv, mass_jsdiv_q, loss_disc_kldiv]
    else:
        loss = 'categorical_crossentropy'
        metrics = ['accuracy']
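    # loss and metrics now match the requested decorrelation scheme.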

    # Set up training
    train = training_base(splittrainandtest=0.9,
                          testrun=False,
                          useweights=True,
                          resumeSilently=opts.resume,
                          renewtokens=False,
                          parser=args)
    if not train.modelSet():
        modelargs = {}
        if opts.loss=='loss_reg':
            modelargs.update({'nRegTargets':NBINS,
                              'discTrainable': True,
                              'advTrainable':True})
        train.setModel(trainingModel,
                       datasets=inputDataset,
                       multi_gpu=multi_gpu,
                       **modelargs)
        train.compileModel(learningrate=0.001,
                           loss=[loss],
                           metrics=metrics,
                           loss_weights=[1.])