def convolutional_model_broad_reg(Inputs, nclasses, nregclasses, dropoutRate=-1):
    """
    Broad convolutional model with recurrent sequence summaries and an
    additional energy-regression output.

    NOTE (from the original author): the inputs are really not working as
    they are — a reshaping is needed well before this point.
    """
    # Shared 1x1-convolution feature extraction on the three particle branches.
    charged, neutral, vert = block_deepFlavourConvolutions(
        charged=Inputs[1],
        neutrals=Inputs[2],
        vertices=Inputs[3],
        dropoutRate=dropoutRate)

    # Summarise each variable-length sequence with a backwards LSTM,
    # followed by dropout for regularisation.
    charged = LSTM(150, go_backwards=True, implementation=2, name='cpf_lstm')(charged)
    charged = Dropout(dropoutRate)(charged)

    neutral = LSTM(50, go_backwards=True, implementation=2, name='npf_lstm')(neutral)
    neutral = Dropout(dropoutRate)(neutral)

    vert = LSTM(50, go_backwards=True, implementation=2, name='vtx_lstm')(vert)
    vert = Dropout(dropoutRate)(vert)

    # Merge global features, the three summaries, and the extra input block,
    # then run the shared dense trunk.
    merged = Concatenate()([Inputs[0], charged, neutral, vert, Inputs[4]])
    trunk = block_deepFlavourDense(merged, dropoutRate)

    flavour = Dense(nclasses, activation='softmax',
                    kernel_initializer='lecun_uniform', name='ID_pred')(trunk)
    energy = Dense(nregclasses, activation='linear',
                   kernel_initializer='ones', name='E_pred')(trunk)

    return Model(inputs=Inputs, outputs=[flavour, energy])
def convolutional_model_deepcsv(Inputs, nclasses, nregclasses, dropoutRate=-1):
    """
    DeepCSV-style model: per-particle 1x1 convolutions on the charged-candidate
    and vertex sequences, flattened and merged with the global features, then
    the shared dense block and a single softmax ID output.

    `nregclasses` is accepted for signature compatibility but not used here
    (this model has no regression head).
    """
    def _conv_stack(tensor, prefix):
        # Four 1x1 convolutions (64 -> 32 -> 32 -> 8) with dropout between
        # consecutive convolutions (none after the last one).
        for idx, filters in enumerate((64, 32, 32, 8)):
            if idx:
                tensor = Dropout(dropoutRate)(tensor)
            tensor = Convolution1D(filters, 1,
                                   kernel_initializer='lecun_uniform',
                                   activation='relu',
                                   name='%s_conv%d' % (prefix, idx))(tensor)
        return tensor

    charged = _conv_stack(Inputs[1], 'cpf')
    vert = _conv_stack(Inputs[2], 'vtx')

    merged = Concatenate()([Inputs[0], Flatten()(charged), Flatten()(vert)])
    trunk = block_deepFlavourDense(merged, dropoutRate)

    flavour = Dense(nclasses, activation='softmax',
                    kernel_initializer='lecun_uniform', name='ID_pred')(trunk)

    return Model(inputs=Inputs, outputs=flavour)
def convolutional_model_broad_map_reg(Inputs, nclasses, nregclasses, dropoutRate):
    """
    Reference 1x1 convolutional model for 'deepFlavour' with an image branch
    and an additional energy-regression output.
    """
    # 1x1-convolution feature extraction on the three particle branches.
    charged, neutral, vert = block_deepFlavourConvolutions(
        charged=Inputs[1],
        neutrals=Inputs[2],
        vertices=Inputs[3],
        dropoutRate=dropoutRate)

    # Backwards LSTM summary + dropout per branch.
    charged = LSTM(150, go_backwards=True, implementation=2, name='cpf_lstm')(charged)
    charged = Dropout(dropoutRate)(charged)

    neutral = LSTM(50, go_backwards=True, implementation=2, name='npf_lstm')(neutral)
    neutral = Dropout(dropoutRate)(neutral)

    vert = LSTM(50, go_backwards=True, implementation=2, name='vtx_lstm')(vert)
    vert = Dropout(dropoutRate)(vert)

    # Image branch processed by the Schwartz-image block.
    image = block_SchwartzImage(image=Inputs[4], dropoutRate=dropoutRate)

    # Merge everything (globals, sequence summaries, image, extra input)
    # and run the shared dense trunk.
    merged = Concatenate()([Inputs[0], charged, neutral, vert, image, Inputs[5]])
    trunk = block_deepFlavourDense(merged, dropoutRate)

    flavour = Dense(nclasses, activation='softmax',
                    kernel_initializer='lecun_uniform', name='ID_pred')(trunk)
    energy = Dense(nregclasses, activation='linear',
                   kernel_initializer='ones', name='E_pred')(trunk)

    return Model(inputs=Inputs, outputs=[flavour, energy])
def model_deepFlavourNoNeutralReference(Inputs, nclasses, nregclasses, dropoutRate=0.1):
    """
    Reference 1x1 convolutional model for 'deepFlavour' without the
    neutral-candidate branch, with recurrent layers and batch normalisation.
    The standard dropout rate is 0.1.

    Should be trained for flavour prediction first. Afterwards, all layers
    whose name does not include 'regression' can be fixed and the training
    repeated focusing on the regression part (check fixLayersContaining
    with invert=True).
    """
    batchmom = 0.6  # shared batch-normalisation momentum

    # Normalise every raw input block.
    globalvars = BatchNormalization(momentum=batchmom, name='globals_input_batchnorm')(Inputs[0])
    charged = BatchNormalization(momentum=batchmom, name='cpf_input_batchnorm')(Inputs[1])
    vert = BatchNormalization(momentum=batchmom, name='vtx_input_batchnorm')(Inputs[2])
    ptreginput = BatchNormalization(momentum=batchmom, name='reg_input_batchnorm')(Inputs[3])

    # 1x1-convolution feature extraction on charged candidates and vertices.
    charged, vert = block_deepFlavourBTVConvolutions(
        charged=charged,
        vertices=vert,
        dropoutRate=dropoutRate,
        active=True,
        batchnorm=True)

    # NOTE(review): no LSTM is applied to the charged branch (it was disabled
    # in the original); the batchnorm keeps the 'cpflstm' name, presumably
    # for weight-file compatibility — confirm before renaming.
    charged = BatchNormalization(momentum=batchmom, name='cpflstm_batchnorm')(charged)
    charged = Dropout(dropoutRate)(charged)

    vert = LSTM(50, go_backwards=True, implementation=2, name='vtx_lstm')(vert)
    vert = BatchNormalization(momentum=batchmom, name='vtxlstm_batchnorm')(vert)
    vert = Dropout(dropoutRate)(vert)

    trunk = Concatenate()([globalvars, charged, vert])
    trunk = block_deepFlavourDense(trunk, dropoutRate, active=True,
                                   batchnorm=True, batchmomentum=batchmom)

    flavour_pred = Dense(nclasses, activation='softmax',
                         kernel_initializer='lecun_uniform', name='ID_pred')(trunk)

    # The regression head sees the flavour prediction plus the normalised
    # pt-regression input.
    reg_features = Concatenate()([flavour_pred, ptreginput])
    reg_pred = Dense(nregclasses, activation='linear', kernel_initializer='ones',
                     name='regression_pred', trainable=True)(reg_features)

    return Model(inputs=Inputs, outputs=[flavour_pred, reg_pred])