Example #1
# Keras imports used by these examples; block_deepFlavourConvolutions,
# block_SchwartzImage and block_deepFlavourDense are the DeepJet
# building-block helpers defined elsewhere in the same project.
from keras.layers import Dense, Dropout, LSTM, Concatenate, BatchNormalization
from keras.models import Model


def convolutional_model_broad_map_reg(Inputs, nclasses, nregclasses, dropoutRate):
    """
    reference 1x1 convolutional model for 'deepFlavour'
    """
    # per-candidate 1x1 convolutions on the charged PF, neutral PF and
    # secondary-vertex inputs
    cpf, npf, vtx = block_deepFlavourConvolutions(charged=Inputs[1],
                                                  neutrals=Inputs[2],
                                                  vertices=Inputs[3],
                                                  dropoutRate=dropoutRate)

    # summarise each particle sequence with a backwards LSTM
    cpf = LSTM(150, go_backwards=True, implementation=2, name='cpf_lstm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    npf = LSTM(50, go_backwards=True, implementation=2, name='npf_lstm')(npf)
    npf = Dropout(dropoutRate)(npf)

    vtx = LSTM(50, go_backwards=True, implementation=2, name='vtx_lstm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)

    # convolutional block on the jet-image input
    image = block_SchwartzImage(image=Inputs[4], dropoutRate=dropoutRate)

    # concatenate global jet variables, sequence summaries, image features
    # and the remaining input
    x = Concatenate()([Inputs[0], cpf, npf, vtx, image, Inputs[5]])

    x = block_deepFlavourDense(x, dropoutRate)

    # two output heads: flavour classification and energy regression
    predictions = [Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='ID_pred')(x),
                   Dense(nregclasses, activation='linear', kernel_initializer='ones', name='E_pred')(x)]
    model = Model(inputs=Inputs, outputs=predictions)
    return model
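The function expects Inputs to be a list of Keras Input tensors in the order used above (globals, charged PF candidates, neutral PF candidates, secondary vertices, jet image, extra globals). A minimal construction sketch follows; the shapes and input names below are illustrative placeholders, not the feature counts used in the actual DeepJet training:

from keras.layers import Input

Inputs = [Input(shape=(15,),       name='globals_input'),  # global jet variables
          Input(shape=(25, 16),    name='cpf_input'),      # charged PF candidates
          Input(shape=(25, 6),     name='npf_input'),      # neutral PF candidates
          Input(shape=(4, 12),     name='vtx_input'),      # secondary vertices
          Input(shape=(20, 20, 1), name='image_input'),    # jet "image"
          Input(shape=(1,),        name='extra_input')]    # additional globals

model = convolutional_model_broad_map_reg(Inputs, nclasses=6, nregclasses=1,
                                          dropoutRate=0.1)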
Example #2
def convolutional_model_broad_reg(Inputs, nclasses, nregclasses, dropoutRate=-1):
    """
    the inputs do not work as they are; they need to be reshaped well before
    this point
    """
    cpf, npf, vtx = block_deepFlavourConvolutions(charged=Inputs[1],
                                                  neutrals=Inputs[2],
                                                  vertices=Inputs[3],
                                                  dropoutRate=dropoutRate)

    # summarise each particle sequence with a backwards LSTM
    cpf = LSTM(150, go_backwards=True, implementation=2, name='cpf_lstm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    npf = LSTM(50, go_backwards=True, implementation=2, name='npf_lstm')(npf)
    npf = Dropout(dropoutRate)(npf)

    vtx = LSTM(50, go_backwards=True, implementation=2, name='vtx_lstm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)

    # same as example #1, but without the image branch
    x = Concatenate()([Inputs[0], cpf, npf, vtx, Inputs[4]])

    x = block_deepFlavourDense(x, dropoutRate)

    # classification and regression heads
    predictions = [Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='ID_pred')(x),
                   Dense(nregclasses, activation='linear', kernel_initializer='ones', name='E_pred')(x)]

    model = Model(inputs=Inputs, outputs=predictions)
    return model
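Both regression variants return a model with two named outputs (ID_pred and E_pred), so they are compiled with one loss per head. A minimal sketch; the optimiser, losses and loss weights below are assumed choices for illustration, not necessarily the ones used in the DeepJet training setup:

# 'model' is assumed to come from one of the *_reg builders above
model.compile(optimizer='adam',
              loss={'ID_pred': 'categorical_crossentropy',   # flavour classification head
                    'E_pred': 'mean_squared_error'},         # energy regression head
              loss_weights={'ID_pred': 1.0, 'E_pred': 0.1})  # weights are illustrative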
Example #3
def model_deepFlavourReference(Inputs, dropoutRate=0.1, momentum=0.6):
    """
    reference 1x1 convolutional model for 'deepFlavour'
    with recurrent layers and batch normalisation
    the standard dropout rate is 0.1
    The model should be trained for flavour prediction first; afterwards, all
    layers whose names do not contain 'regression' can be fixed and the
    training repeated focusing on the regression part
    (see the function fixLayersContaining with invert=True)
    """
    # batch-normalise each input branch before any further processing
    globalvars = BatchNormalization(momentum=momentum,
                                    name='globals_input_batchnorm')(Inputs[0])
    cpf = BatchNormalization(momentum=momentum,
                             name='cpf_input_batchnorm')(Inputs[1])
    npf = BatchNormalization(momentum=momentum,
                             name='npf_input_batchnorm')(Inputs[2])
    vtx = BatchNormalization(momentum=momentum,
                             name='vtx_input_batchnorm')(Inputs[3])

    cpf, npf, vtx = block_deepFlavourConvolutions(charged=cpf,
                                                  neutrals=npf,
                                                  vertices=vtx,
                                                  dropoutRate=dropoutRate,
                                                  active=True,
                                                  batchnorm=True,
                                                  batchmomentum=momentum)

    # recurrent summaries of each particle sequence, followed by batch norm and dropout
    cpf = LSTM(150, go_backwards=True, implementation=2, name='cpf_lstm')(cpf)
    cpf = BatchNormalization(momentum=momentum, name='cpflstm_batchnorm')(cpf)
    cpf = Dropout(dropoutRate)(cpf)

    npf = LSTM(50, go_backwards=True, implementation=2, name='npf_lstm')(npf)
    npf = BatchNormalization(momentum=momentum, name='npflstm_batchnorm')(npf)
    npf = Dropout(dropoutRate)(npf)

    vtx = LSTM(50, go_backwards=True, implementation=2, name='vtx_lstm')(vtx)
    vtx = BatchNormalization(momentum=momentum, name='vtxlstm_batchnorm')(vtx)
    vtx = Dropout(dropoutRate)(vtx)

    x = Concatenate()([globalvars, cpf, npf, vtx])

    x = block_deepFlavourDense(x,
                               dropoutRate,
                               active=True,
                               batchnorm=True,
                               batchmomentum=momentum)

    flavour_pred = Dense(6,
                         activation='softmax',
                         kernel_initializer='lecun_uniform',
                         name='ID_pred')(x)

    predictions = [flavour_pred]
    model = Model(inputs=Inputs, outputs=predictions)
    return model
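The two-stage recipe from the docstring (train for flavour first, then fix all layers whose names do not contain 'regression' and retrain) relies on the project's own fixLayersContaining helper. Below is a minimal hand-rolled sketch of the same idea using only the standard Keras layer.trainable flag; the function name freeze_layers_not_containing is hypothetical:

def freeze_layers_not_containing(model, keyword='regression'):
    # freeze every layer whose name does NOT contain `keyword`, i.e. the
    # behaviour the docstring describes for fixLayersContaining(..., invert=True)
    for layer in model.layers:
        layer.trainable = keyword in layer.name

# stage 1: build and train the model for flavour prediction, then
# stage 2: freeze everything except the regression layers
freeze_layers_not_containing(model, keyword='regression')

Note that Keras only picks up changed trainable flags after the model is recompiled, so a model.compile(...) call has to follow before the second training stage.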