# --- classifier head: stack of 5 Dense+BatchNorm blocks on top of `dense` ---
# NOTE(review): `dense`, `input_inclusive`, `config`, `obj` and `data` are
# defined earlier in the file (outside this chunk).
norm = BatchNormalization(momentum=0.6, name='lstmdensenorm')(dense)
for i in xrange(5):
    dense = Dense(50, activation='relu', name='dense%i' % i)(norm)
    norm = BatchNormalization(momentum=0.6, name='densenorm%i' % i)(dense)

# softmax over the n_truth categories
output_p = Dense(config.n_truth, activation='softmax')(norm)

# model = Model(inputs=[input_charged, input_inclusive, input_sv], outputs=[output_p, output_b])
model = Model(inputs=input_inclusive, outputs=output_p)
model.compile(optimizer=Adam(lr=0.0005),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# FIX: Model.summary() prints its report itself and returns None, so the
# original `print model.summary()` emitted a spurious trailing "None" line.
model.summary()

# per-partition generators over the PF-candidate arrays
train_generator = obj.generatePF(data, partition='train', batch=100, mask=False)
validation_generator = obj.generatePF(data, partition='validate', batch=100, mask=False)
test_generator = obj.generatePF(data, partition='validate', batch=1000, mask=False)

# sanity check: predict one batch and show the first/last few truth vectors
# next to the model output.  Parenthesized single-argument prints behave
# identically under python 2 and are valid python 3 syntax.
test_i, test_o, test_w = next(test_generator)
pred = model.predict(test_i)
print(test_o[:5])
print(pred[:5])
print(test_o[-5:])
print(pred[-5:])
# (continuation) finish booking hists_top, then book the matching QCD set;
# NOTE(review): the opening `hists_top = {` lies before this chunk.
        'tau32' : utils.NH1(np.arange(0,1.1,0.05)),
        'dnn' : utils.NH1(np.arange(0,1.1,0.05))
}
hists_qcd = {
        'tau32' : utils.NH1(np.arange(0,1.1,0.05)),
        'dnn' : utils.NH1(np.arange(0,1.1,0.05))
}

# tau32 is drawn directly from the samples' singleton branches on the
# test partition (presumably pre-weighted by draw_singletons -- confirm)
hists_top['tau32'] = top_4.draw_singletons([('tau32', hists_top['tau32'].bins)], partition='test')['tau32']
hists_qcd['tau32'] = qcd_0.draw_singletons([('tau32', hists_qcd['tau32'].bins)], partition='test')['tau32']

# NOTE(review): refresh() presumably rewinds/reloads the test partition so
# the generator below starts from the beginning -- confirm against obj impl
top_4.refresh(partitions=['test'])
qcd_0.refresh(partitions=['test'])

# test_generator = obj.generatePFSV([top_4], partition='test', batch=100)
test_generator = obj.generatePF(data, partition='test', batch=10000, repartition=False)

# fill the DNN-response histograms: column 4 of the prediction is used as
# the score, and column 4 of the truth one-hot selects signal events
while True:
    try:
        i, o, w = next(test_generator)
        pred = model.predict(i)[:,4]
        o = np.array(o)
        mask_signal = (o[:,4] == 1)
        mask_background = np.logical_not(mask_signal)
        hists_top['dnn'].fill_array(pred[mask_signal], w[mask_signal])
        hists_qcd['dnn'].fill_array(pred[mask_background], w[mask_background])
    except StopIteration:
        # generator exhausted -> done with the test partition
        break

# output directory for the plots produced below this chunk
OUTPUT = '/home/snarayan/public_html/figs/testplots/test/'
system('mkdir -p '+OUTPUT)
    # (continuation) true branch of an `if` whose condition is before this
    # chunk (see the parallel chunk: `if obj.limit is None:`): read the
    # input dimensions from the loaded 'pf' array
    dims = data[0].objects['train']['pf'].data.data.shape
else:
    # fixed candidate count when a limit is set; 9 features per candidate
    # (assumed from this literal -- TODO confirm against the data producer)
    dims = (None, obj.limit, 9) # override

''' first build the classifier! '''

# set up data: flags forwarded to generatePF for every partition
opts = {
    'learn_mass': LEARNMASS,
    'learn_pt': LEARNPT,
    'learn_rho': LEARNRHO,
    'normalize': False
}
classifier_train_gen = obj.generatePF(data, partition='train', batch=501, **opts)
classifier_validation_gen = obj.generatePF(data, partition='validate', batch=1001, **opts)
classifier_test_gen = obj.generatePF(data, partition='test', batch=2, **opts)
test_i, test_o, test_w = next(classifier_test_gen)
#print test_i

# network inputs: PF-candidate sequence plus scalar mass/rho/pT inputs
inputs = Input(shape=(dims[1], dims[2]), name='input')
mass_inputs = Input(shape=(1, ), name='mass_input')
rho_inputs = Input(shape=(1, ), name='rho_input')
pt_inputs = Input(shape=(1, ), name='pt_input')

norm = BatchNormalization(momentum=0.6, name='input_bnorm')(inputs)
# NOTE(review): this chunk is truncated mid-call -- the Conv1D arguments
# continue past the visible source
conv = Conv1D(32,
# discover the input dimensions: either preload one 'pf' array and read its
# shape, or synthesize the shape from the configured candidate limit
if obj.limit is None:
    data[0].objects['train']['pf'].load(memory=False)
    dims = data[0].objects['train']['pf'].data.data.shape
else:
    # 9 features per candidate (assumed from this literal -- TODO confirm)
    dims = (None, obj.limit, 9) # override

''' first build the classifier! '''

# set up data: flags forwarded to generatePF for every partition
opts = {'learn_mass':LEARNMASS, 'learn_pt':LEARNPT, 'learn_rho':LEARNRHO, 'normalize':False}
classifier_train_gen = obj.generatePF(data, partition='train', batch=502, **opts)
classifier_validation_gen = obj.generatePF(data, partition='validate', batch=1002, **opts)
classifier_test_gen = obj.generatePF(data, partition='test', batch=2, **opts)
test_i, test_o, test_w = next(classifier_test_gen)
#print test_i

# network inputs: PF-candidate sequence plus scalar mass/rho/pT inputs
inputs = Input(shape=(dims[1], dims[2]), name='input')
mass_inputs = Input(shape=(1,), name='mass_input')
rho_inputs = Input(shape=(1,), name='rho_input')
pt_inputs = Input(shape=(1,), name='pt_input')

# feature extraction: BN -> two Conv1D blocks over the candidate axis
# -> backwards LSTM to summarize the (variable-length) sequence
norm = BatchNormalization(momentum=0.6, name='input_bnorm') (inputs)
conv = Conv1D(32, 2, activation='relu', name='conv0', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv0_bnorm') (conv)
conv = Conv1D(16, 4, activation='relu', name='conv1', kernel_initializer='lecun_uniform', padding='same')(norm)
norm = BatchNormalization(momentum=0.6, name='conv1_bnorm') (conv)
lstm = LSTM(100, go_backwards=True, implementation=2, name='lstm') (norm)
# preload some data just to get the dimensions data[0].objects['train']['pf'].load(memory=False) dims = data[0].objects['train']['pf'].data.data.shape # obj.limit = 20 # dims = (None, 20, 9) # override ''' some global definitions ''' ''' first build the classifier! ''' # set up data classifier_train_gen = obj.generatePF(data, partition='train', batch=1000, normalize=False) classifier_validation_gen = obj.generatePF(data, partition='validate', batch=100) classifier_test_gen = obj.generatePF(data, partition='validate', batch=1000) test_i, test_o, test_w = next(classifier_test_gen) inputs = Input(shape=(dims[1], dims[2]), name='input') norm = BatchNormalization(momentum=0.6, name='input_bnorm')(inputs) lstm = LSTM(100, go_backwards=True, implementation=2, name='lstm')(norm) norm = BatchNormalization(momentum=0.6, name='lstm_norm')(lstm) dense = Dense(100, activation='relu', name='lstmdense', kernel_initializer='lecun_uniform')(norm)