def validate(files, model, nParticles):
    """Run model predictions on each validation file and save them as CSVs.

    :param files: iterable of dataset file paths; the final path component
        of each is used as the output CSV file name.
    :param model: trained model exposing a ``predict`` method
        (presumably Keras-like, returning a NumPy array — confirm).
    :param nParticles: particle count forwarded to the ``Utilities`` helper.
    :return: None. Side effect: writes one ``<proc_name>.csv`` per input
        file into the ``TRAINING_RES`` directory.
    """
    utils = Utilities(nParticles)
    for sample_path in files:  # renamed from `file`, which shadows a builtin
        proc_name = sample_path.split("/")[-1]
        X_1, Y_1, _, MVA = utils.BuildValidationDataset(sample_path)
        pred = model.predict(X_1)
        # BUG FIX: model.predict returns an ndarray, which has no .to_csv;
        # wrap it in a DataFrame before saving (a no-op copy if pred is
        # already a DataFrame).
        pd.DataFrame(pred).to_csv(
            "{0}/{1}.csv".format(TRAINING_RES, proc_name), index=False
        )
    # Evaluate results: (not implemented here)
    return
Samples = glob(TEST_DATA) print(Samples) if config.get("model", "meta_name") == "DeepSets": # Build Architecture from the from Trainer_DeepSet import model_build model = model_build() model = load_model(config=config, epoch=epoch, model=model) else: model = load_model(config=config, epoch=epoch) print("Model Loaded") print("Sample", Samples) for sample in Samples: print(sample) X_valid, Y_valid, _, MVA = utils.BuildValidationDataset(sample, None) #print("MVA:", MVA.shape) #df_valid = pd.DataFrame({'DY_labels_valid':[i for i in Y_valid]}) #print("Valid shape:", df_valid.shape) #df_valid.to_csv("{0}.csv".format(sample), index=False) #del df_valid predict = model.predict(X_valid, batch_size=1000) #print(predict) df_predict = pd.DataFrame({ 'valid_pred': [i[0] for i in predict], 'labels_valid': [i for i in Y_valid], 'mva': [i[0] for i in MVA], 'decay_mode': [i[1] for i in MVA], 'mu_match': [i[2] for i in MVA], 'el_match': [i[3] for i in MVA], 'tau_match': [i[4] for i in MVA]