def get_bu(model, X_test, X_test_noisy, X_test_adv, batch_size=None):
    """Get Bayesian uncertainty (BU) scores via Monte Carlo dropout.

    For each input set, runs MC-dropout predictions and scores every sample
    by the variance across the stochastic forward passes, averaged over the
    output dimension.

    :param model: model consumed by ``get_mc_predictions`` (presumably one
        with dropout enabled at inference — confirm against that helper)
    :param X_test: clean/normal test examples
    :param X_test_noisy: noisy test examples
    :param X_test_adv: adversarial test examples
    :param batch_size: prediction batch size; when None, falls back to the
        global ``args.batch_size`` (the original behavior)
    :return: (artifacts, labels) — raw (unnormalized) BU values with
        adversarial examples labeled 1 and normal/noisy examples labeled 0,
        as produced by ``merge_and_generate_labels``
    """
    if batch_size is None:
        # Backward-compatible default: original code read the global args.
        batch_size = args.batch_size

    print('Getting Monte Carlo dropout variance predictions...')
    uncerts_normal = get_mc_predictions(model, X_test, batch_size=batch_size) \
        .var(axis=0).mean(axis=1)
    uncerts_noisy = get_mc_predictions(model, X_test_noisy, batch_size=batch_size) \
        .var(axis=0).mean(axis=1)
    uncerts_adv = get_mc_predictions(model, X_test_adv, batch_size=batch_size) \
        .var(axis=0).mean(axis=1)
    print("uncerts_normal:", uncerts_normal.shape)
    print("uncerts_noisy:", uncerts_noisy.shape)
    print("uncerts_adv:", uncerts_adv.shape)

    # NOTE: normalization is deliberately skipped so the raw values are
    # saved; different normalizations can be tried downstream later.
    uncerts_pos = uncerts_adv  # adversarial -> positive class (label 1)
    uncerts_neg = np.concatenate((uncerts_normal, uncerts_noisy))  # label 0
    artifacts, labels = merge_and_generate_labels(uncerts_pos, uncerts_neg)

    return artifacts, labels