import pickle
from collections import defaultdict

# Import paths are assumed from the utils/models layout used elsewhere
# in this project (see the 'from utils import ...' imports further down).
from utils import datasets, evaluation, saveload
from models import endd


def get_endd_measures(n_models_base_names, n_models_list, endd_base_model,
                      dataset_name, test_images, test_labels):
    """Evaluate saved ENDD models for each base name and ensemble size.

    Returns one dict per base name, mapping each classification measure
    to a list of values (one per entry in n_models_list). Note that
    endd_base_model is accepted but currently unused.
    """
    endd_measures_list = []
    for base_name in n_models_base_names:
        endd_measures = defaultdict(list)
        for n_models in n_models_list:
            # Saved models follow the '<base>_N_MODELS=<n>' naming scheme
            endd_model_name = base_name + '_N_MODELS={}'.format(n_models)
            print(endd_model_name)
            uncompiled_model = saveload.load_tf_model(endd_model_name,
                                                      compile=False)
            endd_model = endd.get_model(uncompiled_model,
                                        dataset_name=dataset_name,
                                        compile=True)

            evaluation_result = evaluation.calc_classification_measures(
                endd_model,
                test_images,
                test_labels,
                wrapper_type='individual')
            for measure, value in evaluation_result.items():
                endd_measures[measure].append(value)
        endd_measures_list.append(endd_measures)
    return endd_measures_list
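
# A minimal usage sketch for get_endd_measures. The base names and model
# counts are hypothetical; they must match models previously saved under
# the '<base>_N_MODELS=<n>' scheme.
base_names = ['cifar10_vgg_endd_aux_0', 'cifar10_vgg_endd_aux_1']
n_models_list = [10, 50, 100]
_, (test_images, test_labels) = datasets.get_dataset('cifar10')
test_labels = test_labels.reshape(-1)  # flatten, as in the evaluation below
measures_list = get_endd_measures(base_names, n_models_list,
                                  endd_base_model='vgg',
                                  dataset_name='cifar10',
                                  test_images=test_images,
                                  test_labels=test_labels)
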
def predict_endd():
    """Predict with each ENDD model variant and save the results to file."""

    # Load model
    MODEL_SAVE_NAMES = [
        "endd_small_net_spiral", "endd_AUX_small_net_spiral",
        "endd_AUX_20_small_net_spiral", "endd_AUX_ANN_small_net_spiral",
        "endd_AUX_T25_small_net_spiral"
    ]
    PREDICT_SAVE_NAMES = [
        "endd", "endd_AUX", "endd_AUX_20", "endd_AUX_ANN", "endd_AUX_T25"
    ]

    # Load the spiral data once; it is identical for every model variant
    (x_train, y_train), (x_test, y_test) = datasets.get_dataset("spiral")
    grid = get_grid(size=2000, steps=1000)

    # Loop over model variants (with and without AUX data)
    for MODEL_SAVE_NAME, PREDICT_SAVE_NAME in zip(MODEL_SAVE_NAMES,
                                                  PREDICT_SAVE_NAMES):
        print(MODEL_SAVE_NAME)

        endd_model = saveload.load_tf_model(MODEL_SAVE_NAME, compile=False)
        endd_model = endd.get_model(endd_model,
                                    init_temp=1,
                                    teacher_epsilon=1e-4)

        # Predict on the train, test and grid points
        endd_logits_train = endd_model.predict(x_train)
        endd_logits_test = endd_model.predict(x_test)
        endd_logits_grid = endd_model.predict(grid)

        with open('train_small_net_spiral_{}.pkl'.format(PREDICT_SAVE_NAME),
                  'wb') as file:
            pickle.dump((x_train, y_train, endd_logits_train), file)
        with open('test_small_net_spiral_{}.pkl'.format(PREDICT_SAVE_NAME),
                  'wb') as file:
            pickle.dump((x_test, y_test, endd_logits_test), file)
        with open('grid_small_net_spiral_{}.pkl'.format(PREDICT_SAVE_NAME),
                  'wb') as file:
            # The grid has no labels, so a 0 placeholder fills their slot
            pickle.dump((grid, 0, endd_logits_grid), file)
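
# Sketch: reading one of the prediction files back. The tuple layout
# mirrors what predict_endd dumps above (shown for the 'endd_AUX' variant).
with open('grid_small_net_spiral_endd_AUX.pkl', 'rb') as file:
    grid, _, endd_logits_grid = pickle.load(file)
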
# # Prepare ENDD model
# endd_model = endd.get_model(ENDD_BASE_MODEL,
#                             dataset_name=DATASET_NAME,
#                             compile=True,
#                             weights=ENDD_MODEL_NAME)
# endd_tot_wrapper_type = 'individual'
# endd_know_wrapper_type = 'priornet'
#
# # Prepare ENDD+AUX model
# endd_aux_model = endd.get_model(ENDD_AUX_BASE_MODEL,
#                                 dataset_name=DATASET_NAME,
#                                 compile=True,
#                                 weights=ENDD_AUX_MODEL_NAME)
# endd_aux_tot_wrapper_type = 'individual'
# endd_aux_know_wrapper_type = 'priornet'

# Prepare PN+AUX model
pn_base_model = saveload.load_tf_model(PN_AUX_MODEL_NAME, compile=False)
pn_aux_model = cnn_priorNet.get_model(pn_base_model,
                                      dataset_name=DATASET_NAME,
                                      compile=True)
pn_aux_tot_wrapper_type = 'individual'
pn_aux_know_wrapper_type = 'priornet'

# Load data
_, (in_images, _) = datasets.get_dataset(DATASET_NAME)
_, out_images = datasets.get_dataset(OUT_DATASET_NAME)

# Preprocess data
in_images = preprocessing.normalize_minus_one_to_one(in_images, min=0, max=255)
out_images = preprocessing.normalize_minus_one_to_one(out_images,
                                                      min=0,
                                                      max=255)
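
# Sketch of what normalize_minus_one_to_one presumably does: a linear
# min-max rescaling of pixel values into [-1, 1]. This is inferred from
# the function name and call signature, not the project's definition.
import numpy as np

def _normalize_minus_one_to_one_sketch(images, min=0, max=255):
    images = np.asarray(images, dtype=np.float32)
    return 2.0 * (images - min) / (max - min) - 1.0
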
from utils import datasets
from utils import evaluation  # used below; assumed to live in utils
from utils import saveload
from models import ensemble

ENSEMBLE_NAME = 'basic_cnn'
DATASET_NAME = 'cifar10'

# Load ensemble model
ensemble_model_names = saveload.get_ensemble_model_names()
model_names = ensemble_model_names[ENSEMBLE_NAME][DATASET_NAME][:3]
models = [ensemble.KerasLoadsWhole(name) for name in model_names]
ensm = ensemble.Ensemble(models)
ensm_wrapper_type = 'ensemble'

# Load individual model
ind = saveload.load_tf_model(model_names[0])
ind_wrapper_type = 'individual'

# Load data
_, (test_images, test_labels) = datasets.get_dataset(DATASET_NAME)

# Preprocess data
test_labels = test_labels.reshape(-1)

# Calculate measures
ensm_measures = evaluation.calc_classification_measures(
    ensm, test_images, test_labels, wrapper_type=ensm_wrapper_type)

ind_measures = evaluation.calc_classification_measures(
    ind, test_images, test_labels, wrapper_type=ind_wrapper_type)
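
# Sketch: print the two wrappers' measures side by side. Both calls are
# assumed to return a dict mapping measure names to scalar values, as the
# .items() iteration in get_endd_measures above suggests.
for measure in ensm_measures:
    print('{}: ensemble={:.4f}, individual={:.4f}'.format(
        measure, ensm_measures[measure], ind_measures[measure]))
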
Example #5
    # Predict ensemble
    ensm_preds = ensm_model.predict(test_images)
    with open("ensm_preds.pkl", 'wb') as file:
        pickle.dump(ensm_preds, file)
    ensm_preds_noise = ensm_model.predict(noise_img)
    with open("ensm_preds_noise.pkl", 'wb') as file:
        pickle.dump(ensm_preds_noise, file)

if LOAD_PREVIOUS_ENDD_PREDS:
    with open("endd_preds.pkl", 'rb') as file:
        endd_preds = pickle.load(file)
    with open("endd_preds_noise.pkl", 'rb') as file:
        endd_preds_noise = pickle.load(file)
else:
    # Load endd
    endd_base_model = saveload.load_tf_model(ENDD_MODEL_NAME, compile=False)
    endd_model = endd.get_model(endd_base_model,
                                init_temp=1,
                                teacher_epsilon=1e-4)

    # Predict endd
    # Predict endd
    endd_preds = endd_model.predict(test_images)
    with open("endd_preds.pkl", 'wb') as file:
        pickle.dump(endd_preds, file)
    endd_preds_noise = endd_model.predict(noise_img)
    with open("endd_preds_noise.pkl", 'wb') as file:
        pickle.dump(endd_preds_noise, file)

# Plot random images
if PLOT_COLLAGE:
    in_indices = np.where((test_labels == 4) | (test_labels == 5)
Example #6
IND_MODEL_NAME = 'vgg_cifar10_cifar10_0'
ENSM_MODEL_NAME, ENSM_N_MODELS = 'vgg', 1

END_MODEL_NAME, END_BASE_MODEL = 'end_vgg_cifar10_100_{}'.format(nr), 'vgg'
END_AUX_MODEL_NAME, END_AUX_BASE_MODEL = 'end_vgg_cifar10_100_aux_{}'.format(
    nr), 'vgg'

ENDD_MODEL_NAME, ENDD_BASE_MODEL = 'endd_vgg_cifar10_100_{}'.format(nr), 'vgg'
ENDD_AUX_MODEL_NAME, ENDD_AUX_BASE_MODEL = 'cifar10_vgg_endd_aux_{}_N_MODELS=100'.format(
    nr - 1), 'vgg'

# Choose dataset
DATASET_NAME = 'cifar10'

# Prepare IND model
ind_model = saveload.load_tf_model(IND_MODEL_NAME)
ind_wrapper_type = 'individual'

# Prepare ENSM model
ensemble_model_names = saveload.get_ensemble_model_names()
model_names = ensemble_model_names[ENSM_MODEL_NAME][
    DATASET_NAME][:ENSM_N_MODELS]
models = [
    ensemble.KerasLoadsWhole(name, pop_last=True) for name in model_names
]
ensm_model = ensemble.Ensemble(models)
ensm_wrapper_type = 'ensemble'

# Prepare END model
end_model = endd.get_model(END_BASE_MODEL,
                           dataset_name=DATASET_NAME,
Example #7
    def get_model(self):
        """Return the loaded Keras model."""
        model = saveload.load_tf_model(self.model_load_name)
        if self.pop_last:
            model.pop()
        return model
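
# For context, a hedged sketch of the class this method presumably belongs
# to, inferred from the ensemble.KerasLoadsWhole(name, pop_last=True) call
# sites above; this is not the project's actual definition.
class KerasLoadsWhole:
    """Loads a whole saved Keras model by name (sketch)."""

    def __init__(self, model_load_name, pop_last=False):
        self.model_load_name = model_load_name
        # pop_last optionally removes the final layer (e.g. a softmax)
        self.pop_last = pop_last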