def train_models():
    """Train an ensemble of models on the spiral dataset.

    Each trained model is saved under "<ensemble>_<dataset>_<index>" and
    the ensemble record is updated with the list of saved names even if
    the script fails partway through (try/finally).
    """

    MODEL_TYPE = "small_net"
    ENSEMBLE_SAVE_NAME = "small_net"
    DATASET_NAME = "spiral"
    NAME_START_NUMBER = 0  # First model index; > 0 extends an existing ensemble.
    N_MODELS = 100
    N_EPOCHS = 85

    # Import data
    (x_train, y_train), (x_test, y_test) = datasets.get_dataset(DATASET_NAME)
    y_train_one_hot = tf.one_hot(y_train.reshape((-1, )),
                                 settings.DATASET_N_CLASSES[DATASET_NAME])
    y_test_one_hot = tf.one_hot(y_test.reshape((-1, )),
                                settings.DATASET_N_CLASSES[DATASET_NAME])

    # Train models
    model_module = settings.MODEL_MODULES[MODEL_TYPE]
    saved_model_names = []
    try:
        # BUG FIX: previously `range(N_MODELS)`, which ignored
        # NAME_START_NUMBER, so extending an existing ensemble would
        # overwrite models 0..N-1 while still passing append=True below.
        # Offsetting the range keeps saved names unique and matches the
        # other ensemble-training script in this project.
        for i in range(NAME_START_NUMBER, N_MODELS + NAME_START_NUMBER):
            # Get a freshly initialised, compiled model for this member.
            model = model_module.get_model(dataset_name=DATASET_NAME,
                                           compile=True)

            # Train model, validating on the test split each epoch.
            model.fit(x_train,
                      y_train_one_hot,
                      validation_data=(x_test, y_test_one_hot),
                      epochs=N_EPOCHS,
                      verbose=2)
            print("Model {} finished training.".format(i))

            # Save model under "<ensemble>_<dataset>_<index>".
            model_name = "{}_{}_{}".format(ENSEMBLE_SAVE_NAME, DATASET_NAME, i)
            saveload.save_tf_model(model, model_name)
            saved_model_names.append(model_name)

    finally:
        # Update the ensemble record even on failure so already-saved
        # models are not orphaned. Append (rather than overwrite) when we
        # started from a non-zero index, i.e. extended an existing ensemble.
        append_model_names = NAME_START_NUMBER > 0
        saveload.update_ensemble_names(ENSEMBLE_SAVE_NAME,
                                       DATASET_NAME,
                                       saved_model_names,
                                       append=append_model_names)
def train_end():
    """Train an END and an END_AUX model on the ensemble predictions.

    Loads pickled training inputs and ensemble logits, distills them into
    two END models (without and with auxiliary data), saves both, and
    prints test-set predictions and accuracies.
    """

    # Save/load names.
    ENSEMBLE_SAVE_NAME = 'small_net'  # Name that the ensemble models will be saved with
    DATASET_NAME = 'spiral'  # Name of dataset models were trained with
    MODEL_SAVE_NAME = "end_small_net_spiral"
    MODEL_SAVE_NAME_AUX = "end_AUX_small_net_spiral"

    # Each pickle holds (inputs, labels, ensemble logits).
    with open('train_small_net_spiral.pkl', 'rb') as pkl:
        x_train, y_train, ensemble_logits_train = pickle.load(pkl)
    with open('train_aux_small_net_spiral.pkl', 'rb') as pkl:
        x_train_aux, y_train_aux, ensemble_logits_train_aux = pickle.load(pkl)

    # Wrap fresh, uncompiled base networks in END heads.
    end_model = end.get_model(get_model(DATASET_NAME, compile=False),
                              init_temp=1)
    end_model_AUX = end.get_model(get_model(DATASET_NAME, compile=False),
                                  init_temp=1)

    # Distill. Targets are the ensemble logits with the first two axes
    # swapped — presumably (model, sample, class) -> (sample, model, class);
    # TODO confirm against how the pickles were written.
    end_model.fit(x_train,
                  np.transpose(ensemble_logits_train, (1, 0, 2)),
                  epochs=150)
    end_model_AUX.fit(x_train_aux,
                      np.transpose(ensemble_logits_train_aux, (1, 0, 2)),
                      epochs=500)

    # Persist both distilled models.
    saveload.save_tf_model(end_model, MODEL_SAVE_NAME)
    saveload.save_tf_model(end_model_AUX, MODEL_SAVE_NAME_AUX)

    # Evaluate on the held-out split: print predictions, labels, accuracies.
    _, (x_test, y_test) = datasets.get_dataset("spiral")
    preds = np.argmax(end_model.predict(x_test), axis=1)
    preds_aux = np.argmax(end_model_AUX.predict(x_test), axis=1)
    print(preds)
    print(preds_aux)
    print(y_test)
    print(sklearn.metrics.accuracy_score(y_test, preds))
    print(sklearn.metrics.accuracy_score(y_test, preds_aux))
# Example #3
# 0
import settings
from models import cnn
from utils import saveload
from utils import datasets

# Need these settings for GPU to work on my computer /Einar
# Guard against machines with no visible GPU: list_physical_devices
# returns an empty list there, and indexing [0] would raise IndexError.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if physical_devices:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

# Load data
# NOTE(review): DATASET_NAME and N_EPOCHS are not defined in this fragment —
# presumably module-level constants set earlier in the original script.
(train_images,
 train_labels), (test_images, test_labels) = datasets.get_dataset(DATASET_NAME)

# Preprocess: flatten label arrays and one-hot encode to the dataset's class count.
train_labels = tf.one_hot(train_labels.reshape((-1, )),
                          settings.DATASET_N_CLASSES[DATASET_NAME])
test_labels = tf.one_hot(test_labels.reshape((-1, )),
                         settings.DATASET_N_CLASSES[DATASET_NAME])

# Get a compiled CNN sized for the dataset.
model = cnn.get_model(dataset_name=DATASET_NAME, compile=True)

# Train, validating on the test split each epoch.
model.fit(train_images,
          train_labels,
          epochs=N_EPOCHS,
          validation_data=(test_images, test_labels))

# Save weights under the fixed name "cnn".
saveload.save_tf_model(model, "cnn")
model_module = settings.MODEL_MODULES[MODEL_TYPE]

# Train and save ensemble
# Try-finally construct ensures that the list of trained models in the ensemble is
# updated correctly even if script fails before all models have been trained and saved.
# NOTE(review): MODEL_TYPE, NAME_START_NUMBER, N_MODELS and ENSEMBLE_SAVE_NAME
# are not defined in this fragment — presumably set earlier in the script.
try:
    saved_model_names = []
    # Indices are offset by NAME_START_NUMBER so an existing ensemble can be
    # extended without overwriting previously saved models.
    for i in range(NAME_START_NUMBER, N_MODELS + NAME_START_NUMBER):
        print("Training model {}...".format(i))
        # Get a freshly initialised, compiled model for this ensemble member.
        model = model_module.get_model(dataset_name=DATASET_NAME, compile=True)

        # Train model, validating on the test split each epoch.
        model.fit(train_images,
                  train_labels,
                  epochs=N_EPOCHS,
                  validation_data=(test_images, test_labels))
        print("Model {} finished training.".format(i))

        # Save model under "<ensemble>_<dataset>_<index>".
        model_name = "{}_{}_{}".format(ENSEMBLE_SAVE_NAME, DATASET_NAME, i)
        saveload.save_tf_model(model, model_name)
        saved_model_names.append(model_name)
finally:
    # Updates ensemble record with list of saved models; runs even if
    # training failed partway, so already-saved models are still recorded.
    append_model_names = NAME_START_NUMBER > 0
    saveload.update_ensemble_names(ENSEMBLE_SAVE_NAME,
                                   DATASET_NAME,
                                   saved_model_names,
                                   append=append_model_names)
# Example #5
# 0
            init_temp=INIT_TEMP,
            dropout_rate=DROPOUT_RATE,
            save_endd_dataset=save,
            load_previous_endd_dataset=load,
            repetition=rep)
        endd_measures = evaluation.calc_classification_measures(
            endd_model, test_images, test_labels, wrapper_type='individual')
        print("############# ENDD Measures")
        for measure, value in endd_measures.items():
            measures['endd'][measure].append(value)
            print("{}={}".format(measure, value))
        print()

        if MODEL_BASE_SAVE_NAME:
            saveload.save_tf_model(
                endd_model, MODEL_BASE_SAVE_NAME +
                "_{}".format(nr_repetition) + '_N_MODELS={}'.format(n_models))
            saveload.save_weights(
                endd_model, MODEL_BASE_SAVE_NAME +
                "_{}".format(nr_repetition) + '_N_MODELS={}'.format(n_models))

    print(measures)
'''
# Plot results: prediction error vs. ensemble size for ENDD+AUX and ENSM.
# NOTE(review): N_MODELS_LIST and measures come from the experiment loop
# above this fragment — confirm they are defined before this runs.
plt.subplot(2, 2, 1)
plt.plot(N_MODELS_LIST, measures['endd']['err'], label='ENDD+AUX')
plt.plot(N_MODELS_LIST, measures['ensm']['err'], label='ENSM')
plt.xlabel("Number of models")
plt.ylabel("Prediction Error")
plt.legend()
def train_endd():
    """Train ENDD and ENDD_AUX variants on stored ensemble predictions.

    Five ENDD heads are built (plain, AUX, AUX_20, AUX with temperature
    annealing, AUX with fixed init temp 2.5). Only the ANN and T25
    variants are fitted below — the other three fit calls are commented
    out — yet all five models are saved, and the (untrained) plain AUX
    model is the one evaluated. NOTE(review): this looks like leftover
    experiment state; confirm which variants are meant to be trained.
    """

    # Save/load names.
    ENSEMBLE_SAVE_NAME = 'small_net'  # Name that the ensemble models will be saved with
    DATASET_NAME = 'spiral'  # Name of dataset models were trained with
    MODEL_SAVE_NAME = "endd_small_net_spiral"
    MODEL_SAVE_NAME_AUX = "endd_AUX_small_net_spiral"
    MODEL_SAVE_NAME_AUX_20 = "endd_AUX_20_small_net_spiral"
    MODEL_SAVE_NAME_AUX_ANN = "endd_AUX_ANN_small_net_spiral"
    MODEL_SAVE_NAME_AUX_T25 = "endd_AUX_T25_small_net_spiral"

    # Load data: each pickle holds (inputs, labels, ensemble logits).
    with open('train_small_net_spiral.pkl', 'rb') as file:
        x_train, y_train, ensemble_logits_train = pickle.load(file)
    with open('train_aux_small_net_spiral.pkl', 'rb') as file:
        x_train_aux, y_train_aux, ensemble_logits_train_aux = pickle.load(file)
    with open('train_aux_20_small_net_spiral.pkl', 'rb') as file:
        x_train_aux_20, y_train_aux_20, ensemble_logits_train_aux_20 = pickle.load(
            file)

    # Build ENDD heads on fresh (uncompiled) base networks.
    base_model = get_model(DATASET_NAME, compile=False)
    endd_model = endd.get_model(base_model, init_temp=1, teacher_epsilon=1e-4)

    base_model_AUX = get_model(DATASET_NAME, compile=False)
    base_model_AUX_20 = get_model(DATASET_NAME, compile=False)
    base_model_AUX_ANN = get_model(DATASET_NAME, compile=False)
    base_model_AUX_T25 = get_model(DATASET_NAME, compile=False)

    endd_model_AUX = endd.get_model(base_model_AUX,
                                    init_temp=1,
                                    teacher_epsilon=1e-4)
    endd_model_AUX_20 = endd.get_model(base_model_AUX_20,
                                       init_temp=1,
                                       teacher_epsilon=1e-4)
    # The ANN and T25 variants start at temperature 2.5; ANN additionally
    # anneals it during training via the callback below.
    endd_model_AUX_ANN = endd.get_model(base_model_AUX_ANN,
                                        init_temp=2.5,
                                        teacher_epsilon=1e-4)
    endd_model_AUX_T25 = endd.get_model(base_model_AUX_T25,
                                        init_temp=2.5,
                                        teacher_epsilon=1e-4)

    # Train model. Targets are the ensemble logits with the first two axes
    # swapped — presumably (model, sample, class) -> (sample, model, class);
    # TODO confirm against how the pickles were written.
    #endd_model.fit(x_train, np.transpose(ensemble_logits_train, (1, 0, 2)), epochs=500)
    #endd_model_AUX.fit(x_train_aux, np.transpose(ensemble_logits_train_aux, (1, 0, 2)), epochs=500)
    #endd_model_AUX_20.fit(x_train_aux_20, np.transpose(ensemble_logits_train_aux_20, (1, 0, 2)), epochs=500)
    endd_model_AUX_ANN.fit(x_train_aux,
                           np.transpose(ensemble_logits_train_aux, (1, 0, 2)),
                           epochs=500,
                           callbacks=[
                               callbacks.TemperatureAnnealing(init_temp=2.5,
                                                              cycle_length=400,
                                                              epochs=500)
                           ])
    endd_model_AUX_T25.fit(x_train_aux,
                           np.transpose(ensemble_logits_train_aux, (1, 0, 2)),
                           epochs=500)

    # Save all five models, including the currently untrained variants.
    saveload.save_tf_model(endd_model, MODEL_SAVE_NAME)
    saveload.save_tf_model(endd_model_AUX, MODEL_SAVE_NAME_AUX)
    saveload.save_tf_model(endd_model_AUX_20, MODEL_SAVE_NAME_AUX_20)
    saveload.save_tf_model(endd_model_AUX_ANN, MODEL_SAVE_NAME_AUX_ANN)
    saveload.save_tf_model(endd_model_AUX_T25, MODEL_SAVE_NAME_AUX_T25)

    # Evaluate: print predictions, labels and accuracy on the test split.
    _, (x_test, y_test) = datasets.get_dataset("spiral")
    logits = endd_model_AUX.predict(x_test)
    print(np.argmax(logits, axis=1))
    print(y_test)
    print(sklearn.metrics.accuracy_score(y_test, np.argmax(logits, axis=1)))
# Example #7
# 0
elif NORMALIZATION == 'gaussian':
    train_images, mean, std = preprocessing.normalize_gaussian(train_images)
    test_images = preprocessing.normalize_gaussian(test_images, mean, std)

# Distill the ensemble into a single END model.
# NOTE(review): the uppercase config constants, ensemble_model and the
# train/test arrays are defined earlier in the original script, outside
# this fragment.
end_model = training.train_vgg_end(train_images=train_images,
                                   ensemble_model=ensemble_model,
                                   dataset_name=DATASET_NAME,
                                   batch_size=BATCH_SIZE,
                                   n_epochs=N_EPOCHS,
                                   one_cycle_lr_policy=ONE_CYCLE_LR_POLICY,
                                   init_lr=INIT_LR,
                                   cycle_length=CYCLE_LENGTH,
                                   temp_annealing=TEMP_ANNEALING,
                                   init_temp=INIT_TEMP,
                                   dropout_rate=DROPOUT_RATE,
                                   save_end_dataset=False,
                                   load_previous_end_dataset=True)

# Score the distilled model on the test set as a single (non-ensemble) model.
measures = evaluation.calc_classification_measures(end_model,
                                                   test_images,
                                                   test_labels,
                                                   wrapper_type='individual')

if MODEL_SAVE_NAME:
    # Note: There seems to be some difficulties when trying to load whole model with custom loss
    # — hence the weights are also saved separately as a fallback.
    saveload.save_tf_model(end_model, MODEL_SAVE_NAME)
    saveload.save_weights(end_model, MODEL_SAVE_NAME)

results = evaluation.format_results(['end'], [measures])
print(results)
# Example #8
# 0
import settings

from utils import preprocessing, saveload, simplex, losses, training
from models import cnn_priorNet
# Experiment configuration.
MODEL = 'vgg'
DATASET = 'cifar10'
SAVE_WEIGHTS = True
NORMALIZATION = "-1to1"
# CIFAR-10 as in-distribution data; CIFAR-100 train images as OOD data.
# NOTE(review): `datasets` here must expose keras-style cifar10/cifar100
# loaders (e.g. tf.keras.datasets) — it is not imported in this fragment.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
(OOD_images, _), (_, _) = datasets.cifar100.load_data()

# Convert images/labels into Prior Network targets, mixing in OOD images.
train_images, train_alphas, test_images, test_alphas = preprocessing.preprocess_cifar_for_priornet(
    train_images,
    train_labels,
    test_images,
    test_labels,
    normalization=NORMALIZATION,
    OOD_images=OOD_images)

# Train the Prior Network.
model = training.train_pn(train_images, train_alphas, DATASET, MODEL)

if SAVE_WEIGHTS:
    saveload.save_tf_model(model, "PN_vgg_cifar10_aux_c")
# The model's outputs are exponentiated here — presumably it predicts
# log-alphas; confirm against train_pn.
alphas = tf.math.exp(model.predict(test_images))

# Accuracy proxy: fraction of samples where predicted and target argmax agree.
predictions = tf.math.argmax(tf.squeeze(alphas), axis=1)
real = tf.math.argmax(tf.squeeze(test_alphas), axis=1)

score = tf.math.reduce_sum(tf.cast(predictions == real, tf.float32)) / len(real)
# Example #9
# 0
            init_lr=INIT_LR,
            cycle_length=CYCLE_LENGTH,
            temp_annealing=TEMP_ANNEALING,
            init_temp=init_temp,
            dropout_rate=DROPOUT_RATE,
            save_endd_dataset=(not load_previous_dataset),
            load_previous_endd_dataset=load_previous_dataset)
        load_previous_dataset = True  # Load previous to make training faster
        endd_measures = evaluation.calc_classification_measures(
            endd_model, test_images, test_labels, wrapper_type='individual')
        for measure, value in endd_measures.items():
            measures[measure].append(value)

        if MODEL_BASE_SAVE_NAME:
            saveload.save_tf_model(
                endd_model, MODEL_BASE_SAVE_NAME +
                "_{}".format(nr_repetition) + '_TEMP={}'.format(init_temp))
            saveload.save_weights(
                endd_model, MODEL_BASE_SAVE_NAME +
                "_{}".format(nr_repetition) + '_TEMP={}'.format(init_temp))

    print(measures)
'''

# Plot results: prediction error vs. initial temperature for ENDD+AUX.
# NOTE(review): INIT_TEMP_LIST and measures come from the experiment loop
# above this fragment — confirm they are defined before this runs.
plt.subplot(2, 2, 1)
plt.plot(INIT_TEMP_LIST, measures['err'], label='ENDD+AUX')
plt.xlabel("Initial temperature")
plt.ylabel("Prediction Error")
plt.legend()
from models.dense_priornet import get_model
from utils.create_toy_data import create_mixed_data
import utils.saveload as saveload

# Prior Network experiment on the toy spiral dataset.
dataset = "spiral"
BATCH_SIZE = 100
N_EPOCHS = 20
PLOT_SIMPLEX = False  # Simplex plot is only drawn for 3-class datasets (see below).
SAVE_WEIGHTS = False

# Generate a mix of in-distribution and OOD toy samples.
# NOTE(review): `settings`, `preprocessing`, `tf` and `plot_simplex` are not
# imported in this fragment — presumably imported earlier in the script.
X, Y = create_mixed_data(settings.SAMPLES_PER_CLASS_PN,
                         settings.SAMPLES_OOD_PN,
                         settings.DATASET_N_CLASSES[dataset],
                         radius=settings.RADIUS_PN,
                         ID_noise=settings.ID_NOISE_PN,
                         OOD_noise=settings.OOD_NOISE_PN)

# 80/20 train/test split into Prior Network training targets.
x_train, logits_train, x_test, logits_test = preprocessing.preprocess_toy_dataset(
    X, Y, 0.8)
model = get_model(dataset, compile=True)

model.fit(x=x_train, y=logits_train, batch_size=BATCH_SIZE, epochs=N_EPOCHS)
if SAVE_WEIGHTS:
    saveload.save_tf_model(model, "dense_priornet")

# Compare predicted vs. target argmax classes on the held-out split.
logits = model.predict(x_test)
predictions = tf.math.argmax(logits, axis=1)
real = tf.math.argmax(logits_test, axis=1)
if PLOT_SIMPLEX and settings.DATASET_N_CLASSES[dataset] == 3:
    plot_simplex(logits)