    # --- tail of experiment() ---------------------------------------------
    # NOTE(review): this chunk starts mid-function. The enclosing def and the
    # variables used here (t, ensemble_cerror, entropy_ensemble, digits_*,
    # digits_taus) are defined above this view; one level of indentation is
    # assumed — confirm against the full file.
    # Per-trial output filenames; `t` is presumably the trial index — TODO confirm.
    filename1 = 'adv_mnist_results_5-20-50-trial%s' % t
    filename2 = 'adv_digits_results_5-20-50-trial%s' % t
    # Ensemble classification error and predictive entropy on MNIST.
    # The commented-out entries are adversarial / voting variants that were
    # deliberately disabled — left in place for reference.
    mnist_results = {
        'ensemble_cerror' : ensemble_cerror,
        'ensemble_entropy' : entropy_ensemble
        #'ensemble_adv_cerror' : ensemble_adv_cerror,
        #'ensemble_adv_entropy' : entropy_adv_ensemble,
        #'voting_entropy' : entropy_vote
        #'voting_adv_entropy' : entropy_adv_vote
    }
    # Same metrics measured on the digits dataset.
    digits_results = {
        'ensemble_cerror' : digits_cerror,
        'ensemble_entropy' : digits_entropy
        #'ensemble_adv_cerror' : digits_adv_cerror,
        #'ensemble_adv_entropy' : digits_adv_entropy,
        #'voting_entropy' : digits_vote
        #'voting_adv_entropy' : digits_adv_vote
    }
    # Persist both result dicts under the per-trial filenames.
    utils.save_processed_data(mnist_results,filename1)
    utils.save_processed_data(digits_results,filename2)
    return digits_taus, mnist_results, digits_results

# --- script entry ----------------------------------------------------------
# Configure the GPU session, then run the experiment with the 'mlp' config.
utils.setup_gpu_session()
taus, mnist_results ,digits_results = experiment(network_model1, 'mlp')
"type" : "MaxPooling2D", "pool_size" : [2,2], "strides" : [2,2] }, { "type" : "Flatten" }, { "type" : "Dense", "units" : 10, "activation" : "softmax" } ] } """ utils.setup_gpu_session(True) xtrain, ytrain, xtest, ytest = utils.load_mnist() xtrain = xtrain.reshape(60000, 28, 28, 1) xtest = xtest.reshape(10000, 28, 28, 1) model_conf = json.loads(network_model1) inputs, outputs, train_model, model_list, merge_model = ann.build_ensemble( [model_conf]) ensemble_size = len(model_list) lossfunctions = [ ann.adveserial_loss(klosses.categorical_crossentropy, m) for m in model_list ] train_model.compile(optimizer="adam", loss=lossfunctions, metrics=["accuracy"]) train_model.fit([xtrain] * ensemble_size, [ytrain] * ensemble_size,