def test_models_with_same_random_state_have_same_loss_and_accuracy(environ, mnist):
    """Training twice with a fixed random state yields (nearly) identical metrics.

    Runs ``bin/train-model.py`` twice with identical arguments, reloading the
    saved weights and fitted PCA each time, and checks that the evaluated
    [loss, accuracy] pair matches across runs to within ``atol``.
    """
    X_train, _, X_test, y_test = mnist
    one_hot_y_test = keras.utils.to_categorical(y_test, 10)
    command = [
        "python", "bin/train-model.py",
        "--pca", "--n-components", "20", "--epochs", "2",
    ]
    model_dir = "/tmp/model/pca-filtered-model-20-components"

    def train_and_evaluate():
        # Train via the CLI; check=True so a failing script aborts the test
        # with a clear error instead of a confusing missing-file failure below.
        subprocess.run(command, stdout=subprocess.PIPE, check=True)
        model = fc_100_100_10()
        model.load_weights(f"{model_dir}/weights.h5")
        with open(f"{model_dir}/pca.pkl", "rb") as f:
            pca = load(f)
        model = pca_filtered_model(model, X_train, pca=pca)
        return model.evaluate(X_test, one_hot_y_test)

    expected = train_and_evaluate()
    # Remove the artifacts so the second run trains from scratch.
    shutil.rmtree(f"{model_dir}/")
    actual = train_and_evaluate()

    assert allclose(expected, actual, atol=0.001)
def test_kernelpca_filtered_model_is_built_correctly(mnist):
    """A KernelPCA-filtered model exposes its transformer and a descriptive name."""
    X_train = mnist[0]
    # Fit on a slice of X_train or we may run out of memory on Travis builds.
    filtered = kernelpca_filtered_model(fc_100_100_10(), X_train[:1000], 10)

    assert type(filtered.sklearn_transformer) is KernelPCA
    assert filtered.name == "kernelpca-filtered-model-10-components"
def test_pca_filtered_keeping_10_components_structure(mnist):
    """A PCA-filtered model keeps the 28x28 -> 10 topology with 7 layers."""
    X_train = mnist[0]
    filtered = pca_filtered_model(fc_100_100_10(), X_train, 10)

    # Exactly one input tensor and one output tensor, with expected shapes.
    assert len(filtered.inputs) == 1
    assert filtered.input.shape.as_list() == [None, 28, 28]
    assert len(filtered.outputs) == 1
    assert filtered.output.shape.as_list() == [None, 10]
    assert len(filtered.layers) == 7
def test_fc_100_100_10_structure(mnist):
    """The plain fully-connected model has the 28x28 -> 10 topology with 7 layers."""
    network = fc_100_100_10()

    # Exactly one input tensor and one output tensor, with expected shapes.
    assert len(network.inputs) == 1
    assert network.inputs[0].shape.as_list() == [None, 28, 28]
    assert len(network.outputs) == 1
    assert network.outputs[0].shape.as_list() == [None, 10]
    assert len(network.layers) == 7
def save_models():
    """Build and save one of each filtered model under /tmp, then clean up.

    Yields once (setup/teardown style); after the yield, every directory that
    was created by ``save_to_file`` is removed. Cleanup paths are derived from
    each model's ``name`` instead of being hard-coded, so adding a builder to
    the list below cannot leave its directory behind.
    """
    X_train, y_train, X_test, y_test = mnist()
    prefix = "/tmp"

    builders_and_data = [
        (pca_filtered_model, X_train),
        (fastica_filtered_model, X_train),
        (truncatedsvd_filtered_model, X_train),
        # KernelPCA: use a slice of X_train or we may run out of memory on CI.
        (kernelpca_filtered_model, X_train[:1000]),
        (incrementalpca_filtered_model, X_train),
        (nmf_filtered_model, X_train),
    ]
    saved_names = []
    for build, data in builders_and_data:
        model = build(fc_100_100_10(), data, 10)
        save_to_file(model, prefix)
        saved_names.append(model.name)

    yield

    for name in saved_names:
        shutil.rmtree(f"/tmp/model/{name}")
help="default: let the model choose") argument_parser.add_argument("--random-seed", action="store_true", help="initialize model with random seed") args = argument_parser.parse_args() PREFIX = os.environ.get('PREFIX', '.') X_train, y_train, X_test, y_test = mnist() if not args.random_seed: K.clear_session() tf.set_random_seed(1234) np.random.seed(1234) no_defense_model = fc_100_100_10() print(f"Training {no_defense_model.name}...") train(no_defense_model, X_train, y_train, args.epochs, verbose=True, stop_on_stable_weights=True, reduce_lr_on_plateau=True, stop_on_stable_weights_patience=60, reduce_lr_on_plateau_patience=30) print(f"Saving {no_defense_model.name}...") save_to_file(no_defense_model, PREFIX) for n_components in args.n_components:
return second argument_parser = argparse.ArgumentParser() args = argument_parser.parse_args() X_train, y_train, X_test, y_test = mnist() models_win_counter = { "reconstructed_model": 0, "retrained_model": 0, "reretrained_model": 0, } for n_components in [784, 331, 100, 80, 60, 20]: reconstructed_model = pca_filtered_model(fc_100_100_10(), X_train, n_components) train(reconstructed_model, X_train, y_train, epochs=-1, stop_on_stable_weights=True) retrained_model = pca_filtered_model(fc_100_100_10(), X_train, n_components) X_train = retrained_model.preprocessing_fn(X_train) train(retrained_model, X_train, y_train, epochs=-1, stop_on_stable_weights=True)
def test_nmf_filtered_model_is_built_correctly(mnist):
    """An NMF-filtered model exposes its transformer and a descriptive name."""
    X_train = mnist[0]
    filtered = nmf_filtered_model(fc_100_100_10(), X_train, 10)

    assert type(filtered.sklearn_transformer) is NMF
    assert filtered.name == "nmf-filtered-model-10-components"
def test_fastica_filtered_model_is_built_correctly(mnist):
    """A FastICA-filtered model exposes its transformer and a descriptive name."""
    X_train = mnist[0]
    filtered = fastica_filtered_model(fc_100_100_10(), X_train, 10)

    assert type(filtered.sklearn_transformer) is FastICA
    assert filtered.name == "fastica-filtered-model-10-components"
def test_pca_filtered_keeping_10_components_accuracy(mnist):
    """Two epochs on the PCA-filtered model lands near 44% test accuracy."""
    X_train, y_train, X_test, y_test = mnist
    filtered = pca_filtered_model(fc_100_100_10(), X_train, 10)
    train(filtered, X_train, y_train, epochs=2)

    score = accuracy(filtered, X_test, y_test)
    assert isclose(score, 0.44, abs_tol=0.01)
def test_fc_100_100_10_accuracy(mnist):
    """Two epochs on the plain model lands near 54.4% test accuracy."""
    X_train, y_train, X_test, y_test = mnist
    network = fc_100_100_10()
    train(network, X_train, y_train, epochs=2)

    score = accuracy(network, X_test, y_test)
    assert isclose(score, 0.544, abs_tol=0.01)
def test_incrementalpca_filtered_model_is_built_correctly(mnist):
    """An IncrementalPCA-filtered model exposes its transformer and a descriptive name."""
    X_train = mnist[0]
    filtered = incrementalpca_filtered_model(fc_100_100_10(), X_train, 10)

    assert type(filtered.sklearn_transformer) is IncrementalPCA
    assert filtered.name == "incrementalpca-filtered-model-10-components"
def test_truncatedsvd_filtered_model_is_built_correctly(mnist):
    """A TruncatedSVD-filtered model exposes its transformer and a descriptive name."""
    X_train = mnist[0]
    filtered = truncatedsvd_filtered_model(fc_100_100_10(), X_train, 10)

    assert type(filtered.sklearn_transformer) is TruncatedSVD
    assert filtered.name == "truncatedsvd-filtered-model-10-components"
"epochs": -1, "early_stopping": True, "reduce_lr_on_plateau": True, "early_stopping_patience": 60 }, { "epochs": -1, "stop_on_stable_weights": True, "reduce_lr_on_plateau": True, "reduce_lr_on_plateau_patience": 30 }, ] accuracies_list = [] epochs_list = [] X_train, y_train, X_test, y_test = mnist() for kwargs in kwargs_list: model = fc_100_100_10() history = train(model, X_train, y_train, **kwargs) n_epochs = len(history.epoch) test_set_accuracy = accuracy(model, X_test, y_test) accuracies_list.append(test_set_accuracy) epochs_list.append(n_epochs) print("#" * 80) for kwargs, test_set_accuracy, epochs in zip(kwargs_list, accuracies_list, epochs_list): print(f"{kwargs} -> {test_set_accuracy}, trained for {epochs} epochs") print("#" * 80)