**autonet_config)  # NOTE(review): closes a call that starts above this view; forwards the config dict into it — confirm against the full file

# Test logging cont.
# Register extra log functions on the pipeline so test-set predictions/results
# are recorded for the ensemble during fitting.
autonet.pipeline[LogFunctionsSelector.get_name()].add_log_function(
    name=test_predictions_for_ensemble.__name__,
    log_function=test_predictions_for_ensemble(autonet, X_test, y_test),
    loss_transform=False)
autonet.pipeline[LogFunctionsSelector.get_name()].add_log_function(
    name=test_result_ens.__name__,
    log_function=test_result_ens(autonet, X_test, y_test))
# Hand the test features to the baseline trainer stage as well.
autonet.pipeline[BaselineTrainer.get_name()].add_test_data(X_test)
print(autonet.get_current_autonet_config())

# Fit on the training split using the currently configured autonet settings.
fit_results = autonet.fit(X_train, y_train, **autonet.get_current_autonet_config())
# Test labels may be absent (e.g. blind test sets); score only when available.
score = autonet.score(X_test, y_test) if y_test is not None else None
print("Test score:", score)

# Write to json
# Persist run id, test score and seed for downstream aggregation.
results = dict()
results["run_id"] = int(args.run_id)
results["test_score"] = score
results["seed"] = int(seed)
with open(logdir + "/results_dump.json", "w") as f:
    json.dump(results, f)
__author__ = "Max Dippel, Michael Burkart and Matthias Urban"
__version__ = "0.0.1"
__license__ = "BSD"

import os
import sys

# Make the repository root importable when this example is run directly.
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "..")))

from autoPyTorch import AutoNetClassification, AutoNetEnsemble
from autoPyTorch.data_management.data_manager import DataManager

# Note: You can write your own datamanager! Call fit with respective train,
# valid data (numpy matrices).
data_manager = DataManager()
data_manager.generate_classification(num_classes=3, num_features=21, num_samples=1500)

# Note: every parameter has a default value, you do not have to specify
# anything. The given parameters allow a fast test.
ensemble = AutoNetEnsemble(AutoNetClassification,
                           budget_type='epochs',
                           min_budget=1,
                           max_budget=9,
                           num_iterations=1,
                           log_level='debug')

fit_result = ensemble.fit(X_train=data_manager.X,
                          Y_train=data_manager.Y,
                          cross_validator="k_fold",
                          cross_validator_args={"n_splits": 3},
                          validation_split=0.2,
                          ensemble_only_consider_n_best=3)
print(fit_result)

# NOTE(review): scores against the training split (X_train/Y_train), not a
# held-out test set — matches the original example; confirm this is intended.
print("Score:", ensemble.score(X_test=data_manager.X_train, Y_test=data_manager.Y_train))