# NOTE(review): fragment — continues a dataset-selection `if args.statlog: ...`
# chain whose opening branch is outside this view; do not apply standalone.
elif args.htru2:
    datasets.append(htru2_details)
elif not args.statlog and not args.htru2:
    # Default: neither dataset flag given, so run on both datasets.
    datasets.append(statlog_details)
    datasets.append(htru2_details)

# Load, train/test-split, and standard-scale each selected dataset, then wrap
# it in an ExperimentDetails record for the experiment runners below.
experiment_details = []
for ds in datasets:
    data = ds['data']
    data.load_and_process()
    data.build_train_test_split()
    data.scale_standard()
    experiment_details.append(
        experiments.ExperimentDetails(data, ds['name'], ds['readable_name'],
                                      threads=threads, seed=seed))

# Any of these flags implies at least one experiment will run, so set up the
# shared timing dict and log the start banner once.
if args.all or args.benchmark or args.ica or args.pca or args.lda or args.svd or args.rf or args.rp:
    if verbose:
        logger.info("----------")
    logger.info("Running experiments")
    timings = {}  # populated by run_experiment with per-experiment durations

    if args.benchmark or args.all:
        run_experiment(experiment_details, experiments.BenchmarkExperiment, 'Benchmark', args.dim,
                       args.skiprerun, verbose, timings)
# NOTE(review): fragment — these first lines close an `envs = [{...}` list
# literal whose opening is outside this view; do not apply standalone.
    'name': 'cliff_walking',
    'readable_name': 'Cliff Walking (4x12)',
}]

# Seed each environment for reproducibility and wrap it in an
# ExperimentDetails record for the experiment runners.
experiment_details = []
for env in envs:
    env['env'].seed(seed)
    # nS / nA: discrete state- and action-space sizes of the unwrapped env.
    logger.info('{}: State space: {}, Action space: {}'.format(
        env['readable_name'], env['env'].unwrapped.nS, env['env'].unwrapped.nA))
    experiment_details.append(
        experiments.ExperimentDetails(env['env'], env['name'], env['readable_name'],
                                      threads=threads, seed=seed))

if verbose:
    logger.info("----------")
print('\n\n')
logger.info("Running experiments")
timings = {}  # populated by run_experiment with per-experiment durations

if args.policy or args.all:
    print('\n\n')
    # Policy Iteration: bounded by MAX_STEPS['pi'] iterations over
    # NUM_TRIALS['pi'] trials, converging when change < PI_THETA.
    run_experiment(experiment_details, experiments.PolicyIterationExperiment, 'PI',
                   verbose, timings, MAX_STEPS['pi'], NUM_TRIALS['pi'], theta=PI_THETA)
# NOTE(review): fragment — ds1_data/ds1_name/ds1_readable_name are defined
# outside this view, and the final call is cut mid-expression; do not apply
# standalone.
ds1_data.load_and_process()

# Second dataset: handwritten pen digits, loaded via the project's loader.
ds2_data = loader.PenDigitData(verbose=verbose, seed=seed)
ds2_name = 'pen_digits'
ds2_readable_name = 'Handwritten Digits'
ds2_data.load_and_process()

if verbose:
    print("----------")
print("Running experiments")
timings = {}  # per-experiment durations

experiment_details_ds1 = experiments.ExperimentDetails(ds1_data, ds1_name, ds1_readable_name,
                                                       threads=threads, seed=seed)
experiment_details_ds2 = experiments.ExperimentDetails(ds2_data, ds2_name, ds2_readable_name,
                                                       threads=threads, seed=seed)

if args.ann or args.all:
    t = datetime.now()  # start time; presumably used for timing further below — verify
    # Run the ANN experiment on each dataset in turn.
    experiment = experiments.ANNExperiment(experiment_details_ds1, verbose=verbose)
    experiment.perform()
    experiment = experiments.ANNExperiment(experiment_details_ds2,
        # (call continues beyond this fragment)
# NOTE(review): fragment — the first line is the tail of an
# `if args.dataset1:` branch that begins outside this view; do not apply
# standalone.
    datasets.append(dataset1_details)
elif args.dataset2:
    datasets.append(dataset2_details)
elif not args.dataset1 and not args.dataset2:
    # Default: neither dataset flag given, so run on both datasets.
    datasets.append(dataset1_details)
    datasets.append(dataset2_details)

# Load, split, and standard-scale each selected dataset, then wrap it in an
# ExperimentDetails record for the runners below.
experiment_details = []
for ds in datasets:
    data = ds['data']
    data.load_and_process()
    data.build_train_test_split()
    data.scale_standard()
    experiment_details.append(experiments.ExperimentDetails(
        data, ds['name'], ds['readable_name'],
        ds['best_nn_params'],  # presumably pre-tuned ANN hyperparams — verify against loader
        threads=threads, seed=seed
    ))

# Any of these flags implies at least one experiment will run.
if args.all or args.benchmark or args.ica or args.pca or args.lda or args.svd or args.rf or args.rp:
    if verbose:
        logger.info("----------")
    logger.info("Running experiments")
    timings = {}  # populated by run_experiment with per-experiment durations

    if args.benchmark or args.all:
        run_experiment(experiment_details, experiments.BenchmarkExperiment, 'Benchmark', args.dim,
                       args.skiprerun, verbose, timings)
    # (branch body continues beyond this fragment)
    if args.ica or args.all:
# NOTE(review): fragment — closes an `envs = [...]` list whose opening is
# outside this view, and the final call is cut mid-expression; do not apply
# standalone.
}, {
    'env': environments.get_taxi_environment(),
    'name': 'taxi',
    'readable_name': 'Taxi problem',
    'state_to_track': 14  # index of the single state whose value is tracked — verify semantics
}]

# Seed each environment and wrap it in an ExperimentDetails record.
experiment_details = []
for env in envs:
    env['env'].seed(seed)
    # nS / nA: discrete state- and action-space sizes of the unwrapped env.
    logger.info('{}: State space: {}, Action space: {}'.format(
        env['readable_name'], env['env'].unwrapped.nS, env['env'].unwrapped.nA))
    # NOTE(review): seed and state_to_track are passed positionally here,
    # unlike sibling fragments that pass threads=/seed= by keyword — confirm
    # this matches the ExperimentDetails signature in this file's module.
    experiment_details.append(
        experiments.ExperimentDetails(env['env'], env['name'], env['readable_name'],
                                      seed, env['state_to_track']))

if verbose:
    logger.info("----------")
logger.info("Running experiments")
timings = {}  # populated by run_experiment with per-experiment durations

if args.policy or args.all:
    run_experiment(experiment_details, experiments.PolicyIterationExperiment, 'PI',
                   verbose, timings)

if args.value or args.all:
    # (call continues beyond this fragment)
    run_experiment(experiment_details,
# NOTE(review): fragment — `dataset`, `args`, `logger`, `threads`, `verbose`
# and DATASETS are defined outside this view; do not apply standalone.
# Draw a fresh random seed when none was supplied on the command line.
# (np.random.randint's upper bound is exclusive, so the max seed is 2**32 - 2.)
if seed is None:
    seed = np.random.randint(0, (2**32) - 1)
logger.info("Seed: {}".format(seed))
logger.info("Available datasets: {}".format(DATASETS.keys()))
logger.info("Selected dataset: {}".format(args.dataset))

timings = {}  # populated by run_experiment with per-experiment durations

# Instantiate the selected dataset's loader and run the standard pipeline:
# load, train/test split, standard scaling.
data_loader = dataset["loader"](verbose=verbose, seed=seed)
data_loader.set_logger(logger)
data_loader.load_and_process()
data_loader.build_train_test_split()
data_loader.scale_standard()

experiment_details = experiments.ExperimentDetails(
    data_loader, args.dataset, dataset["readable_name"], threads=threads, seed=seed)

# One guarded run per learner; --all enables every experiment.
if args.ann or args.all:
    run_experiment(experiment_details, experiments.ANNExperiment, "ANN", verbose, timings)
if args.boosting or args.all:
    run_experiment(experiment_details, experiments.BoostingExperiment, "Boosting", verbose, timings)
if args.dt or args.all:
    run_experiment(experiment_details, experiments.DTExperiment, "DT", verbose, timings)
# Select the datasets to run. NOTE(review): the original carried three earlier
# `datasets = [...]` assignments that were each immediately overwritten (dead
# stores, apparently leftover manual toggles); only this final assignment ever
# took effect, so the dead ones were removed. Behavior is unchanged.
datasets = [enhancer_brain, wine_quality_uniq_details]

# Load, train/test-split, and standard-scale each dataset, then wrap it in an
# ExperimentDetails record for the experiment runners below.
experiment_details = []
for ds in datasets:
    data = ds['data']
    data.load_and_process()
    data.build_train_test_split()
    data.scale_standard()
    experiment_details.append(
        experiments.ExperimentDetails(
            data,
            ds['name'],
            ds['readable_name'],
            threads=threads,
            seed=seed,
            bparams=True,  # Turn this to True for best params in each clf
        ))

# One guarded run per learner; --all enables every experiment.
if args.knn or args.all:
    run_experiment(experiment_details, experiments.KNNExperiment, 'KNN', verbose, timings)
if args.boosting or args.all:
    run_experiment(experiment_details, experiments.BoostingExperiment, 'Boosting', verbose, timings)
if args.ann or args.all:
    run_experiment(experiment_details, experiments.ANNExperiment, 'ANN', verbose, timings)
# NOTE(review): fragment — ds1_details/ds2_details and the surrounding setup
# are outside this view; do not apply standalone.
print("Running experiments")
timings = {}  # populated by run_experiment with per-experiment durations

# Load, split, and standard-scale both datasets, wrapping each in an
# ExperimentDetails record for the runners below.
datasets = [ds1_details, ds2_details]
experiment_details = []
for ds in datasets:
    data = ds["data"]
    data.load_and_process()
    data.build_train_test_split()
    data.scale_standard()
    experiment_details.append(
        experiments.ExperimentDetails(data, ds["name"], ds["readable_name"],
                                      threads=threads, seed=seed))

# One guarded run per learner; --all enables every experiment.
if args.ann or args.all:
    run_experiment(experiment_details, experiments.ANNExperiment, "ANN", verbose, timings)
if args.boosting or args.all:
    run_experiment(
        experiment_details,
        experiments.BoostingExperiment,
        "Boosting",
        verbose,
        timings,
    )
# NOTE(review): fragment — the first line is the tail of a dataset-selection
# branch (an `elif args.dataset2:` or similar) that begins outside this view,
# and the final call is cut mid-expression; do not apply standalone.
    datasets.append(dataset2_details)
elif not args.dataset1 and not args.dataset2:
    # Default: neither dataset flag given, so run on both datasets.
    datasets.append(dataset1_details)
    datasets.append(dataset2_details)

# Load, split, and standard-scale each selected dataset, then wrap it in an
# ExperimentDetails record for the runners below.
experiment_details = []
for ds in datasets:
    data = ds["data"]
    data.load_and_process()
    data.build_train_test_split()
    data.scale_standard()
    experiment_details.append(
        experiments.ExperimentDetails(
            data,
            ds["name"],
            ds["readable_name"],
            ds["best_nn_params"],  # presumably pre-tuned ANN hyperparams — verify against loader
            threads=threads,
            seed=seed,
        ))

# Any of these flags implies at least one experiment will run.
if (args.all or args.benchmark or args.ica or args.pca or args.lda or args.svd
        or args.rf or args.rp):
    if verbose:
        logger.info("----------")
    logger.info("Running experiments")
    timings = {}  # populated by run_experiment with per-experiment durations

    if args.benchmark or args.all:
        # (call continues beyond this fragment)
        run_experiment(