# Select the DEHB variant requested on the command line.
DEHB = dehbs[args.version]

# Initializing DEHB object
dehb = DEHB(cs=cs, dimensions=dimensions, f=f, strategy=args.strategy,
            mutation_factor=args.mutation_factor, crossover_prob=args.crossover_prob,
            eta=args.eta, min_budget=min_budget, max_budget=max_budget,
            generations=args.gens)

# Helper DE object for vector to config mapping
de = DE(cs=cs, b=b, f=f)

if args.runs is None:  # for a single run
    # Seed is fixed to 0 for a reproducible single run unless --fix_seed is set.
    if not args.fix_seed:
        np.random.seed(0)
    # Running DE iterations
    traj, runtime, history = dehb.run(iterations=args.iter, verbose=args.verbose)
    valid_scores, test_scores = calc_regrets(history)
    save_json(valid_scores, test_scores, runtime, output_path, args.run_id)
else:  # for multiple runs
    # Each repeat gets its own run_id (offset by --run_start) and, unless the
    # seed is fixed, is seeded with that run_id for per-run reproducibility.
    for run_id, _ in enumerate(range(args.runs), start=args.run_start):
        if not args.fix_seed:
            np.random.seed(run_id)
'adult': (9, 243), 'higgs': (9, 243), 'letter': (3, 81), 'mnist': (9, 243), 'optdigits': (1, 27) , 'poker': (81, 2187), } min_budget, max_budget = budgets[args.dataset] # Initializing DE object if args. async is None: de = DE(cs=cs, dimensions=dimensions, f=f, pop_size=args.pop_size, mutation_factor=args.mutation_factor, crossover_prob=args.crossover_prob, strategy=args.strategy, budget=max_budget) else: de = AsyncDE(cs=cs, dimensions=dimensions, f=f, pop_size=args.pop_size, mutation_factor=args.mutation_factor, crossover_prob=args.crossover_prob, strategy=args.strategy, budget=max_budget, async_strategy=args. async) if args.runs is None: # for a single run
# Initializing DEHB object dehb = DEHB(cs=cs, dimensions=dimensions, f=f, strategy=args.strategy, mutation_factor=args.mutation_factor, crossover_prob=args.crossover_prob, eta=args.eta, min_budget=min_budget, max_budget=max_budget, generations=args.gens) # Initializing DE object de = DE(cs=cs, dimensions=dimensions, f=f, pop_size=10, mutation_factor=args.mutation_factor, crossover_prob=args.crossover_prob, strategy=args.strategy, budget=args.max_budget) if args.runs is None: # for a single run if not args.fix_seed: np.random.seed(args.run_id) # Running DE iterations traj, runtime, history = dehb.run(iterations=args.iter, verbose=args.verbose) fh = open(os.path.join(output_path, 'run_{}.json'.format(args.run_id)), 'w') json.dump(calc_test_scores(runtime, history), fh) fh.close() else: # for multiple runs