Code example #1
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)
  
  logging.info("args = %s", args)

  torch.backends.cudnn.benchmark = True
  torch.backends.cudnn.enabled = True

  model = SearchSpace()
  model.cuda()

  optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), eta_min=args.learning_rate_min)

  architect = Architect(model, args)

  train_samples = Rain800(args.data+'training/', args.steps*args.batch_size, args.patch_size)
  train_queue = torch.utils.data.DataLoader(train_samples, batch_size=args.batch_size, pin_memory=True)
  val_samples = Rain800(args.data+'test_syn/', 30*args.batch_size, args.patch_size)
  valid_queue = torch.utils.data.DataLoader(val_samples, batch_size=args.batch_size, pin_memory=True)

  best_psnr = 0
  best_psnr_epoch = 0
  best_ssim = 0
  best_ssim_epoch = 0
  best_loss = float("inf") 
  best_loss_epoch = 0
  for epoch in range(args.epochs):
    lr = scheduler.get_last_lr()[0]  # get_lr() is deprecated in newer PyTorch
    logging.info('epoch %d/%d lr %e', epoch+1, args.epochs, lr)

    # training
    train(epoch, train_queue, valid_queue, model, architect, optimizer, lr)
    # validation
    psnr, ssim, loss = infer(valid_queue, model)
    
    if psnr > best_psnr and not math.isinf(psnr):
      utils.save(model, os.path.join(args.save, 'best_psnr_weights.pt'))
      best_psnr_epoch = epoch+1
      best_psnr = psnr
    if ssim > best_ssim:
      utils.save(model, os.path.join(args.save, 'best_ssim_weights.pt'))
      best_ssim_epoch = epoch+1
      best_ssim = ssim
    if loss < best_loss:
      utils.save(model, os.path.join(args.save, 'best_loss_weights.pt'))
      best_loss_epoch = epoch+1
      best_loss = loss

    scheduler.step()
    logging.info('psnr:%6f ssim:%6f loss:%6f -- best_psnr:%6f best_ssim:%6f best_loss:%6f', psnr, ssim, loss, best_psnr, best_ssim, best_loss)
    logging.info('arch:%s', torch.argmax(model.arch_parameters()[0], dim=1))
    
  logging.info('BEST_LOSS(epoch):%6f(%d), BEST_PSNR(epoch):%6f(%d), BEST_SSIM(epoch):%6f(%d)', best_loss, best_loss_epoch, best_psnr, best_psnr_epoch, best_ssim, best_ssim_epoch)
  utils.save(model, os.path.join(args.save, 'last_weights.pt'))
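The checkpointing above goes through a project-local utils.save helper that is not shown in this excerpt. A minimal sketch, assuming it simply persists the model's state_dict with torch.save (the usual pattern in DARTS-style codebases):

import torch

def save(model, model_path):
    # Assumption: only the parameters are persisted, not the full module.
    torch.save(model.state_dict(), model_path)

def load(model, model_path):
    # Counterpart loader for weights saved by save() above.
    model.load_state_dict(torch.load(model_path))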
Code example #2
File: tests.py Project: amirhpd/emnas
def test_trainer():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    # controller = Controller(tokens=tokens)
    trainer = Trainer()

    # samples = controller.generate_sequence()
    samples = [[65, 146, 143, 201, 281, 382]]
    architectures = search_space.create_models(samples=samples,
                                               model_input_shape=(128, 128, 3))
    epoch_performance = trainer.train_models(samples=samples,
                                             architectures=architectures)
    assert len(epoch_performance) != 0
Code example #3
File: test_cases.py Project: amirhpd/mvnas
def test_search_space():
    search_space = SearchSpace(target_classes=2)
    vocab = search_space.mapping
    vocab_decoded = search_space.decode_sequence(vocab)
    vocab_encoded = search_space.encode_sequence(vocab_decoded)

    sample_sequence = [8, 8, 30]
    input_shape = np.shape(np.zeros(13))
    model = search_space.create_architecture(sample_sequence, input_shape)
    keras.utils.plot_model(model, to_file="model.png", show_shapes=True)
    assert len(vocab) == 30
    assert len(vocab_decoded) == 30
    assert len(vocab_encoded) == 30
Code example #4
File: tests.py Project: amirhpd/emnas
def test_controller_rnn_trainer():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)
    # samples = controller.generate_sequence()
    manual_epoch_performance = {
        (320, 96, 338, 84, 176, 382): (0.968, 0),  # (acc, lat)
        (22, 47, 225, 315, 223, 382): (0.87, 0),
        (74, 204, 73, 236, 309, 382): (0.74, 0),
        (110, 60, 191, 270, 199, 382): (0.51, 0)
    }

    loss_avg = controller.train_controller_rnn(
        epoch_performance=manual_epoch_performance)
    print(loss_avg)
Code example #5
File: tests.py Project: amirhpd/emnas
def test_search_space():
    search_space = SearchSpace(model_output_shape=2)
    token = search_space.generate_token()

    dense_tokens = [x for x, y in token.items()
                    if "Dense" in y]  # dense layers start from 865
    sample_sequence = [52, 146, 31, 119, 138, 244]
    translated_sequence = search_space.translate_sequence(sample_sequence)
    assert len(translated_sequence) == 4

    model = search_space.create_model(sequence=sample_sequence,
                                      model_input_shape=(128, 128, 3))
    keras.utils.plot_model(model, to_file="model.png", show_shapes=True)
    model.summary()  # summary() prints the table itself; print(model.summary()) would add a stray 'None'
    assert len(token) == 890
Code example #6
File: tests.py Project: amirhpd/emnas
def test_controller_generate_sequence_naive():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)

    # samples = controller.generate_sequence_naive(mode="b")
    # for sequence in samples:
    #     sequence_ = sequence
    #     print(sequence_)

    # sequences_random = controller.generate_sequence_naive(mode="r")

    for i in range(20):
        sequences_random = controller.generate_sequence_naive(mode="r_var_len")
        print(sequences_random)
    print("Done.")
Code example #7
File: main.py Project: davison0487/Motion-Planning
def rrt_star_test(index=0):
    t0 = tic()
    mapfile = [
        './maps/single_cube.txt', './maps/maze.txt', './maps/window.txt',
        './maps/tower.txt', './maps/flappy_bird.txt', './maps/room.txt',
        './maps/monza.txt'
    ]
    name = [
        'single_cube', 'maze', 'window', 'tower', 'flappy_bird', 'room',
        'monza'
    ]
    start_list = [(2.3, 2.3, 1.3), (0.0, 0.0, 1.0), (0.2, -4.9, 0.2),
                  (2.5, 4.0, 0.5), (0.5, 2.5, 5.5), (1.0, 5.0, 1.5),
                  (0.5, 1.0, 4.9)]
    goal_list = [(7.0, 7.0, 5.5), (12.0, 12.0, 5.0), (6.0, 18.0, 3.0),
                 (4.0, 2.5, 19.5), (19.0, 2.5, 5.5), (9.0, 7.0, 1.5),
                 (3.8, 1.0, 0.1)]

    print('Running RRT* ' + name[index] + ' test...\n')

    start = start_list[index]
    goal = goal_list[index]
    boundary, blocks = load_map(mapfile[index])
    dimension = np.array([(boundary[0, 0], boundary[0, 3]),
                          (boundary[0, 1], boundary[0, 4]),
                          (boundary[0, 2], boundary[0, 5])])
    new_blocks = np.array([block[:6] for block in blocks])  # keep only the box extents
    Q = np.array([(0.5, 4)])
    r = 0.05
    max_samples = 102400
    prc = 0.1
    rewire = 10

    X = SearchSpace(dimension, new_blocks)

    rrt = RRTStar(X, Q, start, goal, max_samples, r, prc, rewire)
    path = rrt.rrt_star()
    plot_rrt(name[index], X, rrt, path, new_blocks, start, goal)
    toc(t0, name[index] + ' RRT*')

    pathlength = np.sum(
        np.sqrt(np.sum(np.diff(np.array(path), axis=0)**2, axis=1)))
    print('Path length is:', pathlength)
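The tic/toc pair used for timing is defined elsewhere in the project. A minimal sketch, assuming the usual MATLAB-style convention (the exact print format is an assumption):

import time

def tic():
    # Start a timer.
    return time.time()

def toc(t0, name=""):
    # Report elapsed seconds since t0.
    print("%s took: %.4f sec." % (name, time.time() - t0))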
Code example #8
    def optimize(self, run_f, params):
        search_tree = SearchSpace(params)

        lb = search_tree.get_lb()
        ub = search_tree.get_ub()
        f = param_decorator(run_f, search_tree)

        gs = RandomSearch(self.num_runs, lb, ub, self.sobol)

        start = timeit.default_timer()
        best_params, score = gs.optimize(f)
        end = timeit.default_timer() - start

        best_params = search_tree.transform(best_params)
        Result = namedtuple('Result', ['params', 'score', 'time'])

        return Result(best_params, score, end)
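param_decorator is defined elsewhere in the project. A plausible minimal sketch (an assumption, not the project's actual code): it wraps run_f so the optimizer can work on raw vectors in [lb, ub], with search_tree.transform mapping them back to structured parameters:

def param_decorator(run_f, search_tree):
    # Let the optimizer pass raw vectors; run_f sees structured params.
    def wrapped(x):
        return run_f(search_tree.transform(x))
    return wrapped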
Code example #9
    def optimize(self, run_f, params, parallel=False):

        search_tree = SearchSpace(params)

        lb = search_tree.get_lb()
        ub = search_tree.get_ub()
        f = Evaluator(run_f, search_tree)

        algorithm = self.algorithm(lb, ub, parallel, *self.args, **self.kwargs)

        start = timeit.default_timer()
        best_params, score = algorithm.run(f)
        end = timeit.default_timer() - start

        best_params = search_tree.transform(best_params)
        # Restored: Result must be defined before it is returned below.
        Result = namedtuple('Result', ['params', 'score', 'time'])

        return Result(best_params, score, end)
Code example #10
    def optimize(self, run_f, params):
        search_tree = SearchSpace(params)

        lb = search_tree.get_lb()
        ub = search_tree.get_ub()
        f = param_decorator(run_f, search_tree)

        pso = PSO(self.num_generations, self.num_particles, lb, ub, self.phi1,
                  self.phi2)

        start = timeit.default_timer()
        best_params, score = pso.optimize(f)
        end = timeit.default_timer() - start

        best_params = search_tree.transform(best_params)
        Result = namedtuple('Result', ['params', 'score', 'time'])

        return Result(best_params, score, end)
Code example #11
def generate_models():
    if config.search_space["mode"] == "MobileNets":
        search_space = SearchSpaceMn(model_output_shape=2)
    else:
        search_space = SearchSpace(model_output_shape=2)

    if os.listdir(latency_dataset):
        raise ValueError("Dataset folder is not empty.")
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)
    sequences = []
    df = pd.DataFrame(columns=["model", "params [K]", "sipeed_latency [ms]", "kmodel_memory [KB]", "cpu_latency [ms]",
                               "accuracy", "token_sequence", "length", "model_info"])

    i = 0
    while i < no_of_examples:
        sequence = controller.generate_sequence_naive(mode="r_var_len")
        if (sequence in sequences) or (not search_space.check_sequence(sequence)):
            continue
        try:
            architecture = search_space.create_model(sequence=sequence, model_input_shape=model_input_shape)
        except Exception as e:
            # print(sequence)
            # print(e)
            continue
        sequences.append(sequence)
        i += 1
        i_str = format(i, f"0{len(str(no_of_examples))}d")  # add 0s
        file_name = f"model_{i_str}"
        architecture.save(f"{latency_dataset}/{file_name}.h5")
        model_params = round(architecture.count_params()/1000, 4)
        model_info = search_space.translate_sequence(sequence)
        model_info_json = json.dumps(dict(zip(range(len(model_info)), model_info)))
        df = df.append({"model": file_name, "params [K]": model_params,
                        "token_sequence": sequence, "length": len(sequence),
                        "model_info": model_info_json}, ignore_index=True)
        print(file_name, ", length:", len(sequence))

    df.to_csv(f"{latency_dataset}/table.csv", index=False)
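A short usage sketch for the table this script produces (the latency_dataset path here is hypothetical; only columns written above are referenced):

import pandas as pd

latency_dataset = "latency_dataset"  # hypothetical path, standing in for the config value
table = pd.read_csv(f"{latency_dataset}/table.csv")
print(table[["model", "params [K]", "length"]].head())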
Code example #12
    def __init__(self, tokens):
        self.max_no_of_layers = config.controller["max_no_of_layers"]
        self.agent_lr = config.controller["agent_lr"]
        self.min_reward = config.controller["min_reward"]
        self.min_plays = config.controller["min_plays"]
        self.max_plays = config.controller["max_plays"]
        self.alpha = config.controller["alpha"]
        self.gamma = config.controller["gamma"]
        self.model_input_shape = config.emnas["model_input_shape"]
        self.valid_sequence_timeout = config.controller["valid_sequence_timeout"]
        self.tokens = tokens
        self.len_search_space = len(tokens) + 1
        self.end_token = list(tokens.keys())[-1]
        self.model = self.rl_agent()
        self.states = []
        self.gradients = []
        self.rewards = []
        self.probs = []
        if config.search_space["mode"] == "MobileNets":
            self.search_space = SearchSpaceMn(config.emnas["model_output_shape"])
        else:
            self.search_space = SearchSpace(config.emnas["model_output_shape"])
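The gamma, rewards, and gradients fields above are the usual ingredients of a REINFORCE-style policy-gradient agent. As an illustration only (not the project's code), discounted returns are typically computed like this:

import numpy as np

def discount_rewards(rewards, gamma):
    # Discounted return G_t = r_t + gamma * G_{t+1}, computed right to left.
    returns = np.zeros(len(rewards), dtype=float)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns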
Code example #13
    def initialize(self, width, height, robot_pos, goal_pos, map_data):
        """ Initialize the Moving Target D* Lite algorithm """
        # Search variables
        self.km = 0
        self.open_list = OpenList()
        self.deleted_list = []
        self.path = []

        # Create a search space
        self.search_space = SearchSpace(width, height)
        self.search_space.load_search_space_from_map(map_data)

        # Get the node the robot is on (row, col is the pos argument)
        self.start_node = self.search_space.get_node(robot_pos)

        # Get the node the goal is on (row, col is the pos argument)
        self.goal_node = self.search_space.get_node(goal_pos)

        # Set the robot's node's rhs = 0
        self.start_node.set_rhs(0)

        # Add the robot's node to the open list
        self.start_node.set_key(self.calculate_key(self.start_node))
        self.open_list.insert(self.start_node)
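calculate_key is defined elsewhere in the class. For reference, a sketch of the standard D* Lite priority key it presumably implements (get_g, get_rhs, and heuristic are assumed accessor names, not confirmed by this excerpt):

    def calculate_key(self, node):
        # D* Lite key: secondary = min(g, rhs);
        # primary = secondary + heuristic + key modifier km.
        k2 = min(node.get_g(), node.get_rhs())
        k1 = k2 + self.heuristic(self.goal_node, node) + self.km
        return (k1, k2)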
Code example #14
from optimization_problem import OptimizationProblem
from bayes_optimizer import BayesOptimizer
from search_space import SearchSpace, Parameter
import random
from sklearn.gaussian_process import GaussianProcessRegressor
from benchmark import branin
from numpy import average
branin_problem = OptimizationProblem(
    SearchSpace(
        [
            Parameter("", random.uniform, a=-5, b=10),
            Parameter("", random.uniform, a=0, b=15),
        ]
    ),
    function=branin,
    optimal_value=-0.397887,
)

branin_optimizer = BayesOptimizer(
    branin_problem, GaussianProcessRegressor(), max_iterations=1000, epsilon=0.01
)


def objective(array, *args, **kwargs):
    # Average iteration count over 20 runs, negated.
    runs = [
        branin_optimizer.optimize(start_sample_size=array[0], sample_size=array[1])
        for i in range(20)
    ]
    return average(runs) * -1


bayes_optimization_problem = OptimizationProblem(
    SearchSpace(
        [
            # Parameter("target", random.choice, [branin_optimizer]),
Code example #15
File: tests.py Project: amirhpd/emnas
def test_controller_sample_generator():
    search_space = SearchSpace(model_output_shape=2)
    tokens = search_space.generate_token()
    controller = Controller(tokens=tokens)
    samples = controller.generate_sequence()
    print(samples)
Code example #16
File: rrt_7d.py Project: cjohns94/RoboticsProject
# Obstacles = np.array([
# 	(20, 20, 20, 20, 20, 20, 20, 40, 40, 40, 40, 40, 40, 40),
# 	(10, 10, 10, 10, 10, 10, 10, 18, 18, 18, 18, 18, 18, 18)
# 	])

x_init = (0, 0, 0, 0, 0, 0, 0)  # starting location
x_goal = (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5)  # goal location

Q = np.array([(.05, .05, .05, .05, .05, .05, .05)])  # length of tree edges
r = .01  # length of smallest edge to check for intersection with obstacles
max_samples = 100  # max number of samples to take before timing out
prc = 0.1  # probability of checking for a connection to goal

# create search space
# NOTE: X_dimensions is not defined in this excerpt; illustrative per-joint
# bounds (an assumption) so the snippet runs:
X_dimensions = np.array([(0, 1)] * 7)
# X = SearchSpace(X_dimensions, Obstacles)
X = SearchSpace(X_dimensions)  # obstacles can be set to None

# create rrt_search
rrt = RRT(X, Q, x_init, x_goal, max_samples, r, prc)
print('rrt built. Searching for path...')
path = rrt.rrt_search()

#TODO: figure out plotting with plotly
# plot
#plot = Plot("rrt_2d")
#plot.plot_tree(X, rrt.trees)
#if path is not None:
#    plot.plot_path(X, path)
#plot.plot_obstacles(X, Obstacles)
#plot.plot_start(X, x_init)
#plot.plot_goal(X, x_goal)
Code example #17
File: rrt_nd.py Project: cjohns94/RoboticsProject
# obstacles (q1 lower, q2 lower, ..., qn lower, q1 upper, q2 upper,..., qn upper), each obstacle should have 2n coordinates
Obstacles = np.array([(20, 20, 20, 20, 20, 20, 20, 40, 40, 40, 40, 40, 40, 40),
                      (10, 10, 10, 10, 10, 10, 10, 18, 18, 18, 18, 18, 18, 18)
                      ])

x_init = (0, 0, 0, 0, 0, 0, 0)  # starting location
x_goal = (50, 50, 50, 50, 50, 50, 50)  # goal location

Q = np.array([(8, 4)])  # length of tree edges
r = 1  # length of smallest edge to check for intersection with obstacles
max_samples = 1024  # max number of samples to take before timing out
prc = 0.1  # probability of checking for a connection to goal

# create search space
# NOTE: X_dimensions is not defined in this excerpt; illustrative per-joint
# bounds (an assumption) wide enough to contain start, goal, and obstacles:
X_dimensions = np.array([(0, 100)] * 7)
X = SearchSpace(X_dimensions, Obstacles)

# create rrt_search
rrt = RRT(X, Q, x_init, x_goal, max_samples, r, prc)
path = rrt.rrt_search()

#TODO: figure out plotting with plotly
# plot
#plot = Plot("rrt_2d")
#plot.plot_tree(X, rrt.trees)
#if path is not None:
#    plot.plot_path(X, path)
#plot.plot_obstacles(X, Obstacles)
#plot.plot_start(X, x_init)
#plot.plot_goal(X, x_goal)
#plot.draw(auto_open=True)
Code example #18
File: emnas.py Project: amirhpd/emnas
model_input_shape = config.emnas["model_input_shape"]
model_output_shape = config.emnas["model_output_shape"]  # used below; missing from the excerpt
search_mode = config.emnas["search_mode"]
naive_threshold = config.emnas["naive_threshold"]
naive_timeout = config.emnas["naive_timeout"]
no_of_episodes = config.emnas["no_of_episodes"]
log_path = config.emnas["log_path"]
max_no_of_layers = config.controller["max_no_of_layers"]
dynamic_min_reward = config.controller["dynamic_min_reward"]
variance_threshold = config.controller["variance_threshold"]
valid_actions = config.controller["valid_actions"]
valid_sequence_timeout = config.controller["valid_sequence_timeout"]

if config.search_space["mode"] == "MobileNets":
    search_space = SearchSpaceMn(model_output_shape=model_output_shape)
else:
    search_space = SearchSpace(model_output_shape=model_output_shape)
tokens = search_space.generate_token()
controller = Controller(tokens=tokens)
trainer = Trainer(tokens)


def _plot(history, path):
    img_size = (24, 5)

    fig = plt.figure(figsize=img_size)
    plt.plot(np.arange(0, len(history["loss"])), history["loss"])
    plt.title("Agent loss")
    plt.xlabel("Episode")
    plt.ylabel("Loss")
    plt.grid()
    plt.savefig(path + "/fig_1.png", bbox_inches="tight", pad_inches=0.2)
Code example #19
File: run_trial.py Project: mesosphere/katib
def main():

    parser = argparse.ArgumentParser(description='TrainingContainer')
    parser.add_argument('--algorithm-settings',
                        type=str,
                        default="",
                        help="algorithm settings")
    parser.add_argument('--search-space',
                        type=str,
                        default="",
                        help="search space for the neural architecture search")
    parser.add_argument('--num-layers',
                        type=str,
                        default="",
                        help="number of layers of the neural network")

    args = parser.parse_args()

    # Get Algorithm Settings
    algorithm_settings = args.algorithm_settings.replace("\'", "\"")
    algorithm_settings = json.loads(algorithm_settings)
    print(">>> Algorithm settings")
    for key, value in algorithm_settings.items():
        if len(key) > 13:
            print("{}\t{}".format(key, value))
        elif len(key) < 5:
            print("{}\t\t\t{}".format(key, value))
        else:
            print("{}\t\t{}".format(key, value))
    print()

    num_epochs = int(algorithm_settings["num_epochs"])

    w_lr = float(algorithm_settings["w_lr"])
    w_lr_min = float(algorithm_settings["w_lr_min"])
    w_momentum = float(algorithm_settings["w_momentum"])
    w_weight_decay = float(algorithm_settings["w_weight_decay"])
    w_grad_clip = float(algorithm_settings["w_grad_clip"])

    alpha_lr = float(algorithm_settings["alpha_lr"])
    alpha_weight_decay = float(algorithm_settings["alpha_weight_decay"])

    batch_size = int(algorithm_settings["batch_size"])
    num_workers = int(algorithm_settings["num_workers"])

    init_channels = int(algorithm_settings["init_channels"])

    print_step = int(algorithm_settings["print_step"])

    num_nodes = int(algorithm_settings["num_nodes"])
    stem_multiplier = int(algorithm_settings["stem_multiplier"])

    # Get Search Space
    search_space = args.search_space.replace("\'", "\"")
    search_space = json.loads(search_space)
    search_space = SearchSpace(search_space)

    # Get Num Layers
    num_layers = int(args.num_layers)
    print("Number of layers {}\n".format(num_layers))

    # Set GPU Device
    # Currently use only first available GPU
    # TODO: Add multi GPU support
    # TODO: Add functionality to select GPU
    all_gpus = list(range(torch.cuda.device_count()))
    if len(all_gpus) > 0:
        device = torch.device("cuda")
        torch.cuda.set_device(all_gpus[0])
        np.random.seed(2)
        torch.manual_seed(2)
        torch.cuda.manual_seed_all(2)
        torch.backends.cudnn.benchmark = True
        print(">>> Use GPU for Training <<<")
        print("Device ID: {}".format(torch.cuda.current_device()))
        print("Device name: {}".format(torch.cuda.get_device_name(0)))
        print("Device availability: {}\n".format(torch.cuda.is_available()))
    else:
        device = torch.device("cpu")
        print(">>> Use CPU for Training <<<")

    # Get dataset with meta information
    # TODO: Add support for more datasets
    input_channels, num_classes, train_data = utils.get_dataset()

    criterion = nn.CrossEntropyLoss().to(device)

    model = NetworkCNN(init_channels, input_channels, num_classes, num_layers,
                       criterion, search_space, num_nodes, stem_multiplier)

    model = model.to(device)

    # Weights optimizer
    w_optim = torch.optim.SGD(model.getWeights(),
                              w_lr,
                              momentum=w_momentum,
                              weight_decay=w_weight_decay)

    # Alphas optimizer
    alpha_optim = torch.optim.Adam(model.getAlphas(),
                                   alpha_lr,
                                   betas=(0.5, 0.999),
                                   weight_decay=alpha_weight_decay)

    # Split data to train/validation
    num_train = len(train_data)
    split = num_train // 2
    indices = list(range(num_train))

    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[:split])
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
        indices[split:])

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               sampler=train_sampler,
                                               num_workers=num_workers,
                                               pin_memory=True)

    valid_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               sampler=valid_sampler,
                                               num_workers=num_workers,
                                               pin_memory=True)

    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optim,
                                                              num_epochs,
                                                              eta_min=w_lr_min)

    architect = Architect(model, w_momentum, w_weight_decay)

    # Start training
    best_top1 = 0.
    best_genotype = None  # guard in case no epoch improves top1

    for epoch in range(num_epochs):
        lr_scheduler.step()
        lr = lr_scheduler.get_lr()[0]

        model.print_alphas()

        # Training
        print(">>> Training")
        train(train_loader, valid_loader, model, architect, w_optim,
              alpha_optim, lr, epoch, num_epochs, device, w_grad_clip,
              print_step)

        # Validation
        print("\n>>> Validation")
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, epoch, cur_step, num_epochs,
                        device, print_step)

        # Print genotype
        genotype = model.genotype(search_space)
        print("\nModel genotype = {}".format(genotype))

        # Modify best top1
        if top1 > best_top1:
            best_top1 = top1
            best_genotype = genotype

    print("Final best Prec@1 = {:.4%}".format(best_top1))
    print("\nBest-Genotype={}".format(str(best_genotype).replace(" ", "")))
Code example #20
from optimization_problem import OptimizationProblem
from benchmark import branin
from bayes_optimizer import BayesOptimizer
from search_space import SearchSpace, Parameter
import random
from sklearn.gaussian_process import GaussianProcessRegressor

branin_problem = OptimizationProblem(
    SearchSpace([
        Parameter("", random.uniform, a=-5, b=10),
        Parameter("", random.uniform, a=0, b=15),
    ]),
    function=branin,
    optimal_value=-0.397887,
)

n_runs = 1
total_iterations = 0  # renamed to avoid shadowing the built-in sum()

optimizer = BayesOptimizer(branin_problem,
                           GaussianProcessRegressor(),
                           epsilon=0.0001,
                           max_iterations=3000)
for i in range(n_runs):
    total_iterations += optimizer.optimize(
        debug=True,
        start_sample_size=200,
        sample_size=1000,
    )

print("\n\n\n\nAverage iterations is", total_iterations / n_runs)