Example #1
import unittest

import numpy as np
from pybnn.dngo import DNGO


# The snippet begins mid-class; the enclosing wrapper class is assumed here.
class DNGOWrapper:

    def __init__(self,
                 mini_batch_size=10,
                 num_epochs=500,
                 n_units=[50, 50, 50],
                 alpha=1.0,
                 beta=1000,
                 prior=None,
                 do_mcmc=True,
                 n_hypers=20,
                 chain_length=2000,
                 burnin_steps=2000,
                 normalize_input=True,
                 normalize_output=True,
                 seed=42):

        self.model = DNGO(batch_size=mini_batch_size,
                          num_epochs=num_epochs,
                          n_units_1=n_units[0],
                          n_units_2=n_units[1],
                          n_units_3=n_units[2],
                          alpha=alpha,
                          beta=beta,
                          prior=prior,
                          do_mcmc=do_mcmc,
                          n_hypers=n_hypers,
                          chain_length=chain_length,
                          burnin_steps=burnin_steps,
                          normalize_input=normalize_input,
                          normalize_output=normalize_output,
                          rng=seed)

# The test methods below belong to a separate unittest.TestCase; the class
# declaration and the fixture are assumed (the original fixture is not shown).
class TestDNGO(unittest.TestCase):

    def setUp(self):
        self.X = np.random.rand(10, 3)
        self.y = np.sinc(self.X * 10 - 5).sum(axis=1)

    def test_incumbent(self):
        model = DNGO(num_epochs=10, do_mcmc=False)
        model.train(self.X, self.y)

        x_star, y_star = model.get_incumbent()

        b = np.argmin(self.y)

        assert np.all(np.isclose(x_star, self.X[b]))
        assert np.all(np.isclose(y_star, self.y[b]))

    def test_without_normalization(self):
        model = DNGO(num_epochs=10, do_mcmc=False, normalize_output=False, normalize_input=False)
        model.train(self.X, self.y)

        X_test = np.random.rand(10, self.X.shape[1])

        m, v = model.predict(X_test)

        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 1
        assert v.shape[0] == X_test.shape[0]

    def test_ml(self):
        model = DNGO(num_epochs=10, do_mcmc=False)
        model.train(self.X, self.y)

        X_test = np.random.rand(10, self.X.shape[1])

        m, v = model.predict(X_test)

        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 1
        assert v.shape[0] == X_test.shape[0]

    def test_mcmc(self):
        model = DNGO(num_epochs=10, burnin_steps=10, chain_length=20, do_mcmc=True)
        model.train(self.X, self.y)

        X_test = np.random.rand(10, self.X.shape[1])

        m, v = model.predict(X_test)

        assert len(m.shape) == 1
        assert m.shape[0] == X_test.shape[0]
        assert len(v.shape) == 1
        assert v.shape[0] == X_test.shape[0]
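
For reference, a minimal end-to-end sketch of the DNGO API exercised by these tests, on assumed toy data (only calls that appear above are used):

import numpy as np
from pybnn.dngo import DNGO

X = np.random.rand(20, 3)                    # toy inputs, assumed
y = np.sinc(X * 10 - 5).sum(axis=1)          # toy targets, assumed

model = DNGO(num_epochs=10, do_mcmc=False)
model.train(X, y, do_optimize=True)

m, v = model.predict(np.random.rand(5, 3))   # predictive mean and variance, each of shape (5,)
x_star, y_star = model.get_incumbent()       # best observed input and its target value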
Example #6
from pybnn.dngo import DNGO


# The enclosing model-provider class is not part of this snippet; a stand-in
# class is assumed so that the method parses.
class ModelProvider:

    def get_model(self, **kwargs):
        predictor = DNGO(
            batch_size=10,
            num_epochs=500,
            learning_rate=0.01,
            adapt_epoch=5000,
            n_units_1=50,
            n_units_2=50,
            n_units_3=50,
            alpha=1.0,
            beta=1000,
            prior=None,
            do_mcmc=True,  # turn this off for better sample efficiency
            n_hypers=20,
            chain_length=2000,
            burnin_steps=2000,
            normalize_input=False,
            normalize_output=True)
        return predictor
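
A hedged usage sketch (ModelProvider is the stand-in class name assumed above; with do_mcmc=True the hyperparameters of DNGO's Bayesian linear regression head are marginalized by MCMC, which costs extra wall-clock time per refit):

import numpy as np

predictor = ModelProvider().get_model()
X = np.random.rand(32, 5)                # toy data, assumed
y = np.random.rand(32)
predictor.train(X, y, do_optimize=True)
mean, var = predictor.predict(X[:4])     # each of shape (4,)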
Example #7
def bayesian_optimization(objective_function,
                          lower,
                          upper,
                          num_iterations=30,
                          X_init=None,
                          Y_init=None,
                          maximizer="random",
                          acquisition_func="log_ei",
                          model_type="gp_mcmc",
                          n_init=3,
                          rng=None,
                          output_path=None,
                          kernel=None,
                          sampling_method="origin",
                          distance="cosine",
                          replacement=True,
                          pool=None,
                          best=None):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    X_init: np.ndarray(N,D)
        Initial points to warmstart BO
    Y_init: np.ndarray(N,1)
        Function values of the initial points
    maximizer: {"random", "scipy", "differential_evolution"}
        The optimizer for the acquisition function.
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    model_type: {"gp", "gp_mcmc", "rf", "bohamiann", "dngo"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration will be saved.
        If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator
    kernel: george.kernels.Kernel
        The kernel of the Gaussian process. Note that this must be an actual
        kernel object rather than one of the string names listed in the
        commented-out block below; it is scaled by a constant amplitude
        before use, so it is effectively required for the "gp" and
        "gp_mcmc" models.
    sampling_method: {"origin", "approx", "exact"}
        Specify the method to choose next sample to update model.
        approx: choose the sample in the candidate pool that is closest (measured by distance
        arg) to the one returned from maximizing acquisition function.
        exact: evaluate all samples in the candidate pool on acquisition function
        and choose the one with maximum output.
    distance: {"cosine", "euclidean"}
        The distance measurement for approximation sampling.
    replacement: boolean
        Whether to sample from pool with replacement.
    pool: np.ndarray(N,D)
        Candidate pool containing possible x
    best: float
        Stop the search once a point with this function value is sampled.
    Returns
    -------
        dict with all results
    """
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, "Number of initial design points must be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    #n_dims = lower.shape[0]

    #initial_ls = np.ones([n_dims])

    # if kernel == "constant":
    #     exp_kernel = george.kernels.ConstantKernel(1, ndim=n_dims)
    # elif kernel == "polynomial":
    #     exp_kernel = george.kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    # elif kernel == "linear":
    #     exp_kernel = george.kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    # elif kernel == "dotproduct":
    #     exp_kernel = george.kernels.DotProductKernel(ndim=n_dims)
    # elif kernel == "exp":
    #     exp_kernel = george.kernels.ExpKernel(initial_ls, ndim=n_dims)
    # elif kernel == "expsquared":
    #     exp_kernel = george.kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    # elif kernel == "matern32":
    #     exp_kernel = george.kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    # elif kernel == "matern52":
    #     exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    # elif kernel == "rationalquadratic":
    #     exp_kernel = george.kernels.RationalQuadraticKernel(log_alpha=1, metric=initial_ls, ndim=n_dims)
    # elif kernel == "cosine":
    #     exp_kernel = george.kernels.CosineKernel(4, ndim=n_dims)
    # elif kernel == "expsine2":
    #     exp_kernel = george.kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    # elif kernel == "heuristic":
    #     exp_kernel = george.kernels.PythonKernel(heuristic_kernel_function, ndim=n_dims)
    # else:
    #     raise ValueError("'{}' is not a valid kernel".format(kernel))

    # NOTE: `kernel` must be a george kernel object here; the default of None
    # would make this multiplication fail.
    kernel = cov_amp * kernel

    prior = DefaultPrior(len(kernel) + 1)

    # Three walkers per hyperparameter; the ensemble MCMC sampler expects an
    # even number of walkers.
    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model_type == "gp":
        model = GaussianProcess(kernel,
                                prior=prior,
                                rng=rng,
                                normalize_output=False,
                                normalize_input=True,
                                lower=lower,
                                upper=upper)
    elif model_type == "gp_mcmc":
        model = GaussianProcessMCMC(kernel,
                                    prior=prior,
                                    n_hypers=n_hypers,
                                    chain_length=200,
                                    burnin_steps=100,
                                    normalize_input=True,
                                    normalize_output=False,
                                    rng=rng,
                                    lower=lower,
                                    upper=upper)

    elif model_type == "rf":
        model = RandomForest(rng=rng)

    elif model_type == "bohamiann":
        model = WrapperBohamiann()

    elif model_type == "dngo":
        model = DNGO()

    else:
        raise ValueError("'{}' is not a valid model".format(model_type))

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    if model_type == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)
    else:
        acquisition_func = a

    if maximizer == "random":
        max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "differential_evolution":
        max_func = DifferentialEvolution(acquisition_func,
                                         lower,
                                         upper,
                                         rng=rng)
    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    if sampling_method == "exact":
        max_func = ExactSampling(acquisition_func,
                                 lower,
                                 upper,
                                 pool,
                                 replacement,
                                 rng=rng)
        init_design = init_exact_random
    elif sampling_method == "approx":
        max_func = ApproxSampling(acquisition_func,
                                  lower,
                                  upper,
                                  pool,
                                  replacement,
                                  distance,
                                  rng=rng)
        init_design = init_exact_random
    else:
        init_design = init_latin_hypercube_sampling

    bo = BayesianOptimization(objective_function,
                              lower,
                              upper,
                              acquisition_func,
                              model,
                              max_func,
                              pool,
                              best,
                              sampling_method,
                              distance,
                              replacement,
                              initial_points=n_init,
                              rng=rng,
                              initial_design=init_design,
                              output_path=output_path)

    x_best, f_min = bo.run(num_iterations, X=X_init, y=Y_init)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]
    return results
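
Because `kernel` defaults to None but is scaled by `cov_amp` before use, this variant only runs when a george kernel object is passed in. A minimal invocation sketch on an assumed 2-D toy objective:

import numpy as np
import george

def toy_objective(x):
    return float(np.sum((x - 0.5) ** 2))   # quadratic with its minimum at 0.5

lower = np.zeros(2)
upper = np.ones(2)
kernel = george.kernels.Matern52Kernel(np.ones(2), ndim=2)

results = bayesian_optimization(toy_objective, lower, upper,
                                num_iterations=20,
                                model_type="gp_mcmc",
                                kernel=kernel)
print(results["x_opt"], results["f_opt"])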
Example #8
class DNGOWrap(BaseModel):
    """
    A Wrapper for MC Dropout for a fully connected
    feed forward neural network..
    """
    def __init__(self,
                 mini_batch_size=10,
                 num_epochs=500,
                 n_units=[50, 50, 50],
                 alpha=1.0,
                 beta=1000,
                 prior=None,
                 do_mcmc=True,
                 n_hypers=20,
                 chain_length=2000,
                 burnin_steps=2000,
                 normalize_input=True,
                 normalize_output=True,
                 seed=42):

        self.model = DNGO(batch_size=mini_batch_size,
                          num_epochs=num_epochs,
                          n_units_1=n_units[0],
                          n_units_2=n_units[1],
                          n_units_3=n_units[2],
                          alpha=alpha,
                          beta=beta,
                          prior=prior,
                          do_mcmc=do_mcmc,
                          n_hypers=n_hypers,
                          chain_length=chain_length,
                          burnin_steps=burnin_steps,
                          normalize_input=normalize_input,
                          normalize_output=normalize_output,
                          rng=seed)

    def _create_model(self, X, Y):
        Y = Y.flatten()
        self.model.train(X, Y, do_optimize=True)

    def _update_model(self, X_all, Y_all):
        """
        Updates the model with new observations.
        """
        Y_all = Y_all.flatten()

        if self.model is None:
            self._create_model(X_all, Y_all)
        else:
            self.model.train(X_all, Y_all, do_optimize=True)

    def predict(self, X):
        """
        Predictions with the model. Returns predictive means and standard deviations at X.
        """
        X = np.atleast_2d(X)
        m, v = self.model.predict(X)
        # m and v have shape (N,)
        s = np.sqrt(v)

        return m[:, None], s[:, None]

    def predict_withGradients(self, X):
        """
        Returns the mean, standard deviation, mean gradient and standard
        deviation gradient at X. Not implemented for this wrapper.
        """
        raise NotImplementedError("DNGO wrapper does not provide gradients")
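
A short usage sketch on assumed toy data; unlike the raw DNGO predict, this wrapper returns column vectors of shape (N, 1), and standard deviations rather than variances:

import numpy as np

wrapper = DNGOWrap(num_epochs=10, do_mcmc=False)
X = np.random.rand(30, 4)
Y = np.random.rand(30, 1)
wrapper._update_model(X, Y)         # trains the underlying DNGO
m, s = wrapper.predict(X[:5])       # m and s each have shape (5, 1)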
Example #9
def expected_improvement_search(features, valid_labels, test_labels,
                                training_time, other_info):
    """ Expected-improvement search over pretrained arch2vec features.
    :param features: arch2vec embeddings of the candidate architectures
    :param valid_labels: validation accuracies of the candidates
    :param test_labels: test accuracies of the candidates
    :param training_time: per-architecture training time (spent as budget)
    :param other_info: auxiliary per-architecture information
    """
    CURR_BEST_VALID = 0.
    CURR_BEST_TEST = 0.
    CURR_BEST_INFO = None
    MAX_BUDGET = args.MAX_BUDGET
    window_size = 200
    counter = 0
    rt = 0.
    visited = {}
    best_trace = defaultdict(list)

    features = features.cpu().detach()
    valid_labels = valid_labels.cpu().detach()
    test_labels = test_labels.cpu().detach()
    training_time = training_time.cpu().detach()
    feat_samples, valid_label_samples, test_label_samples, time_samples, other_info_sampled, visited = get_init_samples(
        features, valid_labels, test_labels, training_time, other_info,
        visited)

    t_start = time.time()
    for feat, acc_valid, acc_test, t, o_info in zip(feat_samples,
                                                    valid_label_samples,
                                                    test_label_samples,
                                                    time_samples,
                                                    other_info_sampled):
        counter += 1
        rt += t.item()
        if acc_valid > CURR_BEST_VALID:
            CURR_BEST_VALID = acc_valid
            CURR_BEST_TEST = acc_test
            CURR_BEST_INFO = o_info
        best_trace['validation'].append(float(CURR_BEST_VALID))
        best_trace['test'].append(float(CURR_BEST_TEST))
        best_trace['time'].append(time.time() - t_start)
        best_trace['counter'].append(counter)

    while rt < MAX_BUDGET:
        print("feat_samples:", feat_samples.shape)
        print("valid label_samples:", valid_label_samples.shape)
        print("test label samples:", test_label_samples.shape)
        print("current best validation: {}".format(CURR_BEST_VALID))
        print("current best test: {}".format(CURR_BEST_TEST))
        print("rt: {}".format(rt))
        print(feat_samples.shape)
        print(valid_label_samples.shape)
        model = DNGO(num_epochs=100,
                     n_units=128,
                     do_mcmc=False,
                     normalize_output=False)
        model.train(X=feat_samples.numpy(),
                    y=valid_label_samples.view(-1).numpy(),
                    do_optimize=True)
        print(model.network)
        m = []
        v = []
        chunks = int(features.shape[0] / window_size)
        if features.shape[0] % window_size > 0:
            chunks += 1
        features_split = torch.split(features, window_size, dim=0)
        for i in range(chunks):
            m_split, v_split = model.predict(features_split[i].numpy())
            m.extend(list(m_split))
            v.extend(list(v_split))
        mean = torch.Tensor(m)
        # DNGO.predict returns predictive variances; the code uses them
        # directly as sigma (taking sqrt(v) first would be the usual choice).
        sigma = torch.Tensor(v)
        u = (mean - torch.Tensor([1.0]).expand_as(mean)) / sigma
        normal = Normal(torch.zeros_like(u), torch.ones_like(u))
        ucdf = normal.cdf(u)
        updf = torch.exp(normal.log_prob(u))
        ei = sigma * (updf + u * ucdf)
        feat_next, label_next_valid, label_next_test, time_next, info_next, visited = propose_location(
            ei, features, valid_labels, test_labels, training_time, other_info,
            visited)

        # add proposed networks to selected networks
        for feat, acc_valid, acc_test, t, o_info in zip(
                feat_next, label_next_valid, label_next_test, time_next,
                info_next):
            feat_samples = torch.cat((feat_samples, feat.view(1, -1)), dim=0)
            valid_label_samples = torch.cat(
                (valid_label_samples.view(-1, 1), acc_valid.view(1, 1)), dim=0)
            test_label_samples = torch.cat(
                (test_label_samples.view(-1, 1), acc_test.view(1, 1)), dim=0)
            counter += 1
            rt += t.item()
            if acc_valid > CURR_BEST_VALID:
                CURR_BEST_VALID = acc_valid
                CURR_BEST_TEST = acc_test
                CURR_BEST_INFO = o_info

            best_trace['validation'].append(float(CURR_BEST_VALID))
            best_trace['test'].append(float(CURR_BEST_TEST))
            best_trace['time'].append(
                time.time() - t_start)  # the actual searching time
            best_trace['counter'].append(counter)

            if rt >= MAX_BUDGET:
                break

    res = dict()
    # Read back the keys that were actually populated above (the traces hold
    # best-so-far accuracies, not regrets).
    res['regret_validation'] = best_trace['validation']
    res['regret_test'] = best_trace['test']
    res['runtime'] = best_trace['time']
    res['counter'] = best_trace['counter']
    save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim))
    os.makedirs(save_path, exist_ok=True)
    print('save to {}'.format(save_path))
    print('Current Best Valid {}, Test {}'.format(CURR_BEST_VALID,
                                                  CURR_BEST_TEST))
    data_dict = {
        'val_acc': float(CURR_BEST_VALID),
        'test_acc': float(CURR_BEST_TEST),
        'val_acc_avg': float(CURR_BEST_INFO['valid_accuracy_avg']),
        'test_acc_avg': float(CURR_BEST_INFO['test_accuracy_avg'])
    }
    save_dir = os.path.join(
        save_path,
        'nasbench201_{}_run_{}_full.json'.format(args.dataset_name, args.seed))
    with open(save_dir, 'w') as f:
        json.dump(data_dict, f)
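
The acquisition computed in the loop above is analytic expected improvement for maximization against a fixed target y* (1.0 here): with u = (mean - y*) / sigma, EI = sigma * (pdf(u) + u * cdf(u)). A standalone NumPy/SciPy sketch of the same computation; note that if predict returns variances, as in pybnn's DNGO, taking sqrt(v) before using it as sigma would be the usual choice:

import numpy as np
from scipy.stats import norm

def expected_improvement(mean, sigma, target):
    # EI for maximization against a fixed target: sigma * (pdf(u) + u * cdf(u))
    u = (mean - target) / sigma
    return sigma * (norm.pdf(u) + u * norm.cdf(u))

ei = expected_improvement(np.array([0.90, 0.70]), np.array([0.05, 0.20]), target=1.0)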
Example #10
def bayesian_optimization(objective_function,
                          lower,
                          upper,
                          num_iterations=30,
                          X_init=None,
                          Y_init=None,
                          maximizer="random",
                          acquisition_func="log_ei",
                          model_type="gp_mcmc",
                          n_init=3,
                          rng=None,
                          output_path=None):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    objective_function: function
        The objective function that is minimized. This function gets a numpy
        array (D,) as input and returns the function value (scalar)
    lower: np.ndarray (D,)
        The lower bound of the search space
    upper: np.ndarray (D,)
        The upper bound of the search space
    num_iterations: int
        The number of iterations (initial design + BO)
    X_init: np.ndarray(N,D)
        Initial points to warmstart BO
    Y_init: np.ndarray(N,1)
        Function values of the initial points
    maximizer: {"random", "scipy", "differential_evolution"}
        The optimizer for the acquisition function.
    acquisition_func: {"ei", "log_ei", "lcb", "pi"}
        The acquisition function
    model_type: {"gp", "gp_mcmc", "rf", "bohamiann", "dngo"}
        The model for the objective function.
    n_init: int
        Number of points for the initial design. Make sure that it
        is <= num_iterations.
    output_path: string
        Specifies the path where the intermediate output after each iteration will be saved.
        If None no output will be saved to disk.
    rng: numpy.random.RandomState
        Random number generator

    Returns
    -------
        dict with all results
    """
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert np.all(lower < upper), "Lower bound >= upper bound"
    assert n_init <= num_iterations, "Number of initial design points must be <= the number of iterations"

    if rng is None:
        rng = np.random.RandomState(np.random.randint(0, 10000))

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = np.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1)

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    if model_type == "gp":
        model = GaussianProcess(kernel,
                                prior=prior,
                                rng=rng,
                                normalize_output=False,
                                normalize_input=True,
                                lower=lower,
                                upper=upper)
    elif model_type == "gp_mcmc":
        model = GaussianProcessMCMC(kernel,
                                    prior=prior,
                                    n_hypers=n_hypers,
                                    chain_length=200,
                                    burnin_steps=100,
                                    normalize_input=True,
                                    normalize_output=False,
                                    rng=rng,
                                    lower=lower,
                                    upper=upper)

    elif model_type == "rf":
        model = RandomForest(rng=rng)

    elif model_type == "bohamiann":
        model = WrapperBohamiann()

    elif model_type == "dngo":
        model = DNGO()

    else:
        raise ValueError("'{}' is not a valid model".format(model_type))

    if acquisition_func == "ei":
        a = EI(model)
    elif acquisition_func == "log_ei":
        a = LogEI(model)
    elif acquisition_func == "pi":
        a = PI(model)
    elif acquisition_func == "lcb":
        a = LCB(model)
    else:
        raise ValueError("'{}' is not a valid acquisition function".format(
            acquisition_func))

    if model_type == "gp_mcmc":
        acquisition_func = MarginalizationGPMCMC(a)
    else:
        acquisition_func = a

    if maximizer == "random":
        max_func = RandomSampling(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "scipy":
        max_func = SciPyOptimizer(acquisition_func, lower, upper, rng=rng)
    elif maximizer == "differential_evolution":
        max_func = DifferentialEvolution(acquisition_func,
                                         lower,
                                         upper,
                                         rng=rng)
    else:
        raise ValueError("'{}' is not a valid function to maximize the "
                         "acquisition function".format(maximizer))

    bo = BayesianOptimization(objective_function,
                              lower,
                              upper,
                              acquisition_func,
                              model,
                              max_func,
                              initial_points=n_init,
                              rng=rng,
                              initial_design=init_latin_hypercube_sampling,
                              output_path=output_path)

    x_best, f_min = bo.run(num_iterations, X=X_init, y=Y_init)

    results = dict()
    results["x_opt"] = x_best
    results["f_opt"] = f_min
    results["incumbents"] = [inc for inc in bo.incumbents]
    results["incumbent_values"] = [val for val in bo.incumbents_values]
    results["runtime"] = bo.runtime
    results["overhead"] = bo.time_overhead
    results["X"] = [x.tolist() for x in bo.X]
    results["y"] = [y for y in bo.y]
    return results
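
A minimal end-to-end sketch of this interface on an assumed 1-D toy problem; here the Matern-5/2 kernel is built internally, so only the objective and the bounds are required:

import numpy as np

def toy_objective(x):
    return float(np.sin(3.0 * x[0]) + x[0] ** 2 - 0.7 * x[0])

results = bayesian_optimization(toy_objective,
                                lower=np.array([-2.0]),
                                upper=np.array([2.0]),
                                num_iterations=25,
                                model_type="gp_mcmc",
                                acquisition_func="log_ei",
                                maximizer="random")
print(results["x_opt"], results["f_opt"])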
Example #11
def expected_improvement_search(features, genotype):
    """ Expected-improvement search with arch2vec-DNGO on the DARTS search space. """
    CURR_BEST_VALID = 0.
    CURR_BEST_TEST = 0.
    CURR_BEST_GENOTYPE = None
    MAX_BUDGET = args.max_budgets
    window_size = 200
    counter = 0
    visited = {}
    best_trace = defaultdict(list)

    features = features.cpu().detach()
    feat_samples, geno_samples, valid_label_samples, test_label_samples, visited = get_init_samples(
        features, genotype, visited)

    for feat, geno, acc_valid, acc_test in zip(feat_samples, geno_samples,
                                               valid_label_samples,
                                               test_label_samples):
        counter += 1
        if acc_valid > CURR_BEST_VALID:
            CURR_BEST_VALID = acc_valid
            CURR_BEST_TEST = acc_test
            CURR_BEST_GENOTYPE = geno
        best_trace['validation_acc'].append(float(CURR_BEST_VALID))
        best_trace['test_acc'].append(float(CURR_BEST_TEST))
        best_trace['genotype'].append(CURR_BEST_GENOTYPE)
        best_trace['counter'].append(counter)

    while counter < MAX_BUDGET:
        print("feat_samples:", feat_samples.shape)
        print("length of genotypes:", len(geno_samples))
        print("valid label_samples:", valid_label_samples.shape)
        print("test label samples:", test_label_samples.shape)
        print("current best validation: {}".format(CURR_BEST_VALID))
        print("current best test: {}".format(CURR_BEST_TEST))
        print("counter: {}".format(counter))
        print(feat_samples.shape)
        print(valid_label_samples.shape)
        model = DNGO(num_epochs=100,
                     n_units=128,
                     do_mcmc=False,
                     normalize_output=False)
        model.train(X=feat_samples.numpy(),
                    y=valid_label_samples.view(-1).numpy(),
                    do_optimize=True)
        print(model.network)
        m = []
        v = []
        chunks = int(features.shape[0] / window_size)
        if features.shape[0] % window_size > 0:
            chunks += 1
        features_split = torch.split(features, window_size, dim=0)
        for i in range(chunks):
            m_split, v_split = model.predict(features_split[i].numpy())
            m.extend(list(m_split))
            v.extend(list(v_split))
        mean = torch.Tensor(m)
        sigma = torch.Tensor(v)
        u = (mean - torch.Tensor([args.objective]).expand_as(mean)) / sigma
        normal = Normal(torch.zeros_like(u), torch.ones_like(u))
        ucdf = normal.cdf(u)
        updf = torch.exp(normal.log_prob(u))
        ei = sigma * (updf + u * ucdf)
        feat_next, geno_next, label_next_valid, label_next_test, visited = propose_location(
            ei, features, genotype, visited, counter)

        # add proposed networks to the pool
        for feat, geno, acc_valid, acc_test in zip(feat_next, geno_next,
                                                   label_next_valid,
                                                   label_next_test):
            feat_samples = torch.cat((feat_samples, feat.view(1, -1)), dim=0)
            geno_samples.append(geno)
            valid_label_samples = torch.cat(
                (valid_label_samples.view(-1, 1), acc_valid.view(1, 1)), dim=0)
            test_label_samples = torch.cat(
                (test_label_samples.view(-1, 1), acc_test.view(1, 1)), dim=0)
            counter += 1
            if acc_valid.item() > CURR_BEST_VALID:
                CURR_BEST_VALID = acc_valid.item()
                CURR_BEST_TEST = acc_test.item()
                CURR_BEST_GENOTYPE = geno

            best_trace['validation_acc'].append(float(CURR_BEST_VALID))
            best_trace['test_acc'].append(float(CURR_BEST_TEST))
            best_trace['genotype'].append(CURR_BEST_GENOTYPE)
            best_trace['counter'].append(counter)

            if counter >= MAX_BUDGET:
                break

    res = dict()
    res['validation_acc'] = best_trace['validation_acc']
    res['test_acc'] = best_trace['test_acc']
    res['genotype'] = best_trace['genotype']
    res['counter'] = best_trace['counter']
    save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim))
    os.makedirs(save_path, exist_ok=True)
    print('save to {}'.format(save_path))
    out_file = os.path.join(
        save_path, 'run_{}_arch2vec_model_darts.json'.format(args.seed))
    with open(out_file, 'w') as fh:
        json.dump(res, fh)
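
Both search loops above score the candidate pool in windows of 200 rows to bound memory during prediction. The same pattern in isolation, as a small sketch:

import numpy as np

def predict_in_chunks(model, features, window_size=200):
    # Run model.predict over row-chunks of `features` and stitch the results.
    means, variances = [], []
    for start in range(0, features.shape[0], window_size):
        m, v = model.predict(features[start:start + window_size])
        means.append(m)
        variances.append(v)
    return np.concatenate(means), np.concatenate(variances)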
Example #12
def build_model(lower,
                upper,
                model_type="gp_mcmc",
                model_seed=1,
                prior_seed=1):
    """
    General interface for Bayesian optimization for global black box
    optimization problems.

    Parameters
    ----------
    lower: numpy.ndarray (D,)
        The lower bound of the search space
    upper: numpy.ndarray (D,)
        The upper bound of the search space
    model_type: {"gp", "gp_mcmc", "rf", "bohamiann", "dngo"}
        The model for the objective function.
    model_seed: int
        Seed for random number generator of the model 
    prior_seed: int
        Seed for random number generator of the prior

    Returns
    -------
        Model
    """
    assert upper.shape[0] == lower.shape[0], "Dimension mismatch"
    assert numpy.all(lower < upper), "Lower bound >= upper bound"

    cov_amp = 2
    n_dims = lower.shape[0]

    initial_ls = numpy.ones([n_dims])
    exp_kernel = george.kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    kernel = cov_amp * exp_kernel

    prior = DefaultPrior(len(kernel) + 1, numpy.random.RandomState(prior_seed))

    n_hypers = 3 * len(kernel)
    if n_hypers % 2 == 1:
        n_hypers += 1

    # NOTE: Some models do not support RNG properly and rely on global RNG state
    #       so we need to seed here as well...
    numpy.random.seed(model_seed)
    model_rng = numpy.random.RandomState(model_seed)
    if model_type == "gp":
        model = GaussianProcess(kernel,
                                prior=prior,
                                rng=model_rng,
                                normalize_output=False,
                                normalize_input=True,
                                lower=lower,
                                upper=upper)
    elif model_type == "gp_mcmc":
        model = GaussianProcessMCMC(kernel,
                                    prior=prior,
                                    n_hypers=n_hypers,
                                    chain_length=200,
                                    burnin_steps=100,
                                    normalize_input=True,
                                    normalize_output=False,
                                    rng=model_rng,
                                    lower=lower,
                                    upper=upper)

    elif model_type == "rf":
        model = RandomForest(rng=model_rng)

    elif model_type == "bohamiann":
        model = WrapperBohamiann()

    elif model_type == "dngo":
        from pybnn.dngo import DNGO
        model = DNGO()

    else:
        raise ValueError("'{}' is not a valid model".format(model_type))

    return model
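
A short usage sketch, assuming the module-level imports used above (numpy, george, DefaultPrior, and the model classes) are in scope:

lower = numpy.zeros(2)
upper = numpy.ones(2)
model = build_model(lower, upper, model_type="gp", model_seed=1, prior_seed=1)

X = numpy.random.uniform(lower, upper, size=(16, 2))
y = numpy.sin(X).sum(axis=1)
model.train(X, y, do_optimize=True)
mean, var = model.predict(numpy.random.uniform(lower, upper, size=(4, 2)))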