Example 1
def test_rosen_integers():
    X = [2, 4, 6, 8]
    y = 100*((4 - 2**2)**2) + (2 - 1)**2 \
      + 100*((6 - 4**2)**2) + (4 - 1)**2 \
      + 100*((8 - 6**2)**2) + (6 - 1)**2

    assert functions.Rosenbrock(X) == y
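The hand-expanded value above instantiates the standard Rosenbrock chain sum. A minimal reference sketch of that formula in plain Python (rosenbrock_reference is an illustrative name, not part of the functions module):

def rosenbrock_reference(x):
    # sum over consecutive pairs: 100 * (x[i+1] - x[i]**2)**2 + (x[i] - 1)**2
    return sum(100 * (x[i + 1] - x[i] ** 2) ** 2 + (x[i] - 1) ** 2
               for i in range(len(x) - 1))

# rosenbrock_reference([2, 4, 6, 8]) reproduces the hand-expanded value y above.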
Example 2
def initiate_rosenbrock_data(initial_n=20,
                             effective_dim=2,
                             high_dim=25,
                             replications=100):

    all_A = np.random.normal(0, 1, [replications, effective_dim, high_dim])
    all_s = np.empty((replications, initial_n, effective_dim))
    all_f_s = np.empty((replications, initial_n, 1))
    test_func = functions.Rosenbrock()
    for i in range(replications):
        cnv_prj = projections.ConvexProjection(all_A[i])
        all_s[i] = lhs(effective_dim, initial_n) * 2 * np.sqrt(
            effective_dim) - np.sqrt(effective_dim)
        all_f_s[i] = test_func.evaluate(cnv_prj.evaluate(all_s[i]))
    # Serialize the three arrays in a fixed order (read them back in the same order).
    with open('high_dim_rosenbrock_initial_data', 'wb') as fileObject:
        pickle.dump(all_A, fileObject)
        pickle.dump(all_s, fileObject)
        pickle.dump(all_f_s, fileObject)
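A minimal read-back sketch for the file written above, relying only on the fact that the three arrays were dumped in a fixed order (the variable names are illustrative):

import pickle

with open('high_dim_rosenbrock_initial_data', 'rb') as f:
    all_A = pickle.load(f)    # (replications, effective_dim, high_dim)
    all_s = pickle.load(f)    # (replications, initial_n, effective_dim)
    all_f_s = pickle.load(f)  # (replications, initial_n, 1)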
Example 3
def Run_Main(low_dim=2,
             high_dim=20,
             initial_n=20,
             total_itr=100,
             func_type='Branin',
             matrix_type='simple',
             kern_inp_type='Y',
             A_input=None,
             s=None,
             xl=None,
             xu=None,
             active_var=None,
             hyper_opt_interval=20,
             ARD=False,
             variance=1.,
             length_scale=None,
             box_size=None,
             noise_var=0,
             slice_number=None):

    if slice_number is None:
        slice_number = low_dim + 1
    if active_var is None:
        active_var = np.arange(high_dim)
    if box_size is None:
        box_size = math.sqrt(low_dim)
    if hyper_opt_interval is None:
        hyper_opt_interval = 10
    # Specifying the type of objective function
    if func_type == 'Branin':
        test_func = functions.Branin(active_var, noise_var=noise_var)
    elif func_type == 'Rosenbrock':
        test_func = functions.Rosenbrock(active_var, noise_var=noise_var)
    elif func_type == 'Hartmann6':
        test_func = functions.Hartmann6(active_var, noise_var=noise_var)
    elif func_type == 'Col':
        test_func = functions.colville(active_var, noise_var=noise_var)
    elif func_type == 'CAMEL':
        test_func = functions.camel3(active_var, noise_var=noise_var)
    elif func_type == 'MNIST':
        test_func = functions.MNIST(active_var)
    else:
        raise TypeError('The input for func_type variable is invalid, which is '
                        + str(func_type))

    best_results = np.zeros([1, total_itr + initial_n])
    elapsed = np.zeros([1, total_itr + initial_n])

    # generate embedding matrix via samples
    #f_s = test_func.evaluate(np.array(xl))
    f_s_true = test_func.evaluate_true(xl)
    # get the projection matrix B using Semi-LSIR
    B = SSIR(low_dim, xl, f_s_true, xu, slice_number, k=3)

    embedding_sample = np.matmul(xl, B)
    for i in range(initial_n):
        best_results[0, i] = np.max(f_s_true[0:i + 1])

    # Specifying the input type of kernel
    if kern_inp_type == 'Y':
        kern_inp = kernel_inputs.InputY(B)
        input_dim = low_dim
    elif kern_inp_type == 'X':
        kern_inp = kernel_inputs.InputX(B)
        input_dim = high_dim
    elif kern_inp_type == 'psi':
        kern_inp = kernel_inputs.InputPsi(B)
        input_dim = high_dim
    else:
        raise TypeError('The input for kern_inp_type variable is invalid, which is '
                        + str(kern_inp_type))

    # Generating GP model
    k = GPy.kern.Matern52(input_dim=input_dim,
                          ARD=ARD,
                          variance=variance,
                          lengthscale=length_scale)
    m = GPy.models.GPRegression(kern_inp.evaluate(embedding_sample),
                                f_s_true,
                                kernel=k)
    m.likelihood.variance = 1e-6
    bounds = np.zeros((high_dim, 2))
    bounds[:, 0] = -1
    bounds[:, 1] = 1
    ac = acquisition.ACfunction(B,
                                m,
                                initial_size=initial_n,
                                low_dimension=low_dim)

    for i in range(total_itr):
        start = timeit.default_timer()
        #Updating GP model
        m.set_XY(kern_inp.evaluate(embedding_sample), f_s_true)
        m.optimize()

        #find X to max UCB(BX)
        es = cma.CMAEvolutionStrategy(high_dim * [0], 0.5, {'bounds': [-1, 1]})
        iter = 0
        u = []
        ac.set_fs_true(max(f_s_true))
        #_, maxD = ac.acfunctionEI(max(f_s), low_dim)
        if i != 0 and (i) % 20 == 0:
            print("update")

            while not es.stop() and iter != 2:
                iter += 1
                X = es.ask()
                es.tell(X,
                        [ac.newfunction(x)
                         for x in X])  #set UCB or EI in newfunction() manually
                # if i != 0 and (i) % 10 == 0:
                u.append(es.result[0])
                #es.disp()  # doctest: +ELLIPSIS
            #return candidate X
            maxx = es.result[0].reshape((1, high_dim))
        else:
            while not es.stop() and iter != 2:
                iter += 1
                X = es.ask()
                es.tell(X,
                        [ac.newfunction(x)
                         for x in X])  #set UCB or EI in newfunction() manually
            #es.disp()  # doctest: +ELLIPSIS
            #return candidate X
            maxx = es.result[0].reshape((1, high_dim))

        #_,maxD = ac.acfunctionEI(max(f_s),low_dim)

        #initial qp
        #ac.updateQP(maxD)
        #solve qp
        #res = minimize(ac.qp,np.zeros((high_dim,1)),method='SLSQP',bounds=bounds,options={'maxiter': 5, 'disp': True})

        #maxx = res.x
        #print("qp fun = ",res.fun)

        new_emb = np.matmul(maxx, B)  # maxx: (1, high_dim), B: (high_dim, low_dim)
        embedding_sample = np.append(embedding_sample, new_emb, axis=0)
        xl = np.append(xl, maxx, axis=0)
        #f_s = np.append(f_s, test_func.evaluate(maxx), axis=0)
        f_s_true = np.append(f_s_true, test_func.evaluate_true(maxx), axis=0)

        # update the projection matrix B
        if i != 0 and (i) % 20 == 0:
            print("update")
            # get the top 'initial_n' labeled points from xl
            xlidex = np.argsort(-f_s_true, axis=0).reshape(-1)[:initial_n]
            f_s_special = f_s_true[xlidex]
            xl_special = xl[xlidex]
            #get top unlabeled data from xu
            xu = np.array(u)
            B = SSIR(low_dim, xl_special, f_s_special, xu, slice_number, k=3)
            embedding_sample = np.matmul(xl, B)
            ac.resetflag(B)

        # Collecting data
        stop = timeit.default_timer()
        print("iter = ", i, "maxobj = ", np.max(f_s_true))
        best_results[0, i + initial_n] = np.max(f_s_true)
        elapsed[0, i + initial_n] = stop - start

    return best_results, elapsed, embedding_sample, f_s_true
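A minimal call sketch for Run_Main, assuming the original module's imports (numpy as np, pyDOE's lhs, and the local functions/projections/kernel_inputs/acquisition modules) and, as an assumption made here, that the labeled points xl0 and the unlabeled pool xu0 live in the [-1, 1]^high_dim box used by the CMA-ES bounds above:

from pyDOE import lhs  # Latin hypercube sampling, as used elsewhere in the module

high_dim, low_dim, n_init = 20, 2, 20
xl0 = lhs(high_dim, n_init) * 2 - 1  # labeled initial design, shape (n_init, high_dim)
xu0 = lhs(high_dim, 100) * 2 - 1     # unlabeled pool for the semi-supervised SSIR step
best, elapsed, emb, f_hist = Run_Main(low_dim=low_dim,
                                      high_dim=high_dim,
                                      initial_n=n_init,
                                      total_itr=50,
                                      func_type='Rosenbrock',
                                      xl=xl0,
                                      xu=xu0)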
Example 4
def RunMain(low_dim=2,
            high_dim=25,
            initial_n=20,
            total_itr=100,
            func_type='Branin',
            s=None,
            active_var=None,
            ARD=False,
            variance=1.,
            length_scale=None,
            box_size=None,
            high_to_low=None,
            sign=None,
            hyper_opt_interval=20,
            noise_var=0):
    """

    :param high_dim: the dimension of the high-dimensional search space
    :param low_dim: the effective dimension of the algorithm
    :param initial_n: the number of initial points
    :param total_itr: the number of iterations of the algorithm. The total
        number of test function evaluations is initial_n + total_itr
    :param func_type: the name of the test function
    :param s: initial points
    :param active_var: a vector whose length is greater than or equal to
        the number of active variables of the test function; its entries
        are integers smaller than high_dim
    :param ARD: if True, the kernel uses a separate lengthscale per input
        dimension (automatic relevance determination); otherwise it is isotropic
    :param variance: signal variance of the kernel
    :param length_scale: length scale values of the kernel
    :param box_size: the search space is [-box_size, box_size]^d
    :param high_to_low: a vector with D elements; each element takes a value in {0, ..., d-1}
    :param sign: a vector with D elements; each element is either +1 or -1
    :param hyper_opt_interval: the number of iterations between two consecutive
        hyperparameter optimizations
    :param noise_var: noise variance of the test functions
    :return: a tuple of best values of each iteration, all observed points, and
        corresponding test function values of observed points
    """

    if active_var is None:
        active_var = np.arange(high_dim)
    if box_size is None:
        box_size = 1
    if high_to_low is None:
        high_to_low = np.random.choice(range(low_dim), high_dim)
    if sign is None:
        sign = np.random.choice([-1, 1], high_dim)

    #Specifying the type of objective function
    if func_type == 'Branin':
        test_func = functions.Branin(active_var, noise_var=noise_var)
    elif func_type == 'Rosenbrock':
        test_func = functions.Rosenbrock(active_var, noise_var=noise_var)
    elif func_type == 'Hartmann6':
        test_func = functions.Hartmann6(active_var, noise_var=noise_var)
    elif func_type == 'StybTang':
        test_func = functions.StybTang(active_var, noise_var=noise_var)
    else:
        raise TypeError('The input for func_type variable is invalid, which is '
                        + str(func_type))

    best_results = np.zeros([1, total_itr + initial_n])
    elapsed = np.zeros([1, total_itr + initial_n])

    # Creating the initial points. The shape of s is nxD
    if s is None:
        s = lhs(low_dim, initial_n) * 2 * box_size - box_size
    f_s = test_func.evaluate(back_projection(s, high_to_low, sign, box_size))
    f_s_true = test_func.evaluate_true(
        back_projection(s, high_to_low, sign, box_size))
    for i in range(initial_n):
        best_results[0, i] = np.max(f_s_true[0:i + 1])

    # Building and fitting a new GP model
    kern = GPy.kern.Matern52(input_dim=low_dim,
                             ARD=ARD,
                             variance=variance,
                             lengthscale=length_scale)
    m = GPy.models.GPRegression(s, f_s, kernel=kern)
    m.likelihood.variance = 1e-3

    # Main loop
    for i in range(total_itr):

        start = timeit.default_timer()

        # Updating GP model
        m.set_XY(s, f_s)
        if (i + initial_n <= 25
                and i % 5 == 0) or (i + initial_n > 25
                                    and i % hyper_opt_interval == 0):
            m.optimize()

        # Maximizing acquisition function
        D = lhs(low_dim, 2000) * 2 * box_size - box_size
        mu, var = m.predict(D)
        ei_d = EI(len(D), max(f_s), mu, var)
        index = np.argmax(ei_d)

        # Adding the new point to our sample
        s = np.append(s, [D[index]], axis=0)
        new_high_point = back_projection(D[index], high_to_low, sign, box_size)
        f_s = np.append(f_s, test_func.evaluate(new_high_point), axis=0)
        f_s_true = np.append(f_s_true,
                             test_func.evaluate_true(new_high_point),
                             axis=0)

        stop = timeit.default_timer()
        best_results[0, i + initial_n] = np.max(f_s_true)
        elapsed[0, i + initial_n] = stop - start

    # if func_type == 'WalkerSpeed':
    #     eng.quit()
    high_s = back_projection(s, high_to_low, sign, box_size)
    return best_results, elapsed, s, f_s, f_s_true, high_s
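A minimal call sketch for RunMain with its defaults, assuming the original file's imports (numpy, GPy, pyDOE's lhs, and the local functions/back_projection helpers) are available:

best, elapsed, s, f_s, f_s_true, high_s = RunMain(low_dim=2,
                                                  high_dim=25,
                                                  initial_n=20,
                                                  total_itr=50,
                                                  func_type='Branin')
print("best noiseless value found:", best[0, -1])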
Example 5
def Run_Main(low_dim=2,
             high_dim=20,
             initial_n=20,
             total_itr=100,
             func_type='Branin',
             matrix_type='simple',
             kern_inp_type='Y',
             A_input=None,
             s=None,
             xl=None,
             xu=None,
             active_var=None,
             hyper_opt_interval=10,
             ARD=False,
             variance=1.,
             length_scale=None,
             box_size=None,
             noise_var=0,
             slice_number=None):

    if slice_number is None:
        slice_number = low_dim + 1
    if active_var is None:
        active_var = np.arange(high_dim)
    if box_size is None:
        box_size = math.sqrt(low_dim)
    if hyper_opt_interval is None:
        hyper_opt_interval = 10
    # Specifying the type of objective function
    if func_type == 'Branin':
        test_func = functions.Branin(active_var, noise_var=noise_var)
    elif func_type == 'Rosenbrock':
        test_func = functions.Rosenbrock(active_var, noise_var=noise_var)
    elif func_type == 'Hartmann6':
        test_func = functions.Hartmann6(active_var, noise_var=noise_var)
    elif func_type == 'StybTang':
        test_func = functions.StybTang(active_var, noise_var=noise_var)
    elif func_type == 'Col':
        test_func = functions.colville(active_var, noise_var=noise_var)
    elif func_type == 'MNIST':
        test_func = functions.MNIST(active_var)
    else:
        raise TypeError('The input for func_type variable is invalid, which is '
                        + str(func_type))

    best_results = np.zeros([1, total_itr + initial_n])
    elapsed = np.zeros([1, total_itr + initial_n])
    total_best_results = np.zeros([1, total_itr + initial_n])

    # generate embedding matrix via samples
    #f_s = test_func.evaluate(xl)
    f_s_true = test_func.evaluate_true(xl)
    B = SSIR(low_dim, xl, f_s_true, xu, slice_number, k=3)
    Bplus = pinv(B).T  # shape (low_dim, high_dim); without the transpose it is (high_dim, low_dim)
    cnv_prj = projections.ConvexProjection(Bplus)
    #embedding_sample = np.matmul(xl,B.T)
    box = np.sum(B, axis=1)
    print(box)
    #box_bound = np.empty((2, low_dim))
    # for i in range(low_dim):
    #     for j in range(2):
    #         if j == 0:
    #             box_bound[j][i] = -np.abs(box[i])
    #         else:
    #             box_bound[j][i] = np.abs(box[i])

    # Initiating first sample
    if s is None:
        #s = lhs(low_dim, initial_n) * 2 * box_size - box_size
        # D = []
        # for i in range(low_dim):
        #     D.append(lhs(1, initial_n) * 2 * np.abs(box[i]) - np.abs(box[i]))
        s = lhs(low_dim, 2000) * 2 * np.max(np.abs(box)) - np.max(np.abs(box))
        #s = np.array(D).reshape((initial_n,low_dim))

    # get low-dimensional representations
    s = np.matmul(xl, B.T)

    for i in range(initial_n):
        best_results[0, i] = np.max(f_s_true[0:i + 1])

    # Specifying the input type of kernel
    kern_inp, input_dim = specifyKernel("Y",
                                        Bplus=Bplus,
                                        low_dim=low_dim,
                                        high_dim=high_dim)

    # Generating GP model
    k = GPy.kern.Matern52(input_dim=input_dim,
                          ARD=ARD,
                          variance=variance,
                          lengthscale=length_scale)
    m = GPy.models.GPRegression(kern_inp.evaluate(s), f_s_true, kernel=k)
    m.likelihood.variance = 1e-6
    ac = acquisition.ACfunction(B,
                                m,
                                initial_size=initial_n,
                                low_dimension=low_dim)
    # Main loop of the algorithm
    ei_d = 0
    D = 0
    for i in range(total_itr):
        print("i = ", i)
        start = timeit.default_timer()
        # update the projection matrix every 20 iterations
        if i != 0 and (i) % 20 == 0:
            print("update")
            idx = np.argsort(
                np.array(-ei_d),
                axis=0).reshape(-1)[:100]  #get 100 unlabeled data index
            xu = cnv_prj.evaluate(
                D[idx])  #project the unlabeled data to high-dimensional space
            xlidex = np.argsort(-f_s_true, axis=0).reshape(
                -1)[:initial_n]  # get 'initial_n' labeled data indices
            xl_special = cnv_prj.evaluate(
                s[xlidex])  #project the labeled data to high-dimensional space
            f_s_special = f_s_true[
                xlidex]  # evaluate the labeled data to get response value
            B = SSIR(low_dim, xl_special, f_s_special, xu, slice_number,
                     k=3)  # perform SEMI-LSIR to update B
            Bplus = pinv(B).T
            specifyKernel("Y", B, low_dim, high_dim)
            cnv_prj = projections.ConvexProjection(Bplus)
            box = np.sum(B, axis=1)  # update low-dimensional search space
            #f_s = test_func.evaluate(cnv_prj.evaluate(s))
            f_s_true = test_func.evaluate_true(cnv_prj.evaluate(s))
            print(box)

        # Updating GP model
        m.set_XY(kern_inp.evaluate(s), f_s_true)
        #if (i + initial_n <= 25 and i % 5 == 0) or (i + initial_n > 25 and i % hyper_opt_interval == 0):
        m.optimize()

        # finding the next point for sampling
        # D = []
        # for a in range(low_dim):
        #     D.append(lhs(1, 2000) * 2 * np.abs(box[a])  - np.abs(box[a]))
        # D = np.array(D).reshape((2000, low_dim))
        D = lhs(low_dim, 2000) * 2 * np.max(np.abs(box)) - np.max(np.abs(box))
        #D = lhs(low_dim, 2000) * 2 * box_size - box_size
        #test = kern_inp.evaluate(D)

        mu, var = m.predict(kern_inp.evaluate(D))
        #UCB
        ei_d = ac.originalUCB(mu, var)
        #EI
        #ei_d = EI(len(D), max(f_s_true), mu, var)
        index = np.argmax(ei_d)

        #xl = np.append(xl,cnv_prj.evaluate([D[index]]),axis=0)
        s = np.append(s, [D[index]], axis=0)
        #f_s = np.append(f_s, test_func.evaluate(cnv_prj.evaluate([D[index]])), axis=0)
        f_s_true = np.append(f_s_true,
                             test_func.evaluate_true(
                                 cnv_prj.evaluate([D[index]])),
                             axis=0)

        # Collecting data
        stop = timeit.default_timer()
        print("iter = ", i, "maxobj = ", np.max(f_s_true))
        best_results[0, i + initial_n] = np.max(f_s_true)
        elapsed[0, i + initial_n] = stop - start
    for i in range(initial_n + total_itr):
        total_best_results[0, i] = np.max(best_results[0, :i + 1])

    # if func_type == 'WalkerSpeed':
    #     eng.quit()

    return total_best_results, elapsed, s, f_s_true  #cnv_prj.evaluate(s)
Example 6
def Run_Main(low_dim=2, high_dim=20, initial_n=20, total_itr=100, func_type='Branin',
             matrix_type='simple', kern_inp_type='Y', A_input=None, s=None, active_var=None,
             hyper_opt_interval=20, ARD=False, variance=1., length_scale=None, box_size=None,
             noise_var=0,slice_number=None):

    if slice_number is None:
        slice_number = low_dim+1
    if active_var is None:
        active_var= np.arange(high_dim)
    if box_size is None:
        box_size = math.sqrt(low_dim)
    if hyper_opt_interval is None:
        hyper_opt_interval = 10
    # Specifying the type of objective function
    if func_type == 'Branin':
        test_func = functions.Branin(active_var, noise_var=noise_var)
    elif func_type == 'Rosenbrock':
        test_func = functions.Rosenbrock(active_var, noise_var=noise_var)
    elif func_type == 'Hartmann6':
        test_func = functions.Hartmann6(active_var, noise_var=noise_var)
    elif func_type == 'Col':
        test_func = functions.colville(active_var, noise_var=noise_var)
    elif func_type == 'MNIST':
        test_func = functions.MNIST(active_var)
    elif func_type == 'CAMEL':
        test_func = functions.camel3(active_var, noise_var=noise_var)
    else:
        raise TypeError('The input for func_type variable is invalid, which is '
                        + str(func_type))

    best_results = np.zeros([1, total_itr + initial_n])
    elapsed = np.zeros([1, total_itr + initial_n])


    # generate embedding matrix via samples
    #f_s = test_func.evaluate(np.array(s))
    f_s_true = test_func.evaluate_true(s)
    B = SIR(low_dim,s,f_s_true,slice_number)



    embedding_sample = np.matmul(s,B)
    for i in range(initial_n):
        best_results[0, i] = np.max(f_s_true[0:i + 1])

    # Specifying the input type of kernel
    if kern_inp_type == 'Y':
        kern_inp = kernel_inputs.InputY(B)
        input_dim = low_dim
    elif kern_inp_type == 'X':
        kern_inp = kernel_inputs.InputX(B)
        input_dim = high_dim
    elif kern_inp_type == 'psi':
        kern_inp = kernel_inputs.InputPsi(B)
        input_dim = high_dim
    else:
        raise TypeError('The input for kern_inp_type variable is invalid, which is '
                        + str(kern_inp_type))


    # Generating GP model
    k = GPy.kern.Matern52(input_dim=input_dim, ARD=ARD, variance=variance, lengthscale=length_scale)
    m = GPy.models.GPRegression(kern_inp.evaluate(embedding_sample), f_s_true, kernel=k)
    m.likelihood.variance = 1e-6
    bounds = np.zeros((high_dim,2))
    bounds[:,0]=-1
    bounds[:,1]=1
    ac = acquisition.ACfunction(B,m,initial_size=initial_n,low_dimension=low_dim)

    for i in range(total_itr):
        start = timeit.default_timer()
        #Updating GP model
        m.set_XY(kern_inp.evaluate(embedding_sample),f_s_true)
        m.optimize()
        #CMA_ES
        # D = lhs(high_dim, 2000) * 2 * box_size - box_size
        # ac_value = AC_function(m,B,D)
        # solution = np.concatenate((D,ac_value),axis=1)
        #
        # for item in solution:
        #     solutions.append((item[:-1],item[-1]))
        # cma_iteration = 5
        # res = np.zeros((cma_iteration,high_dim))
        # keep = 0
        # for generation in range(cma_iteration):
        #     solutions = []
        #     for _ in range(cma_es.population_size):
        #         x = cma_es.ask()
        #         value = -AC_function(m, B, x).reshape(1)
        #         solutions.append((x, float(value)))
        #         #print("generation = ", generation,"value = ",value,"\n")
        #     cma_es.tell(solutions)
        #     a = 0
        #     for sol in solutions:
        #         if sol[1]<keep:
        #             keep = sol[1]
        #             a =np.array(sol[0]).reshape((1,high_dim))
        #     res[generation] = a[:]
        # maxx = res[cma_iteration-1]
        #*****
        #D = lhs(high_dim, 2000) * 2 * box_size - box_size
        #test = ac.acfunctionUCB(D)


        #*****
        es = cma.CMAEvolutionStrategy(high_dim * [0], 0.5, {'bounds': [-1, 1]})
        iter = 0
        while not es.stop() and iter !=2:
            iter+=1
            X = es.ask()
            es.tell(X, [ac.acfunctionUCB(x) for x in X])
            #es.disp()  # doctest: +ELLIPSIS
        #es.optimize(cma.ff.rosen)
        #es.optimize(acfunction.acfunction)
        maxx = es.result[0]

        s = np.matmul(maxx.T, B).reshape((1, low_dim))  # maxx: (high_dim,), B: (high_dim, low_dim)
        embedding_sample = np.append(embedding_sample, s, axis=0)
        #f_s = np.append(f_s, test_func.evaluate(maxx), axis=0 )
        f_s_true = np.append(f_s_true, test_func.evaluate_true(maxx),axis=0)

        # Collecting data
        stop = timeit.default_timer()
        print("iter = ", i, "maxobj = ", np.max(f_s_true))
        best_results[0, i + initial_n] = np.max(f_s_true)
        elapsed[0, i + initial_n] = stop - start



    return best_results, elapsed, embedding_sample, f_s_true
Example 7
def RunRembo(low_dim=2, high_dim=20, initial_n=20, total_itr=100, func_type='Branin',
             matrix_type='simple', kern_inp_type='Y', A_input=None, s=None, active_var=None,
             hyper_opt_interval=20, ARD=False, variance=1., length_scale=None, box_size=None,
             noise_var=0):
    """"

    :param low_dim: the dimension of low dimensional search space
    :param high_dim: the dimension of high dimensional search space
    :param initial_n: the number of initial points
    :param total_itr: the number of iterations of algorithm. The total
        number of test function evaluations is initial_n + total_itr
    :param func_type: the name of test function
    :param matrix_type: the type of projection matrix
    :param kern_inp_type: the type of projection. Projected points
        are used as the input of kernel
    :param A_input: a projection matrix with iid gaussian elements.
        The size of matrix is low_dim * high_dim
    :param s: initial points
    :param active_var: a vector with the size of greater or equal to
        the number of active variables of test function. The values of
        vector are integers less than high_dim value.
    :param hyper_opt_interval: the number of iterations between two consecutive
        hyper parameters optimizations
    :param ARD: if TRUE, kernel is isomorphic
    :param variance: signal variance of the kernel
    :param length_scale: length scale values of the kernel
    :param box_size: this variable indicates the search space [-box_size, box_size]^d
    :param noise_var: noise variance of the test functions
    :return: a tuple of best values of each iteration, all observed points, and
        corresponding test function values of observed points
    """

    if active_var is None:
        active_var= np.arange(high_dim)
    if box_size is None:
        box_size=math.sqrt(low_dim)
    if hyper_opt_interval is None:
        hyper_opt_interval = 10

    #Specifying the type of objective function
    if func_type=='Branin':
        test_func = functions.Branin(active_var, noise_var=noise_var)
    elif func_type=='Rosenbrock':
        test_func = functions.Rosenbrock(active_var, noise_var=noise_var)
    elif func_type=='Hartmann6':
        test_func = functions.Hartmann6(active_var, noise_var=noise_var)
    elif func_type == 'StybTang':
        test_func = functions.StybTang(active_var, noise_var=noise_var)
    else:
        raise TypeError('The input for func_type variable is invalid, which is '
                        + str(func_type))

    #Specifying the type of embedding matrix
    if matrix_type=='simple':
        matrix=projection_matrix.SimpleGaussian(low_dim, high_dim)
    elif matrix_type=='normal':
        matrix= projection_matrix.Normalized(low_dim, high_dim)
    elif matrix_type=='orthogonal':
        matrix = projection_matrix.Orthogonalized(low_dim, high_dim)
    else:
        raise TypeError('The input for matrix_type variable is invalid, which is '
                        + str(matrix_type))

    # Generating matrix A
    if A_input is not None:
        matrix.A = A_input

    A = matrix.evaluate()

    #Specifying the input type of kernel
    if kern_inp_type=='Y':
        kern_inp = kernel_inputs.InputY(A)
        input_dim=low_dim
    elif kern_inp_type=='X':
        kern_inp = kernel_inputs.InputX(A)
        input_dim = high_dim
    elif kern_inp_type == 'psi':
        kern_inp = kernel_inputs.InputPsi(A)
        input_dim = high_dim
    else:
        raise TypeError('The input for kern_inp_type variable is invalid, which is '
                        + str(kern_inp_type))

    #Specifying the convex projection
    cnv_prj=projections.ConvexProjection(A)

    best_results=np.zeros([1,total_itr + initial_n])
    elapsed = np.zeros([1, total_itr + initial_n])

    # Initiating first sample    # Sample points are in [-d^1/2, d^1/2]
    if s is None:
        s = lhs(low_dim, initial_n) * 2 * box_size - box_size
    f_s = test_func.evaluate(cnv_prj.evaluate(s))
    f_s_true = test_func.evaluate_true(cnv_prj.evaluate(s))
    for i in range(initial_n):
        best_results[0,i]=np.max(f_s_true[0:i+1])

    # Generating GP model
    k = GPy.kern.Matern52(input_dim=input_dim, ARD=ARD, variance=variance, lengthscale=length_scale)
    m = GPy.models.GPRegression(kern_inp.evaluate(s), f_s, kernel=k)
    m.likelihood.variance = 1e-6

    # Main loop of the algorithm
    for i in range(total_itr):

        start = timeit.default_timer()
        # Updating GP model
        m.set_XY(kern_inp.evaluate(s),f_s)
        if (i+initial_n<=25 and i % 5 == 0) or (i+initial_n>25 and i % hyper_opt_interval == 0):
            m.optimize()

        # finding the next point for sampling
        D = lhs(low_dim, 2000) * 2 * box_size - box_size
        mu, var = m.predict(kern_inp.evaluate(D))
        ei_d = EI(len(D), max(f_s), mu, var)
        index = np.argmax(ei_d)
        s = np.append(s, [D[index]], axis=0)
        f_s = np.append(f_s, test_func.evaluate(cnv_prj.evaluate([D[index]])), axis=0)
        f_s_true = np.append(f_s_true, test_func.evaluate_true(cnv_prj.evaluate([D[index]])), axis=0)

        #Collecting data
        stop = timeit.default_timer()
        best_results[0,i + initial_n]=np.max(f_s_true)
        elapsed[0, i + initial_n] = stop - start

    # if func_type == 'WalkerSpeed':
    #     eng.quit()

    return best_results, elapsed, s, f_s, f_s_true, cnv_prj.evaluate(s)
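A minimal call sketch for this RunRembo variant, again assuming the original file's imports (numpy, GPy, pyDOE's lhs, and the local functions/projections/projection_matrix/kernel_inputs modules):

best, elapsed, s, f_s, f_s_true, high_x = RunRembo(low_dim=2,
                                                   high_dim=20,
                                                   initial_n=20,
                                                   total_itr=50,
                                                   func_type='Hartmann6',
                                                   matrix_type='simple',
                                                   kern_inp_type='Y')
print("best noiseless value found:", best[0, -1])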
Example 8
def RunRembo(low_dim=2,
             high_dim=20,
             initial_n=20,
             total_itr=100,
             func_type='Branin',
             matrix_type='simple',
             kern_inp_type='Y',
             A_input=None,
             s=None,
             f_s=None):

    #Specifying the type of objective function
    if func_type == 'Branin':
        test_func = functions.Branin()
    elif func_type == 'Rosenbrock':
        test_func = functions.Rosenbrock()
    elif func_type == 'Hartmann6':
        test_func = functions.Hartmann6()
    else:
        raise TypeError('The input for func_type variable is invalid, which is '
                        + str(func_type))

    #Specifying the type of embedding matrix
    if matrix_type == 'simple':
        matrix = projection_matrix.SimpleGaussian(low_dim, high_dim)
    elif matrix_type == 'normal':
        matrix = projection_matrix.Normalized(low_dim, high_dim)
    elif matrix_type == 'orthogonal':
        matrix = projection_matrix.Orthogonalized(low_dim, high_dim)
    else:
        raise TypeError('The input for matrix_type variable is invalid, which is '
                        + str(matrix_type))

    # Generating matrix A
    if A_input is not None:
        matrix.A = A_input

    A = matrix.evaluate()

    #Specifying the input type of kernel
    if kern_inp_type == 'Y':
        kern_inp = kernel_inputs.InputY(A)
    elif kern_inp_type == 'X':
        kern_inp = kernel_inputs.InputX(A)
    elif kern_inp_type == 'psi':
        kern_inp = kernel_inputs.InputPsi(A)
    else:
        raise TypeError('The input for kern_inp_type variable is invalid, which is '
                        + str(kern_inp_type))

    #Specifying the convex projection
    cnv_prj = projections.ConvexProjection(A)

    best_results = np.zeros([1, total_itr])
    # Initiating first sample    # Sample points are in [-d^1/2, d^1/2]
    if s is None:
        s = lhs(low_dim,
                initial_n) * 2 * math.sqrt(low_dim) - math.sqrt(low_dim)
        f_s = test_func.evaluate(cnv_prj.evaluate(s))

    # Generating GP model
    k = CustomMatern52(input_dim=low_dim, input_type=kern_inp)
    m = GPy.models.GPRegression(s, f_s, kernel=k)
    m.likelihood.variance = 1e-6
    m.optimize()

    # Main loop of the algorithm
    for i in range(total_itr):
        D = lhs(low_dim, 1000) * 2 * math.sqrt(low_dim) - math.sqrt(low_dim)

        # Updating GP model
        m.set_XY(s, f_s)
        if (i + 1) % 5 == 0:
            m.optimize()
        mu, var = m.predict(D)

        # finding the next point for sampling
        ei_d = EI(len(D), max(f_s), mu, var)
        index = np.argmax(ei_d)
        s = np.append(s, [D[index]], axis=0)
        f_s = np.append(f_s,
                        test_func.evaluate(cnv_prj.evaluate([D[index]])),
                        axis=0)

        #Collecting data
        best_results[0, i] = np.max(f_s)

    # max_index = np.argmax(f_s)
    # max_point = s[max_index]
    # max_value = f_s[max_index]
    #
    # print('The best value is:  ', max_value,
    #       '\n \n at the point:  ', max_point,
    #       '\n \n with Ay value:  ', test_func.scale_domain(cnv_prj.evaluate([max_point])),
    #       '\n\nin the iteration:  ', max_index)
    return best_results, s, f_s
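And a minimal call sketch for this earlier, simpler variant (three return values), under the same import assumptions plus the local CustomMatern52 kernel:

best_results, s, f_s = RunRembo(low_dim=2,
                                high_dim=20,
                                initial_n=20,
                                total_itr=50,
                                func_type='Branin')
print("best observed value:", best_results[0, -1])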
Example 9
def test_rosen_decimals():
    X = [0.5, 0.1]
    y = 100 * ((0.1 - 0.5**2)**2) + (0.5 - 1)**2

    assert functions.Rosenbrock(X) == y