Example no. 1
def plot_2d_evo(popi):

    magnet_dim = len(popi.get_x()[0])
    ndf, dl, dc, ndl = pg.fast_non_dominated_sorting(popi.get_f()[:, :2])
    #    print(pg.sort_population_mo(popi.get_f()))
    ndf_champ = []

    #    for j in range(2,200):
    #    print(pg.ideal(popi.get_f())[0])
    print(ndf)
    #    x = np.linspace(pg.ideal(popi.get_f())[0],0)
    #    y = np.zeros(50)+(1)
    old_ndf, new_ndf = np.zeros(1), np.zeros(1)
    n_plots = 0
    for j in range(2, len(popi.get_f())):
        #        print(x,y)
        #        plt.plot(x,y,linestyle="dashed",color="red")
        #        print(ndf)
        #        ndf_champ.append([popi.get_f()[i] for i in ndf[j]])
        #        ax = pg.plot_non_dominated_fronts(ndf_champ[0],comp=[0,1])
        #    ax.plot(color="C{}".format(j))
        ndf, dl, dc, ndl = pg.fast_non_dominated_sorting(popi.get_f()[0:j, :2])
        #        print(ndf)
        new_ndf = np.array(ndf[0])
        if not np.array_equal(new_ndf, old_ndf):
            if len(ndf[0]) <= 1:
                continue
            n_plots += 1
            plt.cla()
            ax = pg.plot_non_dominated_fronts(popi.get_f()[ndf[0]],
                                              comp=[0, 1])
            ax.set_ylabel(fNames[1])  # fNames / fNom: module-level globals (labels, nominal values)
            ax.set_xlabel(fNames[0])
            ax.set_ylim(1e-1, 1e1)
            ax.set_yscale('log')
            ax.set_xlim(0.1, 10.0)
            ax.set_xscale('log')
            ax.axvline(x=fNom[0], linestyle="dashed", color="red")
            #            axs[plot_y].axvline(x=best_point[0],linestyle="dotted",color="blue")
            ax.axhline(y=1.0, linestyle="dashed", color="red")
            #            ax.set_xlabel('resolution')
            #            ax.set_ylabel('xangle_e_min')
            #            ax.set_ylim(1e-3,1000)
            #            ax.set_yscale('log')
            #    print(ndf_champ, ndf[0])
            #    print(ndf)
            plt.savefig("popi{}".format(n_plots))
        old_ndf = np.array(ndf[0])
    return
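For context, a minimal self-contained sketch of the pattern above (random objectives and invented labels and file names, not the original fNames/fNom data): re-sort a growing slice of the population and save a plot only when the first front changes.

# Sketch only: random data, hypothetical labels and file names.
import numpy as np
import pygmo as pg
import matplotlib.pyplot as plt

f = np.random.rand(50, 2)
old_front = np.zeros(1)
for j in range(2, len(f)):
    ndf, _, _, _ = pg.fast_non_dominated_sorting(f[:j])
    front = np.array(ndf[0])
    if not np.array_equal(front, old_front) and len(front) > 1:
        plt.cla()
        ax = pg.plot_non_dominated_fronts(f[front], comp=[0, 1])
        ax.set_xlabel('f1')
        ax.set_ylabel('f2')
        plt.savefig('front_{}.png'.format(j))
    old_front = front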
Example no. 2
    def _calc_pareto_front(self, *args, **kwargs):
        # args[0] should be f (the objective matrix)
        ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(args[0])
        # ndf is a list of index arrays; take the first (Pareto) front
        p = args[0][ndf[0], :]

        # base class has this assignment
        return p
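A minimal sketch (toy values, not from the original project) of the return shapes this method relies on; ndf[0] is the index array of the first front.

# ndf: list of index arrays, one per front; ndf[0] is the Pareto front.
import numpy as np
import pygmo as pg

f = np.array([[1.0, 4.0], [2.0, 2.0], [3.0, 1.0], [3.0, 3.0]])
ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(f)
pareto = f[ndf[0], :]
print(pareto)  # [1, 4], [2, 2], [3, 1]; [3, 3] is dominated by [2, 2]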
Example no. 3
def my_print(ad, pop, gen):  # gen: generation counter
    # ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fits)
    # extract and print non-dominated fronts
    # - ndf (list of 1D NumPy int array): the non dominated fronts
    # - dl (list of 1D NumPy int array): the domination list
    # - dc (1D NumPy int array): the domination count
    # - ndr (1D NumPy int array): the non domination ranks
    fits, vectors = pop.get_f(), pop.get_x()
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(fits)

    with open(ad.solver.output_dir + 'MOO_log.txt', 'a',
              encoding='utf-8') as fname:
        print('-' * 40, 'Generation:', gen, file=fname)
        for rank_minus_1, front in enumerate(ndf):
            print('Rank/Tier', rank_minus_1 + 1, front, file=fname)
        # dl[i] lists the individuals that i dominates; dc[i] counts how many
        # individuals dominate i (ndr ranks are 0-based, tiers above 1-based)
        for index, (domination_list, domination_count,
                    non_domination_rank) in enumerate(zip(dl, dc, ndr)):
            print('Individual #%d\t' % (index),
                  'Belongs to Rank #%d\t' % (non_domination_rank),
                  'Dominates',
                  domination_list,
                  'and is dominated by',
                  domination_count,
                  'individuals',
                  file=fname)

        # print(fits, vectors, ndf)
        print(pop, file=fname)
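A tiny self-contained check (invented points) of the dl/dc semantics logged above.

import pygmo as pg

points = [[0.0, 0.0], [1.0, 1.0], [0.5, 2.0]]
ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(points)
print(ndf)  # [array([0]), array([1, 2])]: point 0 dominates the other two
print(dl)   # dl[0] == [1, 2]; dl[1] and dl[2] are empty
print(dc)   # [0, 1, 1]: points 1 and 2 are each dominated once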
Example no. 4
def parEGO_out_process():

    parEGO_folder_name = 'parEGO_out\\ZDT'
    for i in np.arange(1, 5):
        out_file = parEGO_folder_name + str(i) + '.txt'
        f = np.genfromtxt(out_file, delimiter='\t')

        f = np.atleast_2d(f).reshape(-1, 2)
        ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(f)
        ndf = list(ndf)
        f_pareto = f[ndf[0], :]


        #ego
        output_folder_name = 'outputs\\' + 'ZDT' + str(i)
        if os.path.exists(output_folder_name):
            output_f_name = output_folder_name + '\\best_f_seed_100.joblib'
            best_f_ego = load(output_f_name)
        else:
            raise ValueError(
                "results folder for EGO does not exist"
            )


        problem_obj = 'ZDT' + str(i) + '(n_var=3)'
        problem = eval(problem_obj)  # assumes the ZDT1..ZDT4 classes are imported
        true_pf = problem.pareto_front()

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.scatter(f_pareto[:, 0], f_pareto[:, 1], c='b', marker='o')
        ax.scatter(true_pf[:, 0], true_pf[:, 1], c='r', marker='x')
        ax.scatter(best_f_ego[:, 0], best_f_ego[:, 1], c='g', marker='d')
        plt.title(problem_obj)
        plt.show()
Example no. 5
def plot_2d_evo(popi):

    magnet_dim = len(popi.get_x()[0])
    ndf, dl, dc, ndl = pg.fast_non_dominated_sorting(popi.get_f())
    print(pg.sort_population_mo(popi.get_f()))
    ndf_champ = []
#    for j in range(2,200):
    print(pg.ideal(popi.get_f())[0])
    x = np.linspace(pg.ideal(popi.get_f())[0],0)
    y = np.zeros(50)+(1)
    for j in range(2, 100):
        if j >= len(ndf):  # guard: there may be fewer fronts than iterations
            break
        plt.cla()
        plt.plot(x, y, linestyle="dashed", color="red")
        print(ndf[0])
        ndf_champ.append([popi.get_f()[i] for i in ndf[j]])
#        ax = pg.plot_non_dominated_fronts(ndf_champ[0],comp=[0,j])
#    ax.plot(color="C{}".format(j))
        ax = pg.plot_non_dominated_fronts(popi.get_f()[0:j])
        ax.set_xlabel('resolution')
        ax.set_ylabel('xangle_e_min')
        ax.set_ylim(1e-3, 1000)
        ax.set_yscale('log')
#    print(ndf_champ, ndf[0])
#    print(ndf)
        plt.savefig("popi{}".format(j))
    return
Example no. 6
def normalization_with_nd(y):
    y = check_array(y)
    n_obj = y.shape[1]
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(y)
    ndf = list(ndf)
    ndf_size = len(ndf)
    # extract nd for normalization
    if len(ndf[0]) > 1:
        ndf_extend = ndf[0]
    else:
        ndf_extend = np.append(ndf[0], ndf[1])

    nd_front = y[ndf_extend, :]

    # normalization boundary
    min_nd_by_feature = np.amin(nd_front, axis=0)
    max_nd_by_feature = np.amax(nd_front, axis=0)

    if np.any(max_nd_by_feature - min_nd_by_feature < 1e-5):
        print('nd front aligned problem, re-select nd front')
        ndf_index = ndf[0]
        for k in np.arange(1, ndf_size):
            ndf_index = np.append(ndf_index, ndf[k])
            nd_front = y[ndf_index, :]
            min_nd_by_feature = np.amin(nd_front, axis=0)
            max_nd_by_feature = np.amax(nd_front, axis=0)
            if np.any(max_nd_by_feature - min_nd_by_feature < 1e-5):
                continue
            else:
                break
    norm_y = (y - min_nd_by_feature) / (max_nd_by_feature - min_nd_by_feature)
    return norm_y
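A hypothetical call to normalization_with_nd with toy objectives; it assumes the module-level imports the function relies on (numpy as np, pygmo as pg, sklearn.utils.check_array) are in place.

import numpy as np

y = np.array([[1.0, 5.0], [2.0, 4.0], [3.0, 3.0], [4.0, 6.0]])
print(normalization_with_nd(y))
# The nd front [1,5], [2,4], [3,3] spans [0, 1] per objective after scaling;
# the dominated point [4, 6] maps above 1.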
Example no. 7
def output_2d_cosy(popi,filename):

    hv = pg.hypervolume(popi)
    ref_point = hv.refpoint()
    best_point = (popi.get_f()[hv.greatest_contributor(ref_point)])
    ndf, dl, dc, ndl = pg.fast_non_dominated_sorting(popi.get_f())
    magnet_dim = len(popi.get_x()[0])
    ndf_champ = []
    sort_param = 3
    # sort the first front in ascending order of objective sort_param
    # (replaces a hand-rolled insertion sort; the resulting order is the same)
    sorted_ndf = sorted(ndf[0], key=lambda i: popi.get_f()[i][sort_param])
    print(ndf[0], sorted_ndf)

    for i, j in enumerate(sorted_ndf):
        write_fox(np.power(np.zeros(magnet_dim) + 2, popi.get_x()[j]), i, "2f_FP3/")
    return
Example no. 8
def save_hv_igd(train_x, train_y, hv_ref, seed_index, target_problem,
                method_selection):
    problem_name = target_problem.name()
    n_x = train_x.shape[0]
    nd_front_index = return_nd_front(train_y)
    nd_front = train_y[nd_front_index, :]
    hv = return_hv(nd_front, hv_ref, target_problem)

    # for igd, only consider first front
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(train_y)
    ndf = list(ndf)
    nd_front = train_y[ndf[0], :]
    igd = return_igd(target_problem, 10000, nd_front)

    save = [hv, igd]
    print(
        'sample size %d, final save hv of current nd_front: %.4f, igd is: %.4f'
        % (n_x, hv, igd))

    working_folder = os.getcwd()
    result_folder = working_folder + '\\outputs' + '\\' + problem_name + '_' + method_selection
    if not os.path.isdir(result_folder):
        # shutil.rmtree(result_folder)
        # os.mkdir(result_folder)
        os.mkdir(result_folder)
    saveName = result_folder + '\\hv_igd_' + str(seed_index) + '.csv'
    np.savetxt(saveName, save, delimiter=',')
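For reference, a minimal hypervolume computation with pygmo on a toy front; return_hv and return_igd above are project-specific wrappers, not pygmo functions.

import pygmo as pg

front = [[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]]
hv = pg.hypervolume(front)
print(hv.compute([4.0, 4.0]))  # 6.0: area dominated by the front up to the ref point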
Example no. 9
def multi_obj_pairs(dpairs0=None, dpframe=None):
    '''
    Input:
        dpairs0: array of candidate pairs
        dpframe: DataFrame whose columns from index 2 onward hold the objectives

    Output:
        fn_dpairs: dpairs0 reordered by non-dominated front, then within each
        front by the second objective ('TYPE2')

    pygmo returns the indices of the best pairs, which are then used to pick
    rows from dpairs0: first find the non-dominated fronts, then sort each
    front by the second objective.
    '''
    import pygmo as pg
    import numpy as np
    import pandas as pd

    yt = dpframe.iloc[:, 2:].values

    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(
        points=np.array(yt, dtype='float64'))

    ndf2 = []
    for frnt in ndf:
        # frnt = ndf[0]
        tmp_frnt = np.argsort(yt[:, 1][frnt])
        ndf2.append(frnt[tmp_frnt])

    sf = np.concatenate(ndf2)
    # Invest equally in it
    fn_dpairs = dpairs0[sf]

    return fn_dpairs
Example no. 10
def findParetoIs(objectivesList, bolMinimize):
    '''Returns a list of booleans corresponding to the sets of objectives.
    True means non-dominated, False means dominated.'''

    # Negate values for maximization
    if not bolMinimize:
        minObjectivesList = []

        for objs in objectivesList:
            minObjectivesList.append([y * -1 for y in objs])
    else:
        minObjectivesList = objectivesList
        
    # Dc is a list with domination counts, 0 == non-dominated
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(minObjectivesList)
    
    # Fill list with booleans
    nondominatedIs = [count == 0 for count in dc]
    return nondominatedIs

Example no. 11
def find_n_best(df, n):
    # negate: pygmo sorts for minimization, while df holds values to maximize
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(-1.0 * df.to_numpy())
    # print('non dom fronts', ndf)
    print('Best: \n', df.iloc[ndf[0][:n]])
    print('Second worst: \n', df.iloc[ndf[-2][:n]])
    print('Worst: \n', df.iloc[ndf[-1][:n]])
    # note: returns a random sample of n rows, not the n best
    return df.sample(n=n)
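A hypothetical call to find_n_best (invented column names and values); the ndf[-2] lookup needs at least two fronts.

import pandas as pd

df = pd.DataFrame({'a': [3.0, 2.0, 1.0, 0.5], 'b': [1.0, 2.0, 3.0, 0.5]})
picked = find_n_best(df, n=2)  # prints front members, returns a random sample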
Example no. 12
def save_pareto_front(train_y, filename):

    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(train_y)
    ndf = list(ndf)
    f_pareto = train_y[ndf[0], :]
    best_f_out = f_pareto
    dump(best_f_out, filename)
Example no. 13
def pareto_fronts_probabilities(partition_list, beta, probability_method):
    """Get non-dominated fronts and its probabilities
   Parameters
   ----------
   ...
   
   Returns
   ----------
   num_fronts : int
      Number of non-dominated fronts
   ndr : numpy.ndarray
      Non-domination ranks
   probability : dictionary
      Dictionary with subproblem as keys and assigned probability values
   """

    # Get (-|Z_E|, -|1 - |Z_std||) points for all subproblems
    points = [[
        -(abs(subproblem['Z_E'])), -(abs(1 - abs(subproblem['Z_std'])))
    ] for subproblem in partition_list]

    # Obtain the non-dominated sorting of points
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(points=points)

    num_fronts = len(ndf)

    # Calculate the probability for each front.
    front_prob = {}

    if probability_method == "Pareto_Inverse":
        # Calculating denominator
        denominator = 0
        for i in range(num_fronts):
            denominator = denominator + len(ndf[i]) * ((1 / (i + 1))**beta)

        # Calculating probability
        for i in range(num_fronts):
            front_prob[i] = ((1 / (i + 1))**beta) / denominator

    elif probability_method == "Pareto_Boltzman":
        # Calculating denominator
        denominator = 0
        for i in range(num_fronts):
            denominator = denominator + len(ndf[i]) * (np.e**(-beta * (i + 1)))

        # Calculating probability
        for i in range(num_fronts):
            front_prob[i] = (np.e**(-beta * (i + 1))) / denominator

    else:
        raise ValueError('unknown probability_method: ' + str(probability_method))

    # Calculate probability for all elements of partition list
    prob_dict = {}
    for i in range(len(ndf)):
        for j in range(len(ndf[i])):
            prob_dict[ndf[i][j]] = front_prob[i]

    probabilities = [i for (j, i) in sorted(prob_dict.items())]

    return num_fronts, ndf, probabilities
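A hypothetical call with invented subproblem values, assuming numpy/pygmo are imported as in the snippet; here all three points land on a single front, so each subproblem gets probability 1/3.

subs = [{'Z_E': 0.9, 'Z_std': 0.8},
        {'Z_E': 0.3, 'Z_std': 0.5},
        {'Z_E': 0.1, 'Z_std': 0.2}]
n_fronts, fronts, probs = pareto_fronts_probabilities(
    subs, beta=1.0, probability_method="Pareto_Inverse")
print(n_fronts, probs)  # 1 [0.333..., 0.333..., 0.333...]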
Example no. 14
def ego_outputs_read(prob):
    output_folder_name = 'outputs\\' + prob
    output_f_name = output_folder_name + '\\' + 'best_f_seed_' + str(100) + '.joblib'
    best_f = load(output_f_name)
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(best_f)
    ndf = list(ndf)
    f_pareto = best_f[ndf[0], :]
    test_f = np.sum(f_pareto, axis=1)  # unused diagnostic
    return f_pareto
Example no. 15
def calcula_tempo(func):
    # lhs (Latin hypercube sampler) and fnds (custom non-dominated sort)
    # are assumed imported at module level
    pop = lhs(3, 100)
    ini = time()
    f1 = fnds(pop, func)
    print('my fnds: ', time() - ini)
    ini = time()
    f2, _, _, _ = pg.fast_non_dominated_sorting(points=pop)
    print('pygmo fnds: ', time() - ini)
    return f1, f2
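A self-contained variant of the timing harness (random points stand in for the lhs sample; only the pygmo call is timed here).

import numpy as np
import pygmo as pg
from time import time

pop = np.random.rand(100, 3)
ini = time()
ndf, _, _, _ = pg.fast_non_dominated_sorting(points=pop)
print('pygmo fnds:', time() - ini, '-', len(ndf), 'fronts')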
Example no. 16
def return_nd_front(train_y):
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(train_y)
    ndf = list(ndf)

    # extract nd for normalization
    if len(ndf[0]) > 1:
        ndf_extend = ndf[0]
    else:
        ndf_extend = np.append(ndf[0], ndf[1])

    return ndf_extend
Example no. 17
def get_best(values, ignore_idx=None):
    """Assuming all objectives are to be MAXIMIZED,
       return the indices of the non-dominated values.
       https://esa.github.io/pagmo2/docs/python/utils/py_mo_utils.html
    """
    V = np.array(values)
    if ignore_idx is not None:  # zero out ignored objectives
        for idx in ignore_idx:
            V[:, idx] = 0
    _, _, dc, _ = pg.fast_non_dominated_sorting(points=-1 * V)

    # a domination count of 0 means non-dominated
    return [i for i, v in enumerate(dc) if v == 0]
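A hypothetical call with invented values; zeroing column 2 leaves three mutually non-dominated rows.

values = [[3.0, 1.0, 7.0], [2.0, 2.0, 1.0], [1.0, 3.0, 2.0]]
print(get_best(values, ignore_idx=[2]))  # [0, 1, 2]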
Example no. 18
def post_process(train_x, train_y, cons_y, target_problem, seed_index,
                 method_selection, run_signature):

    n_sur_objs = target_problem.n_obj
    n_sur_cons = target_problem.n_constr
    # output best archive solutions
    sample_n = train_x.shape[0]
    a = np.linspace(0, sample_n - 1, sample_n, dtype=int)
    out = {}
    target_problem._evaluate(train_x, out)
    if 'G' in out.keys():
        mu_g = out['G']
        mu_g = np.atleast_2d(mu_g).reshape(-1, n_sur_cons)

        mu_g[mu_g <= 0] = 0
        mu_cv = mu_g.sum(axis=1)
        infeasible = np.nonzero(mu_cv)
        feasible = np.setdiff1d(a, infeasible)

        feasible_solutions = train_x[feasible, :]
        feasible_f = train_y[feasible, :]

        n = len(feasible_f)
        # print('number of feasible solutions in total %d solutions is %d ' % (sample_n, n))

        if n > 0:
            best_f = np.argmin(feasible_f, axis=0)
            print('Best solutions encountered so far')
            print(feasible_f[best_f, :])
            best_f_out = feasible_f[best_f, :]
            best_x_out = feasible_solutions[best_f, :]
            print(feasible_solutions[best_f, :])
        else:
            best_f_out = None
            best_x_out = None
            print('No best solutions encountered so far')
    elif n_sur_objs == 1:
        best_f = np.argmin(train_y, axis=0)
        best_f_out = train_y[best_f, :]
        best_x_out = train_x[best_f, :]
    else:
        # print('MO save pareto front from all y')
        ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(train_y)
        ndf = list(ndf)
        f_pareto = train_y[ndf[0], :]
        best_f_out = f_pareto
        best_x_out = train_x[ndf[0], :]

    savename_x, savename_f, savename_FEs = saveNameConstr(
        target_problem.name(), seed_index, method_selection, run_signature)

    dump(best_x_out, savename_x)
    dump(best_f_out, savename_f)
Example no. 19
def my_plot_non_dominated_fronts(points,
                                 marker='o',
                                 comp=[0, 1],
                                 up_to_rank_no=None):
    # We plot
    fronts, _, _, _ = pg.fast_non_dominated_sorting(points)

    # We define the colors of the fronts (grayscale from black to white)
    if up_to_rank_no is None:
        cl = list(
            zip(np.linspace(0.1, 0.9, len(fronts)),
                np.linspace(0.1, 0.9, len(fronts)),
                np.linspace(0.1, 0.9, len(fronts))))
    else:
        cl = list(
            zip(np.linspace(0.1, 0.9, up_to_rank_no),
                np.linspace(0.1, 0.9, up_to_rank_no),
                np.linspace(0.1, 0.9, up_to_rank_no)))

    fig, ax = plt.subplots()

    count = 0
    for ndr, front in enumerate(fronts):
        count += 1
        # We plot the points
        for idx in front:
            ax.plot(points[idx][comp[0]],
                    points[idx][comp[1]],
                    marker=marker,
                    color=cl[ndr])
        # We plot the fronts
        # First compute the point coordinates
        x = [points[idx][comp[0]] for idx in front]
        y = [points[idx][comp[1]] for idx in front]
        # Then sort them by the first objective
        tmp = [(a, b) for a, b in zip(x, y)]
        tmp = sorted(tmp, key=lambda k: k[0])
        # Now plot using step
        ax.step([c[0] for c in tmp], [c[1] for c in tmp],
                color=cl[ndr],
                where='post')
        if up_to_rank_no is not None and count >= up_to_rank_no:
            break

    return ax
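A quick demo of the helper above on random points (invented data and file name).

import numpy as np

points = np.random.rand(30, 2)
ax = my_plot_non_dominated_fronts(points, up_to_rank_no=3)
ax.figure.savefig('fronts.png')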
Example no. 20

    def write_ndf_csv(self, name):
        '''Write a csv file that contains the non-dominated vectors of the optimisation'''
        fitness_keys = list(self.fitness_dict.keys())
        fitness_values = list(self.fitness_dict.values())
        _, _, dc, ndr = pyg.fast_non_dominated_sorting(fitness_values)
        ndf = pyg.non_dominated_front_2d(fitness_values)

        logger.info(name + "\nNon dominated vectors: " + str(len(ndf)) +
                    "\nDomination count: " + str(dc) +
                    "\nNon domination ranks: " + str(ndr))
        pl.plot_front(name + " all fits", fitness_values, ndf)

        # Save ndf results to file
        with open(cfg.RESULTS_PATH + cfg.timestamp + '/NDF-' + name + '.csv', mode='w') as data_file:
            data_writer = csv.writer(data_file, delimiter=',', quotechar='"',
                                     quoting=csv.QUOTE_MINIMAL)
            for i in ndf:
                data = np.concatenate((fitness_keys[i], fitness_values[i]), axis=None)
                data = np.concatenate((data, self.complete_results[fitness_keys[i]]), axis=None)
                data_writer.writerow(data)
Example no. 21
    def updatePopulation(self, update: PopulationUpdate) -> None:

        features = self.aurora.characterize(update.behaviors)

        super().updatePopulation(update)

        fitnesses = np.array([g.fitness for g in self.genomes])

        novelties = None
        if self.use_local_competition:
            novelties, fitnesses = self.novelty_search.calculate_local_competition(
                features, fitnesses)
        else:
            novelties = self.novelty_search.calculate_novelty(
                features, fitnesses)

        print(novelties)

        for genome, novelty in zip(self.genomes, novelties):
            genome.novelty = novelty

        # Fitness and novelty are negated because the non-dominated sorting
        # is a minimization algorithm
        points = list(zip(-fitnesses, -novelties))

        ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(points=points)

        for i in range(len(ndf)):
            front_genomes = [self.genomes[j] for j in ndf[i]]
            front_points = [points[j] for j in ndf[i]]
            front_ranks = [ndr[j] for j in ndf[i]]

            crowding_distances = pg.crowding_distance(
                front_points) if len(front_points) > 1 else np.zeros(
                    (len(front_points)))
            for g, d, r in zip(front_genomes, crowding_distances, front_ranks):
                g.data['crowding_distance'] = d
                g.data['rank'] = r

        # super().updatePopulation(update)

        self.epochs += 1
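A minimal sketch (random data) of the rank and crowding-distance bookkeeping used above; pygmo's crowding_distance needs at least two points per front.

import numpy as np
import pygmo as pg

points = np.random.rand(10, 2).tolist()
ndf, _, _, ndr = pg.fast_non_dominated_sorting(points=points)
for front in ndf:
    fp = [points[j] for j in front]
    cd = pg.crowding_distance(fp) if len(fp) > 1 else np.zeros(len(fp))
    for j, d in zip(front, cd):
        print('idx', j, 'rank', ndr[j], 'crowding', d)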
Example no. 22
    def _calc_pareto_front(self, n_pareto_points=100):
        pf = None
        p_size, p = uniform_points(n_pareto_points, self.n_obj)
        c = np.ones((p_size, self.n_obj))
        for i in range(p_size):
            for j in range(1, self.n_obj):
                tmp = p[i, j] / p[i, 0] * np.prod(1 - c[i, self.n_obj - 1 - j +
                                                        1:self.n_obj - 1])
                c[i, self.n_obj - j -
                  1] = (tmp**2 - tmp + np.sqrt(2 * tmp)) / (tmp**2 + 1)

        x = np.arccos(c) * 2 / np.pi
        tmp = (1 - np.sin(np.pi / 2 * x[:, 1])) * p[:, self.n_obj -
                                                    1] / p[:, self.n_obj - 2]
        tmp = np.atleast_2d(tmp).reshape(-1, 1)
        a = np.arange(0, 1.00001, 0.0001)
        a = np.atleast_2d(a).reshape(1, -1)
        len_x = x.shape[0]
        E = np.abs(
            tmp.dot(1 - np.cos(np.pi / 2 * a)) - 1 +
            np.repeat(a * np.cos(5 * np.pi * a)**2, len_x, axis=0))

        rank = np.argsort(E, axis=1)
        for i in range(len_x):
            x[i, 0] = a[0, min(rank[i, 1:10])]

        p = convex(x)
        p[:, self.n_obj - 1] = disc(x)

        ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(p)
        ndf = list(ndf)
        # tmp = np.atleast_2d([np.inf] * len(p))
        # tmp[:, ndf[0]] = 1

        p = p[ndf[0], :]
        len_p = len(p)
        p1 = np.repeat(np.atleast_2d(np.arange(2, 2 * self.n_obj + 1, 2)),
                       len_p,
                       axis=0)
        pf = p1 * p

        return pf
Example no. 23
def normalization_with_nd(mu, data_y):

    # using nd front as normalization boundary
    n_obj = data_y.shape[1]
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data_y)
    ndf = list(ndf)
    ndf_size = len(ndf)
    # extract nd for normalization
    if len(ndf[0]) > 1:
        ndf_extend = ndf[0]
    else:
        ndf_extend = np.append(ndf[0], ndf[1])

    nd_front = data_y[ndf_extend, :]

    # normalization boundary
    min_nd_by_feature = np.amin(nd_front, axis=0)
    max_nd_by_feature = np.amax(nd_front, axis=0)

    if np.any(max_nd_by_feature - min_nd_by_feature < 1e-5):
        print('nd front aligned problem, re-select nd front')
        ndf_index = ndf[0]
        for k in np.arange(1, ndf_size):
            ndf_index = np.append(ndf_index, ndf[k])
            nd_front = data_y[ndf_index, :]
            min_nd_by_feature = np.amin(nd_front, axis=0)
            max_nd_by_feature = np.amax(nd_front, axis=0)
            if np.any(max_nd_by_feature - min_nd_by_feature < 1e-5):
                continue
            else:
                break

    # normalize nd front and x population for ei
    norm_nd = (nd_front - min_nd_by_feature) / (max_nd_by_feature - min_nd_by_feature)
    norm_mu = (mu - min_nd_by_feature) / (max_nd_by_feature - min_nd_by_feature)

    point_reference = np.atleast_2d([1.1] * n_obj)
    return norm_mu, norm_nd, point_reference
Example no. 24
    return archs_list_unique, science_ref_unique, cost_ref_unique, n_instr_unique


archs_unique, science_all_unique, cost_all_unique, num_instr = remove_duplicates(
    archs_all, science_all, cost_all)

### Find the non-dominated architectures
scores = np.empty([len(archs_unique), 2])
for i in range(len(archs_unique)):
    scores[i] = [-float(science_all_unique[i]), float(cost_all_unique[i])]

#science_floats = map(float, science_all_unique)
#cost_floats = map(float, cost_all_unique)

#non_dominated_points = pg.non_dominated_front_2d(scores)
ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(scores)

#domination_bools = pg.pareto_dominance(science_floats, cost_floats)


### Plot the pareto fronts
def plot_pareto_fronts(non_dom_front, archs, science, cost, n_fronts_plot):
    colors = iter(cm.rainbow(np.linspace(0, 1, n_fronts_plot)))
    archs_pareto = []
    science_pareto = []
    cost_pareto = []
    archs_instr_pareto = []
    n_archs = 0
    for i in range(n_fronts_plot):
        archs_rank = []
        science_rank = []
Example no. 25

# NDF = A and C, since no solution dominates A and C
# points = [[0, 1], [0, 3], [0.8, 0], [0, 0.9], [0.7, 0]]
# points = [[0, 1], [-1, 3], [2.3, -0.2], [1.1, -0.12], [1.1, 2.12], [-1.1, -1.1]]
# points = [[0, 1], [1, 0]]

# nadir = pg.nadir(points)  # the nadir takes the maximum of each objective
#                           # over the points of the non-dominated front
# ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(points)
# print(nadir)
# print(ndf[0])  # list of indices of non-dominated solutions

fronts = os.listdir('raw/')
for front in fronts:
    print(front)
    original_pf = np.loadtxt('raw/' + front)
    ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(original_pf)

    if len(ndf[0]) < len(original_pf):
        # remove dominated points and store the non-dominated set in a new file
        print("Removing dominated and saving to file")
        new_pf = []
        d = []  # indices of dominated solutions
        for i in range(len(original_pf)):
            if i in ndf[0]:
                new_pf.append(original_pf[i])
            else:
                d.append(i)

        print('Number of dominated sols => ', len(d))
        new_pf = np.array(new_pf).astype(float)  # np.float is deprecated
Example no. 26
def main():

	# Parse command line arguments
	args   = parse_args()

	# Extract arguments
	ntask = args.ntask
	nprocmin_pernode = args.nprocmin_pernode
	optimization = args.optimization
	nrun = args.nrun
	TUNER_NAME = args.optimization
	(machine, processor, nodes, cores) = GetMachineConfiguration()
	print ("machine: " + machine + " processor: " + processor + " num_nodes: " + str(nodes) + " num_cores: " + str(cores))

	os.environ['MACHINE_NAME'] = machine
	os.environ['TUNER_NAME'] = TUNER_NAME


	nprocmax = nodes*cores
	matrices = ["big.rua", "g4.rua", "g20.rua"]
	# matrices = ["Si2.bin", "SiH4.bin", "SiNa.bin", "Na5.bin", "benzene.bin", "Si10H16.bin", "Si5H12.bin", "SiO.bin", "Ga3As3H12.bin","H2O.bin"]
	# matrices = ["Si2.bin", "SiH4.bin", "SiNa.bin", "Na5.bin", "benzene.bin", "Si10H16.bin", "Si5H12.bin", "SiO.bin", "Ga3As3H12.bin", "GaAsH6.bin", "H2O.bin"]
	# Task parameters
	matrix    = Categoricalnorm (matrices, transform="onehot", name="matrix")
	# Input parameters
	COLPERM   = Categoricalnorm (['2', '4'], transform="onehot", name="COLPERM")
	LOOKAHEAD = Integer     (5, 20, transform="normalize", name="LOOKAHEAD")
	nprows    = Integer     (1, nprocmax, transform="normalize", name="nprows")
	npernode     = Integer     (int(math.log2(nprocmin_pernode)), int(math.log2(cores)), transform="normalize", name="npernode")
	NSUP      = Integer     (30, 300, transform="normalize", name="NSUP")
	NREL      = Integer     (10, 40, transform="normalize", name="NREL")	
	time   = Real        (float("-Inf") , float("Inf"), name="time")
	memory    = Real        (float("-Inf") , float("Inf"), name="memory")
	IS = Space([matrix])
	PS = Space([COLPERM, LOOKAHEAD, npernode, nprows, NSUP, NREL])
	OS = Space([time, memory])
	constraints = {"cst1" : cst1, "cst2" : cst2}
	models = {}
	constants={"nodes":nodes,"cores":cores}
	""" Print all input and parameter samples """	
	print(IS, PS, OS, constraints, models)

	problem = TuningProblem(IS, PS, OS, objectives, constraints, None, constants=constants)
	computer = Computer(nodes = nodes, cores = cores, hosts = None)  

	""" Set and validate options """	
	options = Options()
	options['model_processes'] = 1
	# options['model_threads'] = 1
	options['model_restarts'] = 1
	# options['search_multitask_processes'] = 1
	# options['model_restart_processes'] = 1
	options['distributed_memory_parallelism'] = False
	options['shared_memory_parallelism'] = False
	options['model_class'] = 'Model_LCM'
	options['verbose'] = False
	options['search_algo'] = 'nsga2' #'maco' #'moead' #'nsga2' #'nspso' 
	options['search_pop_size'] = 1000
	options['search_gen'] = 10
	options['search_more_samples'] = 4
	options.validate(computer = computer)

	if(TUNER_NAME=='GPTune'):



		""" Building MLA with the given list of tasks """	
		giventasks = [["big.rua"]]	
		# giventasks = [["Si2.bin"],["SiH4.bin"]]	
		# giventasks = [["Si2.bin"],["SiH4.bin"], ["SiNa.bin"], ["Na5.bin"], ["benzene.bin"], ["Si10H16.bin"], ["Si5H12.bin"], ["SiO.bin"], ["Ga3As3H12.bin"],["GaAsH6.bin"],["H2O.bin"]]	
		# giventasks = [["Si2.bin"],["SiH4.bin"], ["SiNa.bin"], ["Na5.bin"], ["benzene.bin"], ["Si10H16.bin"], ["Si5H12.bin"], ["SiO.bin"]]	

		for tmp in giventasks:
			giventask = [tmp]
			data = Data(problem)
			gt = GPTune(problem, computer = computer, data = data, options = options, driverabspath=os.path.abspath(__file__))

			NI = len(giventask)
			NS = nrun
			(data, model,stats) = gt.MLA(NS=NS, NI=NI, Igiven =giventask, NS1 = max(NS//2,1))
			print("stats: ",stats)

			""" Print all input and parameter samples """	
			for tid in range(NI):
				print("tid: %d"%(tid))
				print("    matrix:%s"%(data.I[tid][0]))
				print("    Ps ", data.P[tid])
				print("    Os ", data.O[tid].tolist())
				ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data.O[tid])
				front = ndf[0]
				# print('front id: ',front)
				fopts = data.O[tid][front]
				xopts = [data.P[tid][i] for i in front]
				print('    Popts ', xopts)		
				print('    Oopts ', fopts.tolist())		
Example no. 27
def optimizer_DE(problem, nobj, ncon, bounds, recordFlag, pop_test, F, CR, NP, itermax, flag, **kwargs):
    #  NP: number of population members/popsize
    #  itermax: number of generation
    import matplotlib.pyplot as plt
    import pygmo as pg
    plt.ion()

    dimensions = len(bounds)
    # Check input variables
    VTR = -np.inf
    refresh = 0
    # F = 0.8
    # CR = 0.8
    strategy = 6
    use_vectorize = 1

    if NP < 5:
        NP = 5
        print('pop size is increased to the minimum size of 5')

    if CR < 0 or CR > 1:
        CR = 0.5
        print('CR should be from interval [0,1]; set to default value 0.5')

    if itermax <= 0:
        itermax = 200
        print('generation size is set to default 200')

    # Initialize population and some arrays
    # if pop is a matrix of size NPxD. It will be initialized with random
    # values between the min and max values of the parameters

    min_b, max_b = np.asarray(bounds).T
    diff = np.fabs(min_b - max_b)
    pop = np.random.rand(NP, dimensions)
    pop_x = min_b + pop * diff
    # for test
    # pop_x = np.loadtxt('pop.csv', delimiter=',')


    if 'add_info' in kwargs.keys():
        print(kwargs['add_info'])
        guide_x = kwargs['add_info']

    if 'callback' in kwargs.keys():
        bilevel_fix = kwargs['callback']
        level = kwargs['level']
        compensate_x = kwargs['other_x']

    if 'add_info' in kwargs.keys():
        pop_x[0, :] = guide_x
        pop[0, :] = (guide_x - min_b)/diff


    XVmin = np.repeat(np.atleast_2d(min_b), NP, axis=0)
    XVmax = np.repeat(np.atleast_2d(max_b), NP, axis=0)


    if ncon != 0:
        if 'callback' in kwargs.keys():
            pop_x_complete = bilevel_fix(pop_x, level, compensate_x)
            pop_f, pop_g = problem.evaluate(pop_x_complete, return_values_of=["F", "G"], **kwargs)
        else:
            pop_f, pop_g = problem.evaluate(pop_x, return_values_of=["F", "G"], **kwargs)

        tmp = pop_g.copy()
        tmp[tmp <= 0] = 0
        pop_cv = tmp.sum(axis=1)

    if ncon == 0:
        # np.savetxt('test_x.csv', pop_x, delimiter=',')
        if 'callback' in kwargs.keys():
            pop_x_complete = bilevel_fix(pop_x, level, compensate_x)
            pop_f = problem.evaluate(pop_x_complete, return_values_of=["F"], **kwargs)
        else:
            pop_f = problem.evaluate(pop_x, return_values_of=["F"], **kwargs)


    # plot_infill_landscape(**kwargs)
    #-------------plot---------
    if flag:
        plt.clf()
        obj_f1, _ = kwargs['krg'][0].predict(pop_x)
        obj_f2, _ = kwargs['krg'][1].predict(pop_x)
        nadir = kwargs['nadir']
        ideal = kwargs['ideal']
        train_y = kwargs['train_y']
        ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(train_y)
        ndf = list(ndf)
        nd_front = train_y[ndf[0], :]
        plt.scatter(nd_front[:, 0], nd_front[:, 1], marker='o', c='g')
        plt.scatter(obj_f1.ravel(), obj_f2.ravel())
        plt.xlim((0, 3))
        plt.ylim((0, 4))
        plt.scatter(nadir[0], nadir[1], marker='+', c='r')
        plt.text(nadir[0], nadir[1], 'nadir')
        plt.scatter(ideal[0], ideal[1], marker='+', c='r')
        plt.text(ideal[0], ideal[1], 'ideal')
        plt.pause(0.5)


    # best member of current iteration
    bestval = np.min(pop_f)  # single objective only
    ibest = np.where(pop_f == bestval)  # what if multiple best values?
    bestmemit = pop_x[ibest[0][0]]  # np.where return tuple of (row_list, col_list)
    best_f_gen = []

    # save best_x ever
    bestmem = bestmemit

    # DE-Minimization
    # popold is the population which has to compete. It is static through one
    # iteration. pop is the
    # newly emerging population
    # initialize bestmember  matrix
    bm = np.zeros((NP, dimensions))

    # intermediate population of perturbed vectors
    ui = np.zeros((NP, dimensions))

    # rotating index array (size NP)
    rot = np.arange(0, NP)

    # rotating index array (size D)
    rotd = np.arange(0, dimensions)  # (0:1:D-1);

    iter = 1
    while iter < itermax and bestval > VTR:
        # save the old population
        # print('iteration: %d' % iter)
        oldpop_x = pop_x.copy()

        # index pointer array
        ind = np.random.permutation(4) + 1

        # shuffle locations of vectors
        a1 = np.random.permutation(NP)
        # for test
        # a1 = np.loadtxt('a1.csv', delimiter=',')
        # a1 = np.array(list(map(int, a1)))-1

        # rotate indices by ind(1) positions
        rt = np.remainder(rot + ind[0], NP)
        # rotate vector locations
        a2 = a1[rt]
        # for test
        # a2 = np.loadtxt('a2.csv', delimiter=',')
        # a2 = np.array(list(map(int, a2)))-1

        rt = np.remainder(rot + ind[1], NP)
        a3 = a2[rt]
        # for test
        # a3 = np.loadtxt('a3.csv', delimiter=',')
        # a3 = np.array(list(map(int, a3)))-1

        rt = np.remainder(rot + ind[2], NP)
        a4 = a3[rt]
        # for test
        # a4 = np.loadtxt('a4.csv', delimiter=',')
        # a4 = np.array(list(map(int, a4)))-1

        rt = np.remainder(rot + ind[3], NP)
        a5 = a4[rt]
        # for test
        # a5 = np.loadtxt('a5.csv', delimiter=',')
        # a5 = np.array(list(map(int, a5)))-1

        # shuffled population 1
        pm1 = oldpop_x[a1, :]
        pm2 = oldpop_x[a2, :]
        pm3 = oldpop_x[a3, :]
        pm4 = oldpop_x[a4, :]
        pm5 = oldpop_x[a5, :]

        # population filled with the best member of the last iteration
        # print(bestmemit)
        for i in range(NP):
            bm[i, :] = bestmemit

        mui = np.random.rand(NP, dimensions) < CR
        # mui = np.loadtxt('mui.csv', delimiter=',')

        if strategy > 5:
            st = strategy - 5
        else:
            # exponential crossover
            st = strategy
            # transpose, collect 1's in each column
            # (the exponential-crossover handling is not implemented here)

        # inverse mask to mui
        # mpo = ~mui
        mpo = mui < 0.5



        if st == 1:  # DE/best/1
            # differential variation
            ui = bm + F * (pm1 - pm2)
            # crossover
            ui = oldpop_x * mpo + ui * mui

        if st == 2:  # DE/rand/1
            # differential variation
            ui = pm3 + F * (pm1 - pm2)
            # crossover
            ui = oldpop_x * mpo + ui * mui

        if st == 3:  # DE/rand-to-best/1
            ui = oldpop_x + F * (bm - oldpop_x) + F * (pm1 - pm2)
            ui = oldpop_x * mpo + ui * mui

        if st == 4:  # DE/best/2
            ui = bm + F * (pm1 - pm2 + pm3 - pm4)
            ui = oldpop_x * mpo + ui * mui

        if st == 5:  #DE/rand/2
            ui = pm5 + F * (pm1 - pm2 + pm3 - pm4)
            ui = oldpop_x * mpo + ui * mui


        # correct bound violations: components outside [XVmin, XVmax]
        # are reset to the violated bound below
        maskLB = ui > XVmin
        maskUB = ui < XVmax

        # part one: valid points are saved, part two/three beyond bounds are set as bounds
        ui = ui * maskLB * maskUB + XVmin * (~maskLB) + XVmax * (~maskUB)

        # Select which vectors are allowed to enter the new population
        if use_vectorize == 1:

            if ncon != 0:
                if 'callback' in kwargs.keys():
                    ui_complete = bilevel_fix(ui, level, compensate_x)
                    pop_f_temp, pop_g_temp = problem.evaluate(ui_complete, return_values_of=["F", "G"], **kwargs)
                else:
                    pop_f_temp, pop_g_temp = problem.evaluate(ui, return_values_of=["F", "G"], **kwargs)

                tmp = pop_g_temp.copy()
                tmp[tmp <= 0] = 0
                pop_cv_temp = tmp.sum(axis=1)

            if ncon == 0:
                if 'callback' in kwargs.keys():
                    ui_complete = bilevel_fix(ui, level, compensate_x)
                    pop_f_temp = problem.evaluate(ui_complete, return_values_of=["F"], **kwargs)
                else:
                    pop_f_temp = problem.evaluate(ui, return_values_of=["F"], **kwargs)

            # if competitor is better than value in "cost array"
            indx = pop_f_temp <= pop_f
            # replace old vector with new one (for new iteration)
            change = np.where(indx)
            pop_x[change[0], :] = ui[change[0], :]
            pop_f[change[0], :] = pop_f_temp[change[0], :]

            # we update bestval only in case of success to save time
            indx = pop_f_temp < bestval
            if np.sum(indx) != 0:
                # best member of current iteration
                bestval = np.min(pop_f_temp)  # single objective only
                ibest = np.where(pop_f_temp == bestval)  # what if multiple best values?
                '''
                if len(ibest[0]) > 1:
                    print(
                        "multiple best values, selected first"
                    )
                '''
                bestmem = ui[ibest[0][0], :]


            # freeze the best member of this iteration for the coming
            # iteration. This is needed for some of the strategies.
            bestmemit = bestmem.copy()

        if refresh == 1:
            print('Iteration: %d,  Best: %.4f,  F: %.4f,  CR: %.4f,  NP: %d' % (iter, bestval, F, CR, NP))

        iter = iter + 1

        if flag:
            plt.clf()
            obj_f1, _ = kwargs['krg'][0].predict(pop_x)
            obj_f2, _ = kwargs['krg'][1].predict(pop_x)
            nadir = kwargs['nadir']
            ideal = kwargs['ideal']
            plt.scatter(obj_f1.ravel(), obj_f2.ravel(),  c='r')
            plt.xlim((0, 3))
            plt.ylim((0, 4))
            plt.scatter(nadir[0], nadir[1], marker='+', c='r')
            plt.text(nadir[0], nadir[1], 'nadir')
            plt.scatter(ideal[0], ideal[1], marker='+', c='r')
            plt.scatter(nd_front[:, 0], nd_front[:, 1], marker='o', c='g')
            plt.text(ideal[0], ideal[1], 'ideal')
            plt.pause(0.5)
            print(pop_f.reshape(1, -1))

        del oldpop_x

    plt.ioff()
    #print(pop_f.reshape(1, -1))

    return np.atleast_2d(bestmem), np.atleast_2d(bestval)
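An isolated sketch of the DE/rand/1 variation and binomial crossover used above (st == 2), on random data with unit bounds; all names are local to the sketch.

import numpy as np

NP, D, F, CR = 8, 3, 0.8, 0.8
pop = np.random.rand(NP, D)
a1, a2, a3 = (np.random.permutation(NP) for _ in range(3))
mui = np.random.rand(NP, D) < CR        # crossover mask
ui = pop[a3] + F * (pop[a1] - pop[a2])  # differential variation
ui = np.where(mui, ui, pop)             # binomial crossover with the parent
ui = np.clip(ui, 0.0, 1.0)              # reset bound violations to the bound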
Example no. 28
    def evaluate_pop(self):

        cvi1 = self.cvi1
        cvi2 = self.cvi2
        cvi3 = self.cvi3
        data = self.data
        size = self.size

        self.scores = [
        ]  #list to store the evaluation score of the best model in each iteration

        # calculate the integer value for the offspring out of the total size (20% out of total population)
        offspring20 = size // 5
        # calculate the integer value for the crossover out of the total size (5%)
        crossover5 = size // 20
        log = open("log.txt", "w")
        for iteration in range(self.iterations):  # start the optimization

            population = self.population

            vals12 = [
            ]  # empty list to store the output for the first two evaluation metrics
            vals3 = [
            ]  # empty list to store the output for the third evaluation metric
            indx = [
            ]  # empty list to store the index of the successful configuration from the population
            curr = []
            for i in range(size):
                if len(population[i]) <= 10:
                    try:
                        # process the cluster of each configuration in the population
                        clustering = population[i][1].fit(data)
                    except:
                        continue

                    try:
                        # get the clustering labels
                        labels = list(clustering.labels_)
                        # if the output has one cluster or n clusters, ignore it
                        if len(set(labels)) == 1 or len(
                                set(labels)) >= (len(data) - 1):
                            continue
                    except:
                        continue

                    try:
                        sample_size = int(
                            len(data) * 0.1)  # what is the use of this part???
                        if sample_size < 100:  #
                            sample_size = len(data)  #

                        # some algorithms return cluster labels
                        # where the label numbering starts from -1
                        # we increment such labels with one,
                        # otherwise (in case of the old solution)
                        # we have 0 labels more than needed
                        if -1 in labels:
                            labels = list(np.array(labels) + 1)
                        # for u in range(len(labels)):
                        #     if labels[u] < 0:
                        #         labels[u] = 0

                        # evaluate clustering
                        validate = Validation(
                            np.asmatrix(data).astype(float), labels)
                        metric_values = validate.run_list(
                            list(set([cvi1[0], cvi2[0], cvi3[0]])))

                        if "SDBW" in [cvi1[0], cvi2[0]]:
                            sdbw_c = sdbw(
                                np.asmatrix(data).astype(float),
                                clustering.labels_,
                                clustering.cluster_centers_)
                            metric_values["SDBW"] = sdbw_c.sdbw_score()

                        # first two eval metrics
                        vals12.append([
                            metric_values[cvi1[0]] * cvi1[1],
                            metric_values[cvi2[0]] * cvi2[1]
                        ])
                        vals3.append(metric_values[cvi3[0]] *
                                     cvi3[1])  # third eval metric

                        try:
                            self.population[i][2] = metric_values[
                                cvi3[0]] * cvi3[1]

                        except:
                            self.population[i].append(metric_values[cvi3[0]] *
                                                      cvi3[1])

                        try:
                            self.population[i][3] = vals12[-1]
                        except:
                            self.population[i].append(vals12[-1])
                    except:
                        continue
                else:
                    vals12.append(population[i][3])
                    vals3.append(population[i][2])

                indx.append(i)
                curr.append(population[i])

            # pareto front optimization to order the configurations using
            # the two eval metrics (min-max normalise both columns first)
            vals12 = np.array(vals12)
            l1 = (vals12[:, 0] - vals12[:, 0].min()) / (vals12[:, 0].max() - vals12[:, 0].min())
            l2 = (vals12[:, 1] - vals12[:, 1].min()) / (vals12[:, 1].max() - vals12[:, 1].min())
            vals12 = np.column_stack((l1, l2))

            ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(points=vals12)
            ndf.reverse()

            # get the top 20% from the total population
            top_20 = []
            count = 0
            for l in ndf:
                temp = []
                for ix in l:
                    #top_20.append(population[indx[ix]])

                    temp.append(curr[ix])
                    ##top_20.append(curr[ix])
                    count += 1
                    if count >= offspring20:
                        break
                temp = sorted(temp, key=itemgetter(2), reverse=True)
                top_20.extend(temp)
                if count >= offspring20:
                    break

            #op_20=sorted(top_20, key=itemgetter(2),reverse=True)

            try:
                score = self.get_nmi_score(top_20[0][1])

            except:
                score = 0.0

            log.write("iteration=> " + str(iteration) + "\n")
            for pops in top_20:
                log.write(str(pops).replace('\n', ' ') + "\n")
            new_population = []

            self.scores.append(score)
            print("iteration: ", iteration, self.scores[-1], top_20[0])
            self.iterations_log.write(
                str(iteration) + "\t" + str(self.scores[-1]) + "\t" +
                str(top_20[0]).replace('\n', ' ') + "\n")
            new_population = list(top_20)
            #print(top_20)
            # do cross over
            for c in range(0, crossover5 - 2, 2):
                new_population.extend(
                    self.cross_over(population[offspring20 + c],
                                    population[offspring20 + c + 1]))

            # do mutation
            for m in range(crossover5, offspring20):
                if random.randint(1, 3) == 1:
                    new_population.extend(
                        self.mutation([population[m + offspring20]]))
                else:
                    new_population.append(population[m + offspring20])

            self.population = []

            # update population and start new iteration
            self.generate_pop(population=new_population)

        return top_20  # return the final top 20 solutions
Example no. 29
def main():

	global ROOTDIR
	global nodes
	global cores
	global target
	global nprocmax
	global nprocmin

	# Parse command line arguments
	args   = parse_args()

	# Extract arguments
	ntask = args.ntask
	nodes = args.nodes
	cores = args.cores
	nprocmin_pernode = args.nprocmin_pernode	
	machine = args.machine
	optimization = args.optimization
	nruns = args.nruns
	truns = args.truns
	# JOBID = args.jobid
	TUNER_NAME = args.optimization
	os.environ['MACHINE_NAME'] = machine
	os.environ['TUNER_NAME'] = TUNER_NAME


	nprocmax = nodes*cores-1  # YL: there is one proc doing spawning, so nodes*cores should be at least 2
	nprocmin = min(nodes*nprocmin_pernode,nprocmax-1)  # YL: ensure strictly nprocmin<nprocmax, required by the Integer space
	# matrices = ["big.rua", "g4.rua", "g20.rua"]
	# matrices = ["Si2.bin", "SiH4.bin", "SiNa.bin", "Na5.bin", "benzene.bin", "Si10H16.bin", "Si5H12.bin", "SiO.bin", "Ga3As3H12.bin","H2O.bin"]
	matrices = ["Si2.bin", "SiH4.bin", "SiNa.bin", "Na5.bin", "benzene.bin", "Si10H16.bin", "Si5H12.bin", "SiO.bin", "Ga3As3H12.bin", "GaAsH6.bin", "H2O.bin"]
	# Task parameters
	matrix    = Categoricalnorm (matrices, transform="onehot", name="matrix")
	# Input parameters
	COLPERM   = Categoricalnorm (['2', '4'], transform="onehot", name="COLPERM")
	LOOKAHEAD = Integer     (5, 20, transform="normalize", name="LOOKAHEAD")
	nprows    = Integer     (1, nprocmax, transform="normalize", name="nprows")
	nproc     = Integer     (nprocmin, nprocmax, transform="normalize", name="nproc")
	NSUP      = Integer     (30, 300, transform="normalize", name="NSUP")
	NREL      = Integer     (10, 40, transform="normalize", name="NREL")	
	runtime   = Real        (float("-Inf") , float("Inf"), name="runtime")
	memory    = Real        (float("-Inf") , float("Inf"), name="memory")
	IS = Space([matrix])
	PS = Space([COLPERM, LOOKAHEAD, nproc, nprows, NSUP, NREL])
	OS = Space([runtime, memory])
	cst1 = "NSUP >= NREL"
	cst2 = "nproc >= nprows" # intrinsically implies "p <= nproc"
	constraints = {"cst1" : cst1, "cst2" : cst2}
	models = {}

	""" Print all input and parameter samples """	
	print(IS, PS, OS, constraints, models)

	problem = TuningProblem(IS, PS, OS, objectives, constraints, None)
	computer = Computer(nodes = nodes, cores = cores, hosts = None)  

	""" Set and validate options """	
	options = Options()
	options['model_processes'] = 1
	# options['model_threads'] = 1
	options['model_restarts'] = 1
	# options['search_multitask_processes'] = 1
	# options['model_restart_processes'] = 1
	options['distributed_memory_parallelism'] = False
	options['shared_memory_parallelism'] = False
	options['model_class'] = 'Model_LCM'
	options['verbose'] = False
	options['search_algo'] = 'nsga2' #'maco' #'moead' #'nsga2' #'nspso' 
	options['search_pop_size'] = 1000
	options['search_gen'] = 10
	options['search_more_samples'] = 4
	options.validate(computer = computer)

	if(TUNER_NAME=='GPTune'):



		""" Building MLA with the given list of tasks """	
		# giventasks = [["big.rua"], ["g4.rua"], ["g20.rua"]]	
		# giventasks = [["Si2.bin"],["SiH4.bin"]]	
		# giventasks = [["Si2.bin"],["SiH4.bin"], ["SiNa.bin"], ["Na5.bin"], ["benzene.bin"], ["Si10H16.bin"], ["Si5H12.bin"], ["SiO.bin"], ["Ga3As3H12.bin"],["GaAsH6.bin"],["H2O.bin"]]	
		giventasks = [["Si2.bin"],["SiH4.bin"], ["SiNa.bin"], ["Na5.bin"], ["benzene.bin"], ["Si10H16.bin"], ["Si5H12.bin"], ["SiO.bin"]]	

		for tmp in giventasks:
			giventask = [tmp]
			data = Data(problem)
			gt = GPTune(problem, computer = computer, data = data, options = options)

			NI = len(giventask)
			NS = nruns
			(data, model,stats) = gt.MLA(NS=NS, NI=NI, Igiven =giventask, NS1 = max(NS//2,1))
			print("stats: ",stats)

			""" Print all input and parameter samples """	
			for tid in range(NI):
				print("tid: %d"%(tid))
				print("    matrix:%s"%(data.I[tid][0]))
				print("    Ps ", data.P[tid])
				print("    Os ", data.O[tid].tolist())
				ndf, dl, dc, ndr = pg.fast_non_dominated_sorting(data.O[tid])
				front = ndf[0]
				# print('front id: ',front)
				fopts = data.O[tid][front]
				xopts = [data.P[tid][i] for i in front]
				print('    Popts ', xopts)		
				print('    Oopts ', fopts.tolist())		
Example no. 30
    def finish(self):

        if self.optimizer == "brute":
            self.modelparms.write("\n")
            self.modelprediction.write("\n")
            self.modeldiff.write("\n")

            self.modelparms.close()
            self.modelprediction.close()
            self.modeldiff.close()

            return 1

        self.x = np.array(self.x)
        self.f_A = np.array(self.f_A)
        self.rmsd_A = np.array(self.rmsd_A)
        self.f_B = np.array(self.f_B)
        self.rmsd_B = np.array(self.rmsd_B)

        self.R2_A = np.array(self.R2_A)
        self.R2_B = np.array(self.R2_B)

        ### Write out best result for selection A
        ### -------------------------------------

        if self.fitter.decomp:
            ### ndf (list of 1D NumPy int array): the non-dominated fronts
            ### dl  (list of 1D NumPy int array): the domination list
            ### dc  (1D NumPy int array)        : the domination count
            ### ndr (1D NumPy int array)        : the non-domination ranks
            ndf, dl, dc, ndr = pygmo.fast_non_dominated_sorting(self.f_A)
            ax_A = pygmo.plot_non_dominated_fronts(self.f_A)
            ax_A.figure.savefig("%spareto.selectionA.png" % self.prefix,
                                dpi=1000)
            ax_A.figure.clear("all")

            ordered_ndf = list()
            for front in ndf:
                # map the sorted local indices back to global solution indices
                ordered_ndf.append(front[pygmo.sort_population_mo(self.f_A[front])])
        else:
            ordered_ndf = np.argsort(self.f_A, axis=0)

        self.modelparms.write("### Best result (A)\n")
        self.modelprediction.write("### Best result (A)\n")
        self.modeldiff.write("### Best result (A)\n")

        for front_count, front in enumerate(ordered_ndf):
            for solution_i in front:

                step = self.step[solution_i]
                x = self.x[solution_i]
                f_A = self.f_A[solution_i]
                f_B = self.f_B[solution_i]
                rmsd_A = self.rmsd_A[solution_i]
                rmsd_B = self.rmsd_B[solution_i]
                R2_A = self.R2_A[solution_i]
                R2_B = self.R2_B[solution_i]

                self.modelparms.write("%d/%d " % (step, front_count))
                self.modelprediction.write("%d/%d " % (step, front_count))
                self.modeldiff.write("%d/%d " % (step, front_count))

                self.fitter.gist_functional(x)
                self.fitter._f_process(x)

                if self.mode in [0, 3, 5]:
                    for i in self.parmidx:
                        self.modelparms.write("%6.3f " % x[i])
                    self.modelparms.write("%6.3f " % f_A[0])
                    self.modelparms.write("%6.3f " % f_B[0])
                    self.modelparms.write("%6.3f " % R2_A)
                    self.modelparms.write("%6.3f " % R2_B)
                    self.modelparms.write("%6.3f " % rmsd_A)
                    self.modelparms.write("%6.3f " % rmsd_B)

                elif self.mode in [1, 4, 6, 7]:
                    ### Energy Output
                    for i in self.parmidx:
                        self.modelparms.write("%6.3f " % x[i])
                    self.modelparms.write("%6.3f " % f_A[0])
                    self.modelparms.write("%6.3f " % f_B[0])
                    self.modelparms.write("%6.3f " % R2_A[0])
                    self.modelparms.write("%6.3f " % R2_B[0])
                    self.modelparms.write("%6.3f " % rmsd_A[0])
                    self.modelparms.write("%6.3f " % rmsd_B[0])
                    self.modelparms.write("\n")

                    ### Entropy Output
                    self.modelparms.write("%d/%d " % (step, front_count))
                    for i in self.parmidx:
                        self.modelparms.write("%6.3f " % x[i])
                    self.modelparms.write("%6.3f " % f_A[1])
                    self.modelparms.write("%6.3f " % f_B[1])
                    self.modelparms.write("%6.3f " % R2_A[1])
                    self.modelparms.write("%6.3f " % R2_B[1])
                    self.modelparms.write("%6.3f " % rmsd_A[1])
                    self.modelparms.write("%6.3f " % rmsd_B[1])

                else:
                    mode_error(self.mode)

                if self.mode in [0, 3, 5]:
                    for i in range(self.N_len):
                        self.modelprediction.write("%6.3f " %
                                                   self.fitter._f[i])
                        diff = self.fitter._exp_data[i] - self.fitter._f[i]
                        self.modeldiff.write("%6.3f " % diff)
                elif self.mode in [1, 4, 6, 7]:
                    for i in range(self.N_len):
                        self.modelprediction.write("%6.3f " %
                                                   self.fitter._f[i, 0])
                        diff = self.fitter._exp_data[i, 0] - self.fitter._f[i,
                                                                            0]
                        self.modeldiff.write("%6.3f " % diff)
                    self.modelprediction.write("\n")
                    self.modelprediction.write("%d/%d " % (step, front_count))
                    self.modeldiff.write("\n")
                    self.modeldiff.write("%d/%d " % (step, front_count))
                    for i in range(self.N_len):
                        self.modelprediction.write("%6.3f " %
                                                   self.fitter._f[i, 1])
                        diff = self.fitter._exp_data[i, 1] - self.fitter._f[i,
                                                                            1]
                        self.modeldiff.write("%6.3f " % diff)
                else:
                    mode_error(self.mode)

                self.modelparms.write("\n")
                self.modelprediction.write("\n")
                self.modeldiff.write("\n")

        self.modelparms.write("\n")
        self.modelprediction.write("\n")
        self.modeldiff.write("\n")

        self.modelparms.close()
        self.modelprediction.close()
        self.modeldiff.close()