Example #1
    def compute_HVI_LCB(self, X):
        X = np.atleast_2d(X)
        if self.last_step_flag:
            # f = [self.compute_HVI_LCB_last_step, self.probability_feasibility_multi_gp_wrapper(model=self.model_c, l=0)]

            return -self.compute_HVI_LCB_last_step(X)
        else:
            P = self.model.get_Y_values()
            P_cur = (-np.concatenate(P,axis=1)).tolist()

            HV0_func = hypervolume(P_cur)
            HV0 = HV0_func.compute(ref_point=self.ref_point)
            HV_new = np.zeros(X.shape[0])

            for i in range(len(X)):
                x = np.atleast_2d(X[i])
                ## Hypervolume computations
                mu = -self.model.posterior_mean(x)
                var = self.model.posterior_variance(x, noise=False)
                y_lcb = mu - self.alpha * np.sqrt(var)
                y_lcb = np.transpose(y_lcb)
                P_new = np.vstack([P_cur, y_lcb])
                hv_new = hypervolume(P_new.tolist())
                try:
                    HV_new[i] = hv_new.compute(ref_point=self.ref_point)
                except Exception:
                    # the candidate point lies outside the reference point
                    print("warning! points outside reference")
                    HV_new[i] = 0
            HVI = HV_new - HV0
            HVI[HVI < 0.0] = 0.0
            return HVI
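
A minimal standalone sketch of the hypervolume-improvement step performed above, using pygmo directly; the front, candidate point, and reference point below are made up for illustration.

import numpy as np
import pygmo as pg

P = [[1.0, 4.0], [2.0, 2.0], [4.0, 1.0]]  # current front (minimization)
ref_point = [5.0, 5.0]
hv0 = pg.hypervolume(P).compute(ref_point)

y_lcb = [1.5, 1.5]  # candidate point (e.g. an LCB estimate)
hv1 = pg.hypervolume(np.vstack([P, y_lcb]).tolist()).compute(ref_point)
print(max(hv1 - hv0, 0.0))  # clipped hypervolume improvement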
Example #2
def nds_moo(models_df, n_selected=10, with_acc=False):
    models_df['EO'] = -models_df['EO']
    models_df['DP'] = -models_df['DP']
    if 'Acc' in models_df.columns:
        models_df['Acc'] = -models_df['Acc']
    metrics = models_df.values.tolist()

    fronts = ndomsort.non_domin_sort(metrics)
    selected_indexes = []
    for front in fronts:
        if len(selected_indexes) == n_selected:
            break
        
        if len(fronts[front]) + len(selected_indexes) < n_selected:
            selected_indexes += [metrics.index(seq) for seq in fronts[front]]
        else:
            last_front = list(copy.copy(fronts[front]))
            
            nadir = np.max(metrics,axis=0)
            while len(last_front) > n_selected - len(selected_indexes):
                hv = pg.hypervolume([list(s) for s in last_front])
                try:
                    idx_excl = hv.least_contributor(nadir)
                    del last_front[idx_excl]
                except Exception:
                    # least_contributor may fail (e.g. a point at or beyond
                    # the reference point); stop pruning in that case
                    break
                
            selected_indexes += [metrics.index(seq) for seq in last_front]
            
    index_list = [models_df.index.tolist()[i] for i in selected_indexes]
    return index_list
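
A standalone look at the pruning step used above: pygmo's least_contributor returns the index of the point whose removal loses the least hypervolume. The minimization points and reference point below are made up.

import pygmo as pg

front = [[1.0, 4.0], [2.0, 2.0], [2.1, 2.1], [4.0, 1.0]]  # [2.1, 2.1] is dominated
hv = pg.hypervolume(front)
print(hv.least_contributor([5.0, 5.0]))  # 2: the dominated point contributes nothing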
Example #3
def compare_save_ego2nsga(problem_list):
    save_compare = np.atleast_2d([0, 0])
    for problem in problem_list:
        f_pareto = ego_outputs_read(problem)
        f_pareto2 = nsga2_outputs_read(problem)

        point_list = np.vstack((f_pareto, f_pareto2))
        point_nadir = np.max(point_list, axis=0)
        point_reference = point_nadir * 1.1

        hv_ego = pg.hypervolume(f_pareto)
        hv_nsga = pg.hypervolume(f_pareto2)

        hv_value_ego = hv_ego.compute(point_reference)
        hv_value_nsga = hv_nsga.compute(point_reference)

        new_compare = np.atleast_2d([hv_value_ego, hv_value_nsga])
        save_compare = np.vstack((save_compare, new_compare))

    save_compare = np.delete(save_compare, 0, 0).reshape(-1, 2)
    print(save_compare)
    with open('mo_compare.txt', 'w') as f:
        for i, p in enumerate(problem_list):
            f.write(p)
            f.write('\t')
            f.write(str(save_compare[i, 0]))
            f.write('\t')
            f.write(str(save_compare[i, 1]))
            f.write('\n')
Example #4
 def evaluatePopulation(self, candidatesScore, candidatesId):
     refPoint = np.max(candidatesScore, 0) + 1e-12
     popFitness = np.zeros((self.popSize,))
     for i in range(self.popSize):
         hypervolumeIndicator = hypervolume(candidatesScore[candidatesId == i])
         popFitness[i] = hypervolumeIndicator.compute(refPoint)
     newBest = False
     bestIndividualIdx = int(np.argmax(popFitness))
     if self.ItCounter == 0:
         newBest = True
     else:
         if np.all(np.max(self.bestLandscapeIdentifierScore, 0) < refPoint):
             hypervolumeIndicator = hypervolume(self.bestLandscapeIdentifierScore)
             bestIndividualUpdatedFitness = hypervolumeIndicator.compute(refPoint)
             if popFitness[bestIndividualIdx] > bestIndividualUpdatedFitness:
                 newBest = True
             else:
                 if self.adaptative:
                     self.landscapeIdentifiers = np.append(self.landscapeIdentifiers, self.bestLandscapeIdentifier)
                     popFitness = np.append(popFitness, bestIndividualUpdatedFitness)
         else:
             newBest = True
     if newBest:
         self.bestLandscapeIdentifierScore = candidatesScore[candidatesId == bestIndividualIdx]
         self.bestLandscapeIdentifier = self.landscapeIdentifiers[bestIndividualIdx]
     return popFitness
Example #5
File: motpe.py Project: y0z/motpe
    def _split_observations(self, hp_values, ys, n_lower):
        SPLITCACHE_KEY = str(ys)
        if SPLITCACHE_KEY in self.split_cache:
            lower_indices = self.split_cache[SPLITCACHE_KEY]['lower_indices']
            upper_indices = self.split_cache[SPLITCACHE_KEY]['upper_indices']
        else:
            rank = nondominated_sort(ys)
            indices = np.array(range(len(ys)))
            lower_indices = np.array([], dtype=int)

            # nondominance rank-based selection
            i = 0
            while len(lower_indices) + sum(rank == i) <= n_lower:
                lower_indices = np.append(lower_indices, indices[rank == i])
                i += 1

            # hypervolume contribution-based selection
            ys_r = ys[rank == i]
            indices_r = indices[rank == i]
            worst_point = np.max(ys, axis=0)
            reference_point = np.maximum(
                np.maximum(
                    1.1 * worst_point,  # case: value > 0
                    0.9 * worst_point  # case: value < 0
                ),
                np.full(len(worst_point), eps)  # case: value = 0
            )

            S = []
            contributions = []
            for j in range(len(ys_r)):
                contributions.append(
                    hypervolume([ys_r[j]]).compute(reference_point))
            while len(lower_indices) + 1 <= n_lower:
                hv_S = 0
                if len(S) > 0:
                    hv_S = hypervolume(S).compute(reference_point)
                index = np.argmax(contributions)
                contributions[index] = -1e9  # mark as already selected
                for j in range(len(contributions)):
                    if j == index:
                        continue
                    p_q = np.max([ys_r[index], ys_r[j]], axis=0)
                    contributions[j] = contributions[j] \
                        - (hypervolume(S + [p_q]).compute(reference_point) - hv_S)
                S = S + [ys_r[index]]
                lower_indices = np.append(lower_indices, indices_r[index])
            upper_indices = np.setdiff1d(indices, lower_indices)

            self.split_cache[SPLITCACHE_KEY] = {
                'lower_indices': lower_indices,
                'upper_indices': upper_indices
            }

        return hp_values[lower_indices], hp_values[upper_indices]
Example #6
def reconstruct_hv_per_feval_meta(max_fevals, x_list, f_list, hv_pop):
    # Fix the reference point once, from the starting population, so every feval's hypervolume uses the same one
    original_hv = pg.hypervolume(hv_pop)
    ref = original_hv.refpoint(offset=4.0)
    hv = []

    for fevals in range(max_fevals):
        hv_pop.push_back(x_list[fevals], f_list[fevals])
        new_hv = pg.hypervolume(hv_pop)
        hv.append(new_hv.compute(ref))

    return hv
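
For context, hypervolume.refpoint(offset=...) used above returns the nadir of the stored points shifted by the offset in every objective; a quick check with made-up points:

import pygmo as pg

hv = pg.hypervolume([[1.0, 3.0], [2.0, 1.0]])
print(hv.refpoint(offset=4.0))  # nadir [2.0, 3.0] + 4.0 -> [6.0, 7.0]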
Example #7
    def compute_HVI_LCB_last_step(self, X):
        X = np.atleast_2d(X)
        print("X",X)
        HV0 = 0
        HV_new = np.zeros(X.shape[0])

        for i in range(len(X)):
            x = np.atleast_2d(X[i])
            ## Hypervolume computations
            print("x",x)
            mu = -self.model.posterior_mean(x)
            var = self.model.posterior_variance(x, noise=False)
            y_lcb = mu - self.alpha * np.sqrt(var)
            y_lcb = np.transpose(y_lcb)
            # print("np.array(mu).reshape(-1) ",np.array(y_lcb).reshape(-1)  )
            # print("self.ref_point",self.ref_point)
            # print("np.array(y_lcb).reshape(-1) >  self.ref_point",np.array(y_lcb).reshape(-1) >  self.ref_point)
            # print("np.product(np.array(y_lcb).reshape(-1) >  self.ref_point)",np.product(np.array(y_lcb).reshape(-1) >  self.ref_point))
            if np.sum(np.array(y_lcb).reshape(-1) >  self.ref_point)>0:
                HV_new[i] = -999999
            else:
                hv_new =hypervolume(y_lcb.tolist())
                HV_new[i] = hv_new.compute(ref_point = self.ref_point)
        HVI = HV_new - HV0
        HVI[HVI < 0.0] = 0.0
        return -HVI
Example #8
def output_2d_cosy(popi,filename):

    hv = pg.hypervolume(popi)
    ref_point = hv.refpoint()
    best_point = (popi.get_f()[hv.greatest_contributor(ref_point)])
    ndf, dl, dc, ndl = pg.fast_non_dominated_sorting(popi.get_f())
    magnet_dim = len(popi.get_x()[0])
    # sort the first non-dominated front in ascending order of objective sort_param
    sort_param = 3
    sorted_ndf = sorted(ndf[0], key=lambda i: popi.get_f()[i][sort_param])
    print(ndf[0], sorted_ndf)
    
    for i in range(len(sorted_ndf)):
        j = sorted_ndf[i] 
        write_fox(np.power(np.zeros(magnet_dim)+2,popi.get_x()[j]), i, "2f_FP3/")
    return
Example #9
def SpreadDeltaSMetricQualityIndicator(points, title):
    extremeFobj1 = np.array((points[0][0], points[0][1]))
    extremeParetoFront1 = np.array((points[1][0], points[1][1]))
    df = np.linalg.norm(extremeFobj1 - extremeParetoFront1)

    extremeFobj2 = np.array((points[-1][0], points[-1][1]))
    extremeParetoFront2 = np.array((points[-2][0], points[-2][1]))
    dl = np.linalg.norm(extremeFobj2 - extremeParetoFront2)

    fobjmintime = arr.array('d', [])
    fobjmindist = arr.array('d', [])

    for point in points:
        fobjmintime.append(point[0])
        fobjmindist.append(point[1])

    n = len(fobjmintime)
    norms = np.array([])
    for index in range(2, n - 1):
        d1 = np.array((points[index - 1][0], points[index - 1][1]))
        d2 = np.array((points[index][0], points[index][1]))
        norms = np.append(norms, np.linalg.norm(d2 - d1))

    dAverage = sum(norms) / len(norms)

    sumNorms = sum(abs(norm - dAverage) for norm in norms)

    DELTA = (df + dl + sumNorms) / (df + dl + (len(norms) * dAverage))

    hv = hypervolume(points)
    ref_point = np.array([1700, 35])
    #ref_point = pg.nadir(points)
    sMetric = hv.compute(ref_point)
    plt.annotate('Nadir', (ref_point[0], ref_point[1]), color='black')

    colors = np.random.rand(n)
    plt.scatter(fobjmintime, fobjmindist, c=colors, alpha=0.5)
    plt.plot([points[0][0], points[1][0]], [points[0][1], points[1][1]],
             lw=3,
             color='black',
             clip_on=False,
             label="df")
    plt.plot([points[-1][0], points[-2][0]], [points[-1][1], points[-2][1]],
             lw=3,
             color='red',
             clip_on=False,
             label="dl")
    plt.xlabel('Time')
    plt.ylabel('Distance')
    plt.legend(loc="lower left")

    plt.title(title + ' - DELTA: ' + str(round(DELTA, 3)) + ' | S-metric: ' +
              str(round(sMetric, 2)))

    plt.show()
Example #10
def hv_function():
    hv = pg.hypervolume([[1, 0], [0.5, 0.5], [0, 1], [1.5, 0.75]])
    ref_point = [2, 2]
    hv_compute_value = hv.compute(ref_point)
    print("ref_point compute:")
    print(hv_compute_value)

    hv_exclusive_value = hv.exclusive(1, ref_point)
    print("ref_point 1 exclusive:")
    print(hv_exclusive_value)

    hv_exclusive_value = hv.exclusive(0, ref_point)
    print("ref_point 0 exclusive:")
    print(hv_exclusive_value)

    hv_least_contributor_value = hv.least_contributor(ref_point)
    print("ref_point least_contributor:")
    print(hv_least_contributor_value)
    hv_greatest_contributor_value = hv.greatest_contributor(ref_point)
    print("ref_point greatest_contributor:")
    print(hv_greatest_contributor_value)
    hv_contributions_value = hv.contributions(ref_point)
    print("ref_point contributions:")
    print(hv_contributions_value)

    return 0
Example #11
def compute_hypervolume_contributions(points, ref):
    # pygmo computes hypervolume for minimization,
    # so negate rewards to get costs (a copy, to avoid mutating the caller's array)
    points = points * -1.
    hv = hypervolume(points)
    # use the negated ref-point for minimization as well
    return hv.contributions(ref * -1)
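
A hypothetical call, assuming numpy and pygmo's hypervolume are imported as the snippet implies; the reward vectors (maximization) and the reward-space reference point are made up.

import numpy as np

rewards = np.array([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]])
ref = np.array([-0.1, -0.1])  # slightly worse than every reward
print(compute_hypervolume_contributions(rewards, ref))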
Example #12
def calculateHVC(data_set, reference_point, is_maximize):
    """
    Precisely HVC calculator, using WFG method

    Parameters
    ----------
    data_set: (n_points, n_objectives) array of objective vectors
    reference_point: reference vector for the hypervolume computation
    is_maximize: if True, objectives are negated so the set can be
        treated as a minimization problem

    Returns
    -------
    A list of HVC, calculate HVC for every point

    """
    (point_num, dimension) = np.shape(data_set)
    HVC = np.zeros(point_num)
    if is_maximize:
        data = data_set * -1
    else:
        data = data_set
    hv = pygmo.hypervolume(data)
    for p in range(point_num):
        HVC[p] = hv.exclusive(p, reference_point)
    return HVC
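
A hypothetical call on a small 2-D minimization set, assuming numpy and pygmo are imported as in the snippet:

import numpy as np

data = np.array([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0]])
print(calculateHVC(data, [5.0, 5.0], is_maximize=False))  # [1. 4. 1.]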
Example #13
def calculateHypervolume(objectivesList, ideal_point, max_point, bolMinimize, bolNormalized):
    '''Calculate the current hypervolume.'''    
    # Negate values for maximization
    if not bolMinimize:
        minObjectivesList = []

        # When objectives are normalized, invert them
        if bolNormalized:
            for objs in objectivesList:
                minObjectivesList.append([1 - y for y in objs])
        # Else, multiply them by -1
        else:
            for objs in objectivesList:
                minObjectivesList.append([-y for y in objs])
    else:
        minObjectivesList = objectivesList
        
    # Choose reference point
    # When objectives are normalized, the reference is 1, 1, 1, ...

    if bolNormalized:
        ref_point = [1] * len(ideal_point)
    else:
        # When objectives are minimized, the reference is the largest point
        if bolMinimize:
            ref_point = max_point
        # When objectives are maximized, the reference is the smallest point,
        # negated because all objective values were negated above
        else:
            ref_point = [-v for v in ideal_point]

    hv = pg.hypervolume(minObjectivesList)
    return hv.compute(ref_point)
Example #14
def return_hv(nd_front, reference_point, target_problem):
    p_name = target_problem.name()
    if 'DTLZ' in p_name and int(p_name[-1]) < 5:
        ref_dir = get_uniform_weights(10000, 2)
        true_pf = target_problem.pareto_front(ref_dir)
    else:
        true_pf = target_problem.pareto_front(n_pareto_points=10000)

    max_by_f = np.amax(true_pf, axis=0)
    min_by_f = np.amin(true_pf, axis=0)

    # normalized to 0-1
    nd_front = (nd_front - min_by_f) / (max_by_f - min_by_f)

    n_obj = nd_front.shape[1]
    n_nd = nd_front.shape[0]

    reference_point_norm = reference_point

    nd_list = []
    for i in range(n_nd):
        if np.all(nd_front[i, :] < reference_point):
            nd_list.append(nd_front[i, :])
    nd_list = np.atleast_2d(np.array(nd_list)).reshape(-1, n_obj)

    if len(nd_list) > 0:
        hv = pg.hypervolume(nd_list)
        hv_value = hv.compute(reference_point_norm)
    else:
        hv_value = 0

    return hv_value
Example #15
    def make_hv_dataset(self,
                        n_instances=1000,
                        n_objects=5,
                        n_features=5,
                        seed=42,
                        cluster_spread=1.0,
                        **kwd):
        def sample_unit_ball(n_f=2, rng=None, radius=1.):
            rng = check_random_state(rng)
            X = rng.randn(1, n_f)
            u = rng.uniform(size=1)[:, None]
            X /= np.linalg.norm(X, axis=1, ord=2)[:, None]
            X *= radius * u
            return X[0]

        random_state = check_random_state(seed=seed)
        X = random_state.rand(n_instances, n_objects, n_features)
        # Normalize to unit circle and fold to lower quadrant
        X = -np.abs(X / np.sqrt(np.power(X, 2).sum(axis=2))[..., None])
        Y = np.empty(n_instances, dtype=int)
        for i in range(n_instances):
            center = sample_unit_ball(n_f=n_features,
                                      rng=i,
                                      radius=cluster_spread)
            X[i] = X[i] + center
            hv = hypervolume(X[i])
            cont = hv.contributions(center)
            Y[i] = np.argmax(cont)
        Y = convert_to_label_encoding(Y, n_objects)
        return X, Y
Example #16
def updatecontributions(A, refpoint):
    points = [ob.fitness for ob in A]
    hv = hypervolume(points)
    cont = hv.contributions(refpoint)
    for o, individual in enumerate(A):
        individual.contribution = cont[o]
    return A
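
Hypothetical usage with a minimal stand-in for the archive members; assumes `from pygmo import hypervolume` is in scope, as the snippet implies.

class Individual:
    def __init__(self, fitness):
        self.fitness = fitness
        self.contribution = 0.0

A = [Individual([1.0, 4.0]), Individual([2.0, 2.0]), Individual([4.0, 1.0])]
for ind in updatecontributions(A, [5.0, 5.0]):
    print(ind.fitness, ind.contribution)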
Example #17
    def optimize_final_evaluation(self):

        if self.last_step_evaluator is None:
            if self.constraint is None:
                sampled_Y = self.model.get_Y_values()
                sampled_Y = np.concatenate(sampled_Y, axis=1).tolist()

                sampled_hv = hypervolume(sampled_Y)
                sampled_HV = sampled_hv.compute(ref_point=self.ref_point)
                #self.Opportunity_Cost.append(sampled_HV)
                feasable_Y = sampled_Y
                self.store_results(feasable_Y)
            else:
                sampled_Y = self.model.get_Y_values()
                sampled_Y = np.concatenate(sampled_Y, axis=1)

                C_true, C_cost_new = self.constraint.evaluate(self.X ,true_val=True)
                feasable_samples = np.prod(np.concatenate(C_true, axis=1) < 0, axis=1)
                feasable_samples = np.array(feasable_samples, dtype=bool)
                feasable_Y = sampled_Y[feasable_samples]

                self.store_results(feasable_Y)
        else:

            if self.constraint is not None:
                suggested_sample = self._compute_final_evaluations()
                self.suggested_final_evaluation = suggested_sample

                Y_new, cost_new = self.objective.evaluate(suggested_sample)
                Y_new = np.concatenate(Y_new, axis=1)
                C_new, C_cost_new = self.constraint.evaluate(suggested_sample)
                C_new = np.concatenate(C_new, axis=1)

                sampled_Y = self.model.get_Y_values()
                sampled_Y = np.concatenate(sampled_Y, axis=1)
                sampled_Y = np.vstack((sampled_Y , Y_new ))

                C_true, C_cost_new = self.constraint.evaluate(self.X, true_val=True)
                C_true = np.concatenate(C_true, axis=1)
                C_true = np.vstack((C_true, C_new))

                feasable_samples = np.prod(C_true < 0, axis=1)
                feasable_samples = np.array(feasable_samples, dtype=bool)
                feasable_Y = sampled_Y[feasable_samples]
                self.store_results(feasable_Y)
            else:
                suggested_sample = self._compute_final_evaluations()
                self.suggested_final_evaluation = suggested_sample

                Y_new, cost_new = self.objective.evaluate(suggested_sample)
                Y_new = np.concatenate(Y_new, axis=1)

                X_train = self.model.get_X_values()
                sampled_Y , cost_new = self.objective.evaluate(X_train)
                sampled_Y = np.concatenate(sampled_Y, axis=1)
                sampled_Y = np.vstack((sampled_Y, Y_new))

                feasable_Y = sampled_Y
                self.store_results(feasable_Y)
Example #18
def compute_hypervolume(solutions, p_ref, contributor=0):
    # for DST the reference point is [0, -25] in reward space;
    # negate it (and the rewards) for pygmo's minimization convention
    p_ref = np.array(p_ref) * -1.
    points = np.array(solutions) * -1.
    hv = hypervolume(points)
    return hv.compute(p_ref), hv.exclusive(contributor, p_ref)
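
A hypothetical call with two made-up reward vectors and the reward-space reference point [0, -25] mentioned in the comment:

import numpy as np

hv_total, hv_excl = compute_hypervolume([[1.0, -3.0], [2.0, -5.0]],
                                        np.array([0.0, -25.0]))
print(hv_total, hv_excl)  # 42.0 2.0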
Example #19
def H(d, r):
    try:
        from pygmo import hypervolume
    except ImportError as e:
        raise ImportError(
            "Failed to import pygmo. To use it, please install pygmo according to "
            "https://esa.github.io/pygmo2/install.html."
        ) from e
    return hypervolume(d).compute(r)
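
A tiny smoke test of H with made-up minimization points d and a reference point r:

print(H([[1.0, 2.0], [2.0, 1.0]], [3.0, 3.0]))  # 2 + 2 - 1 = 3.0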
Example #20
def main():
    # Q1.1
    df = generateDataFrame()
    print("Q1.1\n", df)

    # Q1.2
    df = ENDS(df)
    print("\nQ1.2\n", df[['f1', 'f2', 'front number']])
    worst_f1 = max(df['f1'])
    worst_f2 = max(df['f2'])
    print(f"\nworst f1: {worst_f1}\nworst f2: {worst_f2}")

    # Q1.3
    df = crowding_distance(df)
    print("\nQ1.3\n", df[['f1', 'f2', 'front number', "crowding distance"]])

    # Q1.4
    initial_df = df
    df = next_generation(df)
    plot(df, initial_df)

    # Q1.5 Combined
    df = pd.concat([df, initial_df])
    df = ENDS(df)
    df = crowding_distance(df)

    # Select 25 individuals based on ENDS
    df.sort_values(['front number', 'crowding distance'], ascending=[True, False], inplace=True)

    fig = plt.figure()
    ax1 = fig.add_subplot(111)

    ax1.scatter(df[:25]['f1'], df[:25]['f2'], s=20, c='b', marker="o", label='selected generation')
    ax1.scatter(df[25:]['f1'], df[25:]['f2'], s=20, c='r', marker="o", label='overall generation')
    plt.legend(loc='upper left')
    plt.show()

    # Q1.6 Hypervolume
    hypervolumes = []
    # Calculate for every gen
    for i in range(NGEN):
        # create next generation
        df = next_generation(df)
        df = ENDS(df)
        df = crowding_distance(df)

        # Calculate hypervolume from previously determined worst values in initial gen
        hyp = pg.hypervolume(df[['f1', 'f2']].values)
        hyp = hyp.compute([worst_f1, worst_f2])
        print(f"Hypervolume: {hyp}")
        # Normalise the hypervolume
        hyp = hyp/np.prod([worst_f1, worst_f2])
        print(f'Normalised Hypervolume: {hyp}')
        hypervolumes.append(hyp)

    # plot the hypervolumes against generation number
    sns.regplot(x=list(range(len(hypervolumes))), y=hypervolumes)
    plt.show()
Example #21
def E_SMS_EGO(problem, eval_budget, time_budget):
    dim = problem.n_var
    sample_size = 10 * dim
    dimension_bounds = np.concatenate(
        (problem.xl, problem.xu)).reshape(2, len(problem.xl)).T
    x = generate_MD_LHSample(dimension_bounds, dim, sample_size)
    y_values = problem.evaluate(x, return_values_of=["F", "feasible"])
    x = x[y_values[1].flatten()]
    y_values = y_values[0][y_values[1].flatten()]
    cv_splits = 10
    encountered = y_values
    predicted_y_gc = []  #  greatest contributor according to ensemble models
    counter = 1
    while counter < eval_budget + 1 and time_budget > 0:
        start = time.time()
        models_per_objective, weights_per_objective, poi_per_objective = [], [], []
        for obj in range(problem.n_obj):
            print("\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A")
            print("Iteration:", counter, '/', eval_budget, "(Objective",
                  obj + 1, '/', problem.n_obj, ')\n')
            output = minimize_objective(x, y_values.T[obj], cv_splits, dim,
                                        dimension_bounds)
            models_per_objective.append(output[0])
            weights_per_objective.append(output[1])
            poi_per_objective.append(output[2])  # points of interest
        poi_concatenated = np.concatenate(poi_per_objective)
        if poi_concatenated.size == 0:
            print("No new points found, optimization process complete.")
            return ((x, y_values, (models_per_objective,
                                   weights_per_objective), predicted_y_gc))
        potential_points = np.zeros(
            (problem.n_obj,
             len(poi_concatenated))).T  # obj * total points of interest matrix
        for i in range(len(models_per_objective)):
            for j in range(len(poi_concatenated)):
                potential_points[j][i] = ensemble_prediction(
                    poi_concatenated[j], models_per_objective[i],
                    weights_per_objective[i], x, y_values.T[i].reshape(-1,
                                                                       1), dim)
        hv = pg.hypervolume(potential_points)
        encountered = np.concatenate((encountered, potential_points))
        reference_point = calc_rp(encountered)
        predicted_y_gc.append(
            potential_points[hv.greatest_contributor(reference_point)])
        greatest_contributor = [
            poi_concatenated[hv.greatest_contributor(reference_point)]
        ]
        '''Evaluate new point greatest contributor'''
        evaluated_new = problem.evaluate(greatest_contributor,
                                         return_values_of=["F"])
        x = np.concatenate((x, [greatest_contributor[0]]))
        y_values = np.concatenate((y_values, evaluated_new))
        end = time.time()
        time_budget = time_budget - (end - start)
        counter = counter + 1
    return ((x, y_values, (models_per_objective, weights_per_objective),
             predicted_y_gc))
Example #22
def select_result_BHV(archive, remaining_size, refer_point):
    new_archive = list(archive)
    count = len(new_archive)
    while count > remaining_size:
        hv = pg.hypervolume(new_archive)
        index = hv.least_contributor(refer_point)
        new_archive.pop(index)
        count -= 1
    return np.array(new_archive)
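
Hypothetical usage: shrink a made-up archive to two points by repeatedly dropping the least hypervolume contributor (assumes pygmo as pg and numpy as np, as in the snippet).

archive = [[1.0, 4.0], [2.0, 2.0], [4.0, 1.0], [3.0, 3.0]]  # [3.0, 3.0] is dominated
print(select_result_BHV(archive, remaining_size=2, refer_point=[5.0, 5.0]))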
Example #23
def compute_hypervolume(q_set, nA, ref):
    q_values = np.zeros(nA)
    for i in range(nA):
        # pygmo uses hv minimization,
        # negate rewards to get costs
        points = np.array(q_set[i]) * -1.
        hv = hypervolume(points)
        # use negative ref-point for minimization
        q_values[i] = hv.compute(ref*-1)
    return q_values
Example #24
def hv_point_pop():
    ref_point = [2, 2]
    points = [[1, 2], [0.5, 3], [0.1, 3.1]]
    hv = pg.hypervolume(points)

    udp = pg.dtlz(prob_id=1, dim=5, fdim=4)
    pop = pg.population(prob=udp, size=40)
    # hv = pg.hypervolume(pop=pop)
    udp.plot(pop)
    print(pop)
Example #25
def hvContribution(Pop, Zmin, a):

    hvCont = np.zeros(Pop.NPop)
    indND = np.where(Pop.rank == 1)[0]
    NDobj = Pop.obj[indND] * a + Zmin
    ref = NDobj.max(axis=0) * 1.1
    hv = hypervolume(NDobj.tolist())
    for i in range(len(indND)):
        hvCont[indND[i]] = hv.exclusive(i, ref.tolist())

    Pop.hvCont = hvCont
Example #26
def hypervolume(ds, ref_point=None):
    scaled_objectives = objective_space(ds, scale=True)

    hv = pygmo.hypervolume(scaled_objectives.to_array().T)

    if ref_point is None:
        ref_point = hv.refpoint()

    return xr.DataArray(
        hv.compute(ref_point), name="hypervolume", attrs={"units": None}
    )
Example #27
def calc_hypervolume(vectors: list, reference: Vector) -> float:
    """
    By default, the pygmo library is used for minimization problems.
    In our case, we need it to work for maximization problems.
    :param vectors: List of vectors limits of hypervolume
    :param reference: Reference vector to calc hypervolume
    :return: hypervolume area.
    """
    # Multiply by -1, to convert maximize problem into minimize problem.
    return pg.hypervolume([v.components * -1 for v in vectors
                           ]).compute(reference.components * -1)
Example #28
def hypervolume(pointset, ref):
    """Compute the absolute hypervolume of a *pointset* according to the
    reference point *ref*.
    """

    # keep only points that weakly dominate the reference point
    pointset = pointset[np.all(pointset <= ref, axis=1)]

    if len(pointset) == 0:
        return 0
    hv = pg.hypervolume(pointset)
    return hv.compute(ref)
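
A hypothetical call, assuming numpy as np and pygmo as pg as in the snippet; the point outside the reference point is filtered out before pygmo sees it.

pts = np.array([[1.0, 4.0], [2.0, 2.0], [6.0, 6.0]])
print(hypervolume(pts, np.array([5.0, 5.0])))  # 10.0, from the two remaining points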
Example #29
def hyp_compute(point_list, mutate_point, reference_point):

    point_collection = [point_list]
    for i in range(len(point_list)):
        temp = point_list.copy()
        temp[i] = mutate_point
        point_collection.append(temp)

    hyper_value = []
    for j in range(len(point_collection)):
        hyper_value.append(
            pg.hypervolume(point_collection[j]).compute(reference_point))
    return point_collection, hyper_value
Example #30
def hypcompute(pList, random, ref_point):
    # the original list plus one variant per point, where that point
    # is replaced by a random sphere point
    lList = [pList]
    for i in range(len(pList)):
        temp = pList.copy()
        temp[i] = spherepoint(random).point()
        lList.append(temp)

    hypvalue = []
    for j in range(len(lList)):
        hypvalue.append(pg.hypervolume(lList[j]).compute(ref_point.point()))
    return lList, hypvalue