Code example #1
File: numpyex.py Project: KariVillagran/memetico
def computeHV(maxMinValues, fronteras):
	minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
	minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
	#print minObj1, maxObj1, minObj2, maxObj2
	difObj1 = maxObj1 - minObj1
	difObj2 = maxObj2 - minObj2
	# Stored in this list because the results will go inside the object... in normalizedValues...
	listaHyperVol = []
	normalizados = []
	for i in range(len(fronteras)):
		#print i
		normValues = []
		for elemento in fronteras[i]:
			#print elemento
			values = []
			cost1 = elemento[0]
			cost2 = elemento[1]
			if difObj1 == 0:
				valueObj1 = 0
			else:
				valueObj1 = (cost1 - minObj1)/difObj1
			if difObj2 == 0:
				valueObj2 = 0
			else: 
				valueObj2 = (cost2 - minObj2)/difObj2
			values.extend([valueObj1, valueObj2])
			normValues.append(values)
		normalizados.append(normValues)
	referencePoint = [2,2]
	for i in range(len(normalizados)):
		hv = HyperVolume(referencePoint)
		volume = hv.compute(normalizados[i])
		listaHyperVol.append(volume)
	#print "Valores de HV para cada run: ", listaHyperVol    
	return listaHyperVol	
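The per-element min–max normalization above can be written more compactly with NumPy. A minimal sketch, assuming each entry of fronteras is a list of (cost1, cost2) pairs and maxMinValues keeps the same [[min, max], [min, max]] layout used above:

import numpy as np

def normalize_fronts(maxMinValues, fronteras):
    # per-objective minima and ranges, mirroring minObj*/difObj* above
    mins = np.array([maxMinValues[0][0], maxMinValues[1][0]], dtype=float)
    ranges = np.array([maxMinValues[0][1] - maxMinValues[0][0],
                       maxMinValues[1][1] - maxMinValues[1][0]], dtype=float)
    safe = np.where(ranges == 0, 1.0, ranges)  # avoid division by zero
    normalized = []
    for front in fronteras:
        pts = (np.asarray(front, dtype=float) - mins) / safe
        pts[:, ranges == 0] = 0.0  # degenerate objectives map to 0, as above
        normalized.append(pts.tolist())
    return normalized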
Code example #2
File: main.py Project: MrMJ06/ASC-GeneticAlgorithms
def print_front(front, ideal, final=False, type=0):

    plt.scatter(ideal[0], ideal[1], c='r', alpha=0.1, linewidths=0.01)
    plt.scatter(front[0], front[1], c='b', linewidths=0.01)
    front = transform_points(front)
    if final:
        if type != 0:
            nsga_points = get_points("best_pop_cf6_4.out")
        else:
            nsga_points = get_points("best_pop.out")

        plt.scatter(nsga_points[0],
                    nsga_points[1],
                    c="y",
                    alpha=0.8,
                    linewidth=0.01)
        referencePoint = [1, 1]
        hyperVolume = HyperVolume(referencePoint)
        nsga_points = transform_points(nsga_points)
        result = hyperVolume.compute(front)
        result_nsga = hyperVolume.compute(nsga_points)
        coverage_f2_f1 = calculate_coverage(front, nsga_points)
        coverage_f1_f2 = calculate_coverage(nsga_points, front)
        print("Coverage my front over other front = " + str(coverage_f2_f1))
        print("Coverage other front over my front = " + str(coverage_f1_f2))
        print("Hypervolume my solution = " + str(result))
        print("Hypervolume other solution = " + str(result_nsga))
    plt.show()
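calculate_coverage is a project helper that is not shown on this page. A plausible stand-in is Zitzler's two-set coverage metric C(A, B), the fraction of points in B weakly dominated by at least one point in A; the sketch below is only an assumption about what the helper computes, written for minimized objectives:

def calculate_coverage(a_front, b_front):
    # Hypothetical stand-in: Zitzler's C(A, B) metric. Assumes both fronts
    # are sequences of equal-length objective vectors to be minimized.
    def weakly_dominates(p, q):
        return all(pi <= qi for pi, qi in zip(p, q))

    if not b_front:
        return 0.0
    covered = sum(1 for q in b_front
                  if any(weakly_dominates(p, q) for p in a_front))
    return covered / len(b_front)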
Code example #3
def compute_pyhv(approximation_set, reference_point):
    """
    Returns the hypervolume of the approximation set with respect to the
    reference point, based on Simon Wessing's code.
    """
    #referencePoint = [2, 2, 2]
    hv = HyperVolume(reference_point)
    #front = [[1,0,1], [0,1,0]]
    return hv.compute(approximation_set)
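For reference, all of these snippets use the same two-call API from Simon Wessing's hypervolume implementation: construct HyperVolume with a reference point, then pass a list of objective vectors to compute. A minimal usage sketch (the import path is an assumption; Wessing's code usually ships as a single module, often named hv.py):

from hv import HyperVolume  # import path is an assumption

# Minimization is assumed: the reference point should be component-wise
# worse (larger) than every point on the front.
reference_point = [2.0, 2.0]
front = [[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]]
hv = HyperVolume(reference_point)
print(hv.compute(front))  # volume dominated by the front, bounded by the reference point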
Code example #4
File: test_MPM2.py Project: wangronin/HIGA-MO
def run_optimizer(optimizer, maxiter, mu, ref):

    # hyper-volume calculation
    hv = HyperVolume(ref)

    # track the population
    pop_track = DataFrame(zeros((maxiter, 2 * mu)), \
        columns=['x^{}_{}'.format(i, j) for i in range(1, mu+1) for j in [1, 2]])
    pop_f_track = DataFrame(zeros((maxiter, 2 * mu)), \
        columns=['f^{}_{}'.format(i, j) for i in range(1, mu+1) for j in [1, 2]])
    dominance_track = DataFrame(zeros((maxiter, mu), dtype='int'),
                                columns=range(1, mu + 1))

    hv_track = zeros(maxiter)
    hist_sigma = zeros((mu, maxiter))

    # recording the trajectories of dominated points
    pop_x_traject = {}
    pop_f_traject = {}

    for i in range(maxiter):
        # invoke the optimizer by step
        ND_idx, fitness = optimizer.step()
        pop = deepcopy(optimizer.pop)
        fitness = deepcopy(fitness)
        fronts_idx = optimizer.fronts

        # compute the hypervolume indicator
        PF = fitness[:, fronts_idx[0]]
        hv_track[i] = hv.compute(PF.T)

        # tracking the whole population
        pop_track.loc[i] = pop.reshape(1, -1, order='F')
        pop_f_track.loc[i] = fitness.reshape(1, -1, order='F')
        for j, f in enumerate(fronts_idx):
            dominance_track.iloc[i, f] = j

        # record the trajectory of dominated points
        for j, idx in enumerate(range(optimizer.mu)):
            if idx in pop_f_traject:
                if any(pop_f_traject[idx][-1, :] != fitness[:, j]):
                    pop_f_traject[idx] = np.vstack(
                        (pop_f_traject[idx], fitness[:, j]))
            else:
                pop_f_traject[idx] = np.atleast_2d(fitness[:, j])

            if idx in pop_x_traject:
                if any(pop_x_traject[idx][-1, :] != pop[:, j]):
                    pop_x_traject[idx] = np.vstack(
                        (pop_x_traject[idx], pop[:, j]))
            else:
                pop_x_traject[idx] = np.atleast_2d(pop[:, j])

    return pop_track, pop_f_track, dominance_track, pop_x_traject, pop_f_traject, \
        hv_track, hist_sigma
Code example #5
def evolve_history(dag, keys, hv_only=True):
    hv = HyperVolume(hv_ref)
    d, nz = fetch(dag, keys, "trace", lambda x: x[-1])
    t = {}
    for k, v in d.items():
        best = max(v, key=lambda x: hv.compute(map(nz, x[-1])))
        if hv_only:
            t[k] = [hv.compute(map(nz, x)) for x in best]
        else:
            t[k] = best
    return t
Code example #6
def score(chro):
    front = []
    for i in range(len(chro)):
        if (chro[i][747] == 1):
            front.append(chro[i][745:747])
    # remove duplicate points
    arr = np.array(front)
    front = np.array(list(set([tuple(t) for t in arr]))).tolist()
    # scoring
    referencePoint = [700000, 1000]
    hyperVolume = HyperVolume(referencePoint)
    result = hyperVolume.compute(front)
    return result
Code example #7
File: Ikedafunc.py Project: NKanazawa/NKProject
def recHV(population, refs):
    truefront = emo.selFinal(population, 200)
    if len(truefront) == 1 and not truefront[0].isFeasible:
        return truefront, 0
    else:
        dcfront = copy.deepcopy(truefront)
        tfPoint = []
        for i, ind in enumerate(dcfront):
            if not checkDuplication(ind, dcfront[:i]):
                tfPoint.append([ind.fitness.values[0], ind.fitness.values[1]])
        hy = HyperVolume(refs)
        HV = hy.compute(tfPoint)
        return truefront, HV
Code example #8
def calculate_hv(intervals_list, hv_reference_point):
    referencePoint = hv_reference_point
    hyperVolume = HyperVolume(referencePoint)

    front = []
    for interval in intervals_list:
        for solution in interval.current_solutions:
            front.append([
                solution.travel_time / hv_normalization_factor, solution.price
            ])
    front.sort()
    front = list(front for front, _ in itertools.groupby(front))
    result = hyperVolume.compute(front)
    return result
Code example #9
def slave():
    """This function performs the actual heavy computation
    """
    # Get the master communicator
    comm = MPI.Comm.Get_parent()
    
    optimizer, prob = comm.bcast(None, root=0)
    
    n_approximation = 1000
    ref = prob.ref
    pareto_f2 = prob.pareto_front()
    hv = HyperVolume(ref)
    
    f1 = np.linspace(0, 1, n_approximation)
    f2 = pareto_f2(f1)
    pareto_front = np.vstack([f1, f2]).T
    
    # run the algorithm
    optimizer.optimize()
        
    front_idx = optimizer.pareto_front
    front = optimizer.fitness[:, front_idx].T
    
    # Compute the performance metrics
    volume = hv.compute(front)
    n_point = len(front_idx)
    
    convergence = 0
    for p in front:
        dis = np.sqrt(np.sum((pareto_front - p) ** 2.0, axis=1))
        convergence += np.min(dis)
    
    convergence /= len(front_idx)
    
    # synchronization...
    comm.Barrier()
    
    output = {
             'hv': volume,
             'convergence': convergence,
             'n_point': n_point
             }
    
    # return performance metrics
    comm.gather(output, root=0)
    
    # free all slave processes
    comm.Disconnect()
Code example #10
File: numpyex.py Project: KariVillagran/memetico
def computeHyperVolume(maxMinValues, fronteras, generacion):
	minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
	minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
	#print minObj1, maxObj1, minObj2, maxObj2
	difObj1 = maxObj1 - minObj1
	difObj2 = maxObj2 - minObj2
	# Stored in this list because the results will go inside the object... in normalizedValues...
	listaHyperVol = []
	normalizados = []
	#print len(fronteras)
	#h = input("")
	for i in range(len(fronteras)):
		normValues = []
		#print i
		for j,frontera in enumerate(fronteras[i]):
		
			if j == generacion:
				for elemento in frontera:

				#h = input(". . . .")
					values = []
					cost1 = elemento[0]
					cost2 = elemento[1]
					if difObj1 == 0:
						valueObj1 = 0
					else:
						valueObj1 = (cost1 - minObj1)/difObj1
					if difObj2 == 0:
						valueObj2 = 0
					else:
						valueObj2 = (cost2 - minObj2)/difObj2
					values.extend([valueObj1, valueObj2])
					normValues.append(values)
			#print "valores normalizados"
			#for value in normValues:
			#   print value
				normalizados.append(normValues)
		#self.normalizedValues.append(normValues)
	#h = input("")
	referencePoint = [2,2]
	for i in range(len(normalizados)):
		hv = HyperVolume(referencePoint)
		volume = hv.compute(normalizados[i])
		listaHyperVol.append(volume)
	print("HV values for each run: ", listaHyperVol)
	print(len(listaHyperVol))
	return listaHyperVol
Code example #11
 def compute_fitness(self):
     # Step 0 - Obtain fevals of front
     front = deepcopy(self.contents)
     nrec = len(front)
     if nrec == 1:
         self.contents[0].fitness = 1
     else:
         fvals = [rec.fx for rec in front]
         nobj = len(front[0].fx)
         # Step 1 - Normalize Objectives
         normalized_fvals = normalize_objectives(fvals)
         # Step 2 - Compute Hypervolume Contribution
         hv = HyperVolume(1.1*np.ones(nobj))
         base_hv = hv.compute(np.asarray(normalized_fvals))
         for i in range(nrec):
             fval_without = deepcopy(normalized_fvals)
             del fval_without[i]  # drop point i from the front
             new_hv = hv.compute(np.asarray(fval_without))
             hv_contrib = base_hv - new_hv
             self.contents[i].fitness = hv_contrib
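The loop above uses the standard leave-one-out pattern for hypervolume contributions: a point's contribution is the total hypervolume minus the hypervolume of the front without that point. A small self-contained sketch (same assumed hv import as above):

from hv import HyperVolume  # import path is an assumption, as above

front = [[0.1, 0.9], [0.4, 0.4], [0.9, 0.1]]
hv = HyperVolume([1.1, 1.1])
base_hv = hv.compute(front)
for i, point in enumerate(front):
    rest = front[:i] + front[i + 1:]          # front with point i removed
    print(point, base_hv - hv.compute(rest))  # point i's HV contribution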
Code example #12
 def compute_hv_fitness(self):
     # Step 0 - Obtain fevals of front
     front = deepcopy(self.F_box)
     nobj, nrec = front.shape
     if nrec == 1:
         self.contents[0].fitness = 1
     else:
         fvals = np.transpose(front)
         fvals = fvals.tolist()
         # Step 1 - Normalize Objectives
         normalized_fvals = normalize_objectives(fvals)
         # Step 2 - Compute Hypervolume Contribution
         hv = HyperVolume(1.1*np.ones(nobj))
         base_hv = hv.compute(np.asarray(normalized_fvals))
         for i in range(nrec):
             fval_without = deepcopy(normalized_fvals)
             del fval_without[i]  # drop point i from the front
             new_hv = hv.compute(np.asarray(fval_without))
             hv_contrib = base_hv - new_hv
             self.contents[i].fitness = hv_contrib
Code example #13
def fetch_rts(dag):
    d = {s: {} for s in rts}
    nzs = {s:Normaliser() for s in rts}
    for k, v in query(dag, ["algorithm", "runtime_scale"], "results"):
        alg, s = k.split("-")
        s = float(s)
        if alg not in d[s]:
            d[s][alg] = [v]
        else:
            d[s][alg].append(v)
        nzs[s].update(v)

    hv = HyperVolume(hv_ref)
    for s, dd in d.items():
        for alg, vs in dd.items():
            v = max(hv.compute(map(nzs[s], x)) for x in vs)
            d[s][alg] = v
    res = {}
    for alg in d[rts[0]]:
        res[alg] = [d[s][alg] for s in rts]
    return res
Code example #14
File: memetrics.py Project: KariVillagran/memetico
    def computeHVPO(self, maxMinValues, frontera):
        minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
        minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
        #print minObj1, maxObj1, minObj2, maxObj2
        difObj1 = maxObj1 - minObj1
        difObj2 = maxObj2 - minObj2
        if difObj1 == 0:
            print("The hypervolume value is: ", 4.0)
            self.hyperVolumePO.append(4.0)
        # Stored in this list because the results will go inside the object... in normalizedValues...
        #normalizados = []
        else:

            normValues = []
            for elemento in frontera:

                #print elemento
                values = []
                cost1 = elemento[0]
                cost2 = elemento[1]
                valueObj1 = (cost1 - minObj1) / difObj1
                valueObj2 = (cost2 - minObj2) / difObj2
                values.extend([valueObj1, valueObj2])
                normValues.append(values)
            #print "valores normalizados"
            #for value in normValues:
            #   print value
            #normalizados.append(normValues)

            #self.normalizedValues.append(normValues)
            referencePoint = [2, 2]
            #for i in range(len(normalizados)):
            hv = HyperVolume(referencePoint)
            volume = hv.compute(normValues)
            self.hyperVolumePO.append(volume)
        for volume in self.hyperVolumePO:
            print("The hypervolume is: ", volume)
        return 1
Code example #15
    def select_points(self, front, xcand_nd, fhvals_nd, indices=None):

        # Use hypervolume contribution to select the next best
        # Step 1 - Normalize Objectives
        (M, l) = xcand_nd.shape
        temp_all = np.vstack((fhvals_nd, front))
        minpt = np.zeros(self.data.nobj)
        maxpt = np.zeros(self.data.nobj)
        for i in range(self.data.nobj):
            minpt[i] = np.min(temp_all[:,i])
            maxpt[i] = np.max(temp_all[:,i])
        normalized_front = np.asarray(normalize_objectives(front, minpt, maxpt))
        (N, temp) = normalized_front.shape
        normalized_cand_fh = np.asarray(normalize_objectives(fhvals_nd.tolist(), minpt, maxpt))

        # Step 2 - Make sure points already selected are not included in new points list
        if indices is not None:
            nd = range(N)
            dominated = []
            for index in indices:
                fvals = np.vstack((normalized_front, normalized_cand_fh[index,:]))
                (nd, dominated) = ND_Add(np.transpose(fvals), dominated, nd)
            normalized_front = fvals[nd,:]
            N = len(nd)

        # Step 3 - Compute Hypervolume Contribution
        hv = HyperVolume(1.1*np.ones(self.data.nobj))
        xnew = np.zeros((self.npts, l))
        if indices is None:
            indices = []
        hv_vals = -1*np.ones(M)
        hv_vals[indices] = -2
        for j in range(self.npts):
            # 3.1 - Find point with best HV improvement
            base_hv = hv.compute(normalized_front)
            for i in range(M):
                if hv_vals[i] != 0 and hv_vals[i] != -2:
                    nd = range(N)
                    dominated = []
                    fvals = np.vstack((normalized_front, normalized_cand_fh[i,:]))
                    (nd, dominated) = ND_Add(np.transpose(fvals), dominated, nd)
                    if dominated and dominated[0] == N: # Record is dominated
                        hv_vals[i] = 0
                    else:
                        new_hv = hv.compute(fvals[nd,:])
                        hv_vals[i] = new_hv - base_hv
            # vals = np.zeros((M,2))
            # vals[:,0] = xcand_nd[:,0]
            # vals[:,1] = hv_vals
            # print(vals)
            # 3.2 - Update selected candidate list
            index = np.argmax(hv_vals)
            xnew[j,:] = xcand_nd[index,:]
            indices.append(index)
            # 3.3 - Bar point from future selection and update non-dominated set
            hv_vals[index] = -2
            nd = range(N)
            dominated = []
            fvals = np.vstack((normalized_front, normalized_cand_fh[index,:]))
            (nd, dominated) = ND_Add(np.transpose(fvals), dominated, nd)
            normalized_front = fvals[nd,:]
            N = len(nd)
        return indices
Code example #16
File: main.py Project: Jsllmj/cosmos
def evaluate(j, e, method, scores, data_loader, logdir, reference_point, split,
             result_dict):
    assert split in ['train', 'val', 'test']
    global volume_max
    global epoch_max

    score_values = np.array([])
    for batch in data_loader:
        batch = utils.dict_to_cuda(batch)

        # more than one solution for some solvers
        s = []
        for l in method.eval_step(batch):
            batch.update(l)
            s.append([score(**batch) for score in scores])
        if score_values.size == 0:
            score_values = np.array(s)
        else:
            score_values += np.array(s)

    score_values /= len(data_loader)
    hv = HyperVolume(reference_point)

    # Computing hyper-volume for many objectives is expensive
    volume = hv.compute(score_values) if score_values.shape[1] < 5 else -1

    if len(scores) == 2:
        pareto_front = utils.ParetoFront(
            [s.__class__.__name__ for s in scores], logdir,
            "{}_{:03d}".format(split, e))
        pareto_front.append(score_values)
        pareto_front.plot()

    result = {
        "scores": score_values.tolist(),
        "hv": volume,
    }

    if split == 'val':
        if volume > volume_max:
            volume_max = volume
            epoch_max = e

        result.update({
            "max_epoch_so_far": epoch_max,
            "max_volume_so_far": volume_max,
            "training_time_so_far": elapsed_time,
        })
    elif split == 'test':
        result.update({
            "training_time_so_far": elapsed_time,
        })

    result.update(method.log())

    if f"epoch_{e}" in result_dict[f"start_{j}"]:
        result_dict[f"start_{j}"][f"epoch_{e}"].update(result)
    else:
        result_dict[f"start_{j}"][f"epoch_{e}"] = result

    with open(pathlib.Path(logdir) / f"{split}_results.json", "w") as file:
        json.dump(result_dict, file)

    return result_dict
Code example #17
def front_objs(dag, keys):
	hv = HyperVolume(hv_ref)
	d, nz = fetch(dag, keys, "results")
	for k, v in d.items():
		d[k] = max(v, key=lambda x: hv.compute(map(nz, x)))
	return d
Code example #18
def best_hvs(dag, keys):
	hv = HyperVolume(hv_ref)
	d, nz = fetch(dag, keys, "results")
	for k, v in d.items():
		d[k] = max(hv.compute(map(nz, x)) for x in v)
	return dag, d
Code example #19
    def step_size_control(self):
        # ------------------------------ IMPORTANT -------------------------------
        # The step-size adaptation is of vital importance here!!!
        # general control rule to improve the stability: when a point tries to merge
        # into a front that dominates it in the last iteration, the step-size
        # of this particular point is set to the median step-size of the front.
        if hasattr(self, 'dominance_track_old'):
            for i, rank in enumerate(self.dominance_track):
                if rank != self.dominance_track_old[i]:
                    idx = self.fronts[rank]
                    mean_step_size = np.median(self.individual_step_size[idx])
                    self.individual_step_size[i] = mean_step_size

        self.dominance_track_old = deepcopy(self.dominance_track)

        #==============================================================================
        # step-size control method 1: detection of oscillating HV
        # works in very primitive cases, needs more testing
        # It requires hypervolume indicator computation, which is time-consuming
        #==============================================================================
        if 11 < 2:
            front = self.fitness[:, self.pareto_front]
            hv = HyperVolume(-self.ref[:, 0])
            self.hv_history[self.itercount % 10] = hv.compute(front.T.tolist())

            if (self.dominated_steer == 'NDS' and len(self.fronts) == 1) or \
                (not self.dominated_steer == 'NDS' and len(self.idx_ZU) == 0):
                if len(np.unique(self.hv_history)) == 2:
                    self.step_size *= 0.8

        #==============================================================================
        # Step-size control method 2: cumulative step-size control
        # It works reasonably well across many tests and does not require much
        # additional computational time
        #==============================================================================
        if self.itercount != 0:
            # the learning rate setting is largely affected by the amount of
            # oscillation: the smaller this value is, the larger the oscillation it
            # can handle; however, a smaller value also implies a slower learning rate
            alpha = 0.7  # used for general purpose
            # alpha = 0.5     # currently used for Explorative Landscape Analysis
            c = 0.2
            if 11 < 2:
                if self.dominated_steer == 'NDS':
                    control_set = range(self.mu)
                else:
                    control_set = self.idx_P
            else:
                # TODO: verify this: applying the cumulative step-size control to all
                # the search points
                control_set = range(self.mu)

            if 1 < 2:
                from scipy.spatial.distance import cdist
                for idx in control_set:
                    self.inner_product[idx] = (1 - c) * self.inner_product[idx] + \
                        c * np.inner(self.path[:, idx], self.gradient_norm[:, idx])

                    if 11 < 2:
                        # step-size control rule similar to 1/5-rule in ES
                        if self.inner_product[idx] < 0:
                            self.individual_step_size[idx] *= alpha
                        else:
                            step_size_ = self.individual_step_size[idx] / alpha
                            self.individual_step_size[idx] = np.min(
                                [np.inf * self.step_size, step_size_])

                    # control the change rate of the step-size by passing the cumulative
                    # dot product into the exponential function
                    step_size_ = self.individual_step_size[idx] * \
                        np.exp((self.inner_product[idx])*alpha)

                    # put an upper bound on the adaptive step-size to avoid it becoming
                    # too large! The upper bound is calculated as the distance from one
                    # point to its nearest neighbour in the decision space.
                    _ = [
                        i for i, front in enumerate(self.fronts)
                        if idx in front
                    ][0]
                    front = self.fronts[_]
                    if len(front) != 1:
                        __ = list(set(front) - set([idx]))
                        dis = cdist(np.atleast_2d(self.pop[:, idx]),
                                    self.pop[:, __].T)
                        step_size_ub = 0.7 * np.min(dis)
                    else:
                        step_size_ub = 4. * self.step_size

                    self.individual_step_size[idx] = np.min(
                        [step_size_ub, step_size_])
                    self.path[:, idx] = self.gradient_norm[:, idx]

        #==============================================================================
        # step-size control method 3: exploit the backtracking line search to find
        # the optimal step-size setting; works but requires many more function evaluations
        #==============================================================================
        if 11 < 2:
            for idx in self.idx_P:
                self.individual_step_size[
                    idx] = self.__backtracing_line_search(idx)
Code example #20
    def __init__(self,
                 dim_d,
                 dim_o,
                 lb,
                 ub,
                 mu=40,
                 fitness=None,
                 gradient=None,
                 ref=None,
                 initial_step_size=0.1,
                 maximize=True,
                 sampling='uniform',
                 adaptive_step_size=True,
                 verbose=False,
                 **kwargs):
        """
        Hypervolume Indicator Gradient Ascent Algorithm class

        Parameters
        ----------

        dim_d : integer
            decision space dimension

        dim_o : integer
            objective space dimension

        lb : array
            lower bound of the search domain

        ub : array
            upper bound of the search domain

        mu :  integer
            the size of the Pareto approximation set

        fitness : callable or list of callables
            the (vector-evaluated) objective function

        gradient : callable or list of callables (functions)
            the gradient (Jacobian) of the objective function

        ref : array or list 
            the reference point

        initial_step_size : numeric or string
            the initial step size; it can be a string subject to evaluation
            
        maximize : boolean or list of boolean
            whether the objective functions are subject to maximization. If it is a
            list, it specifies the maximization option per objective dimension

        sampling : string
            the method used in the initial sampling of the approximation set

        adaptive_step_size : boolean
            whether to enable adaptive control of the step sizes; enabled by default

        verbose : boolean
            controls the verbosity

        kwargs: additional parameters, including:
            steer_dominated : string
                the method to steer (move) the dominated points. Available options are
                'NDS', 'M1', 'M2', 'M3', 'M4', 'M5'. 'NDS' stands for Non-dominated
                Sorting and is enabled by default. For details of these methods,
                please refer to paper [2] below.

            enable_dominated : boolean
                whether to include the dominated points in the population;
                for test purposes only

            normalize : boolean
                whether the gradient is normalized or not

        References:

        .. [1] Wang H., Deutz A., Emmerich M.T.M. & Bäck T.H.W., Hypervolume Indicator 
            Gradient Ascent Multi-objective Optimization. In Lecture Notes in Computer 
            Science 10173:654-669. DOI: 10.1007/978-3-319-54157-0_44. In book: 
            Evolutionary Multi-Criterion Optimization, pp.654-669.

        .. [2] Wang H., Ren Y., Deutz A. & Emmerich M.T.M., On Steering 
            Dominated Points in Hypervolume Indicator Gradient Ascent for Bi-Objective 
            Optimization. In: Schuetze O., Trujillo L., Legrand P., Maldonado Y. (Eds.) 
            NEO 2015: Results of the Numerical and Evolutionary Optimization Workshop NEO 
            2015 held at September 23-25 2015 in Tijuana, Mexico. no. Studies in 
            Computational Intelligence 663. International Publishing: Springer.
                

        """
        self.dim_d = dim_d
        self.dim_o = dim_o
        self.mu = mu
        self.verbose = verbose

        assert self.mu > 1  # single point is not allowed
        assert sampling in ['uniform', 'LHS', 'grid']
        self.sampling = sampling

        # step-size settings
        self.step_size = eval(initial_step_size) if isinstance(
            initial_step_size, str) else initial_step_size
        self.individual_step_size = np.repeat(self.step_size, self.mu)
        self.adaptive_step_size = adaptive_step_size

        # setup boundary in decision space
        lb, ub = atleast_2d(lb), atleast_2d(ub)
        self.lb = lb.T if lb.shape[1] != 1 else lb
        self.ub = ub.T if ub.shape[1] != 1 else ub

        # are objective functions subject to maximization
        if hasattr(maximize, '__iter__') and len(maximize) != self.dim_o:
            raise ValueError(
                'maximize should have the same length as fitnessfuncs')
        elif isinstance(maximize, bool):
            maximize = [maximize] * self.dim_o
        self.maximize = np.atleast_1d(maximize)

        # setup reference point
        self.ref = np.atleast_2d(ref).reshape(-1, 1)
        self.ref[~self.maximize, 0] = -self.ref[~self.maximize, 0]

        # setup the fitness functions
        if isinstance(fitness, (list, tuple)):
            if len(fitness) != self.dim_o:
            raise ValueError('fitness: shape {} is inconsistent \
                with dim_o: {}'.format(len(fitness), self.dim_o))
            self.fitness_func = fitness
            self.vec_eval_fitness = False
        elif hasattr(fitness, '__call__'):
            self.fitness_func = fitness
            self.vec_eval_fitness = True
        else:
            raise Exception('fitness should be either a list of functions or \
                a vector evaluated function!')

        # setup fitness gradient functions
        if isinstance(gradient, (list, tuple)):
            if len(gradient) != self.dim_o:
                raise ValueError('fitness_grad: shape {} is inconsistent \
                    with dim_o: {}'.format(len(gradient), self.dim_o))
            self.grad_func = gradient
            self.vec_eval_grad = False
        elif hasattr(gradient, '__call__'):
            self.grad_func = self.__obj_dx(gradient)
            self.vec_eval_grad = True
        else:
            raise Exception(
                'fitness_grad should be either a list of functions or \
                a matrix evaluated function!')

        # setup the performance metric functions for convergence detection
        try:
            self.performance_metric_func = kwargs['performance_metric']
            self.target_perf_metric = kwargs['target']
        except KeyError:
            self.performance_metric_func = None
            self.target_perf_metric = None

        self.normalize = kwargs.get('normalize', True)
        self.enable_dominated = kwargs.get('enable_dominated', True)
        self.maxiter = kwargs.get('maxiter', inf)

        # dominated_steer for moving non-differentiable or zero-derivative points
        try:
            self.dominated_steer = kwargs['dominated_steer']
            assert self.dominated_steer in ['M' + str(i)
                                            for i in range(1, 6)] + ['NDS']
        except KeyError:
            self.dominated_steer = 'NDS'

        self.pop = None
        self.pop_old = None  # for potential rollback

        # create some internal variables
        self.gradient = zeros((self.dim_d, self.mu))
        self.gradient_norm = zeros((self.dim_d, self.mu))

        # list recording on which condition the optimizer terminates
        self.stop_list = []

        # iteration counter
        self.itercount = 0

        # step-size control mechanism
        self.hv_history = np.random.rand(10)
        self.hv = HyperVolume(-self.ref[:, 0])

        # step-size control mechanism
        self.path = zeros((self.dim_d, self.mu))
        self.inner_product = zeros(self.mu)

        # Assuming a smooth landscape that is differentiable almost everywhere,
        # we need to record the extreme points that are non-differentiable
        self.non_diff_point = []

        # dynamic reference points
        self.dynamic_ref = []

        # dominance rank of the points
        self.dominance_track = zeros(self.mu, dtype='int')

        # record the working states of all search points
        self._states = array(['NOT-INIT'] * self.mu, dtype='|S5')
Code example #21
File: numpyex.py Project: KariVillagran/memetico
def computeHyperVolumeIns(maxMinValues, fronteras, instance):
	minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
	minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
	#print minObj1, maxObj1, minObj2, maxObj2
	difObj1 = maxObj1 - minObj1
	difObj2 = maxObj2 - minObj2
	# Stored in this list because the results will go inside the object... in normalizedValues...
	listaHyperVol = []
	normalizados = []
	print(len(fronteras))
	h = input("")
	listOfIndexes = []
	for i in range(len(fronteras)):
		print(i)
		if i == instance:
			print("length", len(fronteras[i]))
			# HV is computed every 10 generations, so at the end there should be 1300 HV values
			
			for j in range(1,len(fronteras[i])):
				normValues = []
				listOfIndexes.append(j)
				#print j
				#h = input("SHOULD BE 13,000; IF SO I'M FINE")
				for elemento in fronteras[i][j]:
					#h = input(". . . .")
					values = []
					cost1 = elemento[0]
					cost2 = elemento[1]
					if difObj1 == 0:
						valueObj1 = 0
					else:
						valueObj1 = (cost1 - minObj1)/difObj1
					if difObj2 == 0:
						valueObj2 = 0
					else:
						valueObj2 = (cost2 - minObj2)/difObj2
					values.extend([valueObj1, valueObj2])
					normValues.append(values)
				normalizados.append(normValues)

		#print "valores normalizados"
		#for value in normValues:
		#   print value
				#normalizados.append(normValues)
		#self.normalizedValues.append(normValues)
	#h = input("")
	# normValues now holds all the collected data
	print("len norm:", len(normalizados))
	#for i in range(len(normalizados)):
	##  print normalizados[i]
	# h = input(". . . .")
	referencePoint = [2,2]
	for i in range(len(normalizados)):
		print("Computing HV . . . .", i)
		hv = HyperVolume(referencePoint)
		volume = hv.compute(normalizados[i])
		listaHyperVol.append(volume)
	#print volume
	#io = input(". . . .")
		
	results = []
	results.append(listOfIndexes)
	results.append(listaHyperVol)
	#print("HV values for each run: ", listaHyperVol)
	print(len(listOfIndexes))
	print(len(listaHyperVol))
	return results
Code example #22
                              alpha=0.5)
    line10, line11 = ax1.plot([], [],
                              'or', [], [],
                              'ob',
                              ms=8,
                              mec='none',
                              alpha=0.5)

#    line00.set_clip_on(False)
#    line01.set_clip_on(False)
#    line10.set_clip_on(False)
#    line11.set_clip_on(False)

# ------------------------------- The animation ----------------------------------
# hyper-volume calculation
hv = HyperVolume(ref2)

delay = 10
toggle = True
t_start = time.time()
fps_movie = 15


def init():
    fps_text.set_text('')
    hv_text.set_text('')

    if not plot_layers:
        line00.set_data([], [])
        #        line01.set_data([], [])
        line10.set_data([], [])
Code example #23
File: eval_checkpoints.py Project: Jsllmj/cosmos
def evaluate(j, e, solver, scores1, scores2, data_loader, logdir,
             reference_point, split, result_dict):
    """
    Do one forward pass through the dataloader and log the scores.
    """
    assert split in ['train', 'val', 'test']

    # mode = 'mcr'
    mode = 'pf'

    if mode == 'pf':
        # generate Pareto front
        assert len(scores1) == len(
            scores2
        ) <= 3, "Cannot generate circle points for more than 3 dimensions."
        n_test_rays = 25
        test_rays = utils.circle_points(n_test_rays, dim=len(scores1))
    elif mode == 'mcr':
        # calculate the MCRs using a middle ray
        test_rays = np.ones((1, len(scores1)))
        test_rays /= test_rays.sum(axis=1).reshape(1, 1)
    else:
        raise ValueError()

    print(test_rays[0])

    # we want to calculate the loss and MCR
    score_values1 = np.array([])
    score_values2 = np.array([])

    for k, batch in enumerate(data_loader):
        print(f'eval batch {k+1} of {len(data_loader)}')
        batch = utils.dict_to_cuda(batch)

        # more than one for some solvers
        s1 = []
        s2 = []
        for l in solver.eval_step(batch, test_rays):
            batch.update(l)
            s1.append([s(**batch) for s in scores1])
            s2.append([s(**batch) for s in scores2])
        if score_values1.size == 0:
            score_values1 = np.array(s1)
            score_values2 = np.array(s2)
        else:
            score_values1 += np.array(s1)
            score_values2 += np.array(s2)

    score_values1 /= len(data_loader)
    score_values2 /= len(data_loader)

    hv = HyperVolume(reference_point)

    if mode == 'pf':
        pareto_front = utils.ParetoFront(
            [s.__class__.__name__ for s in scores1], logdir,
            "{}_{:03d}".format(split, e))
        pareto_front.append(score_values1)
        pareto_front.plot()
        volume = hv.compute(score_values1)
    else:
        volume = -1

    result = {
        "scores_loss": score_values1.tolist(),
        "scores_mcr": score_values2.tolist(),
        "hv": volume,
        "task": j,
        # expected by some plotting code
        "max_epoch_so_far": -1,
        "max_volume_so_far": -1,
        "training_time_so_far": -1,
    }

    result.update(solver.log())

    result_dict[f"start_{j}"][f"epoch_{e}"] = result

    with open(pathlib.Path(logdir) / f"{split}_results.json", "w") as file:
        json.dump(result_dict, file)

    return result_dict