Example #1
  def d_Run(self, ngen):
      volume = []
      spea2_count_hypervolume = []
      nsga2_count_hypervolume = []
      referencePoint = [11, 11]
      hv = HyperVolume(referencePoint)
      self.Evaluate()
      gplot = Gnuplot.Gnuplot()
      print "Starting"
      for i in xrange(ngen):
          print "Iteration  ", i
          self.selected_set = 50
          if i % 10 == 0:
              print "Change maded"
              self.selected_set = 100
  #            gg = self.Make_First_Pop()
 #             self.ListNonDominated = self.ListNonDominated + self.Make_First_Pop()
          global rho
          rho = rho + 1
          self.Evaluate_After()
          d = Gnuplot.Data(self.ApplyFunctions(self.ListNonDominated))
          gplot.plot(d)
          front = self.ApplyFunctions(self.front_0)
          volume.append(hv.compute(front))
          nsga2_count_hypervolume.append(
              [self.nsga2_count, hv.compute(front)])
          spea2_count_hypervolume.append(
              [self.spea2_count, hv.compute(front)])
      return nsga2_count_hypervolume, spea2_count_hypervolume
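Note: every example on this page drives the same two-call API — construct HyperVolume with a reference point, then pass a list of objective vectors to compute(). A minimal sketch, assuming Simon Wessing's HyperVolume implementation (the module name hv below is an assumption) and minimization objectives, so the reference point must be weakly dominated by every front point:

# Minimal usage sketch; the module name `hv` is an assumption.
from hv import HyperVolume

referencePoint = [11, 11]
front = [[1.0, 9.0], [3.0, 5.0], [7.0, 2.0]]  # mutually non-dominated points
hv = HyperVolume(referencePoint)
print(hv.compute(front))  # volume dominated by the front w.r.t. [11, 11]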
Example #2
def computeHV(maxMinValues, fronteras):
	minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
	minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
	#print minObj1, maxObj1, minObj2, maxObj2
	difObj1 = maxObj1 - minObj1
	difObj2 = maxObj2 - minObj2
	# Stored in this list because the results will end up inside the object... in normalizedValues...
	listaHyperVol = []
	normalizados = []
	for i in range(len(fronteras)):
		#print i
		normValues = []
		for elemento in fronteras[i]:
			#print elemento
			values = []
			cost1 = elemento[0]
			cost2 = elemento[1]
			if difObj1 == 0:
				valueObj1 = 0
			else:
				valueObj1 = (cost1 - minObj1)/difObj1
			if difObj2 == 0:
				valueObj2 = 0
			else: 
				valueObj2 = (cost2 - minObj2)/difObj2
			values.append(valueObj1)
			values.append(valueObj2)
			normValues.append(values)
		normalizados.append(normValues)
	referencePoint = [2,2]
	for i in range(len(normalizados)):
		hv = HyperVolume(referencePoint)
		volume = hv.compute(normalizados[i])
		listaHyperVol.append(volume)
	#print "Valores de HV para cada run: ", listaHyperVol    
	return listaHyperVol	
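For reference, computeHV above expects maxMinValues as [[minObj1, maxObj1], [minObj2, maxObj2]] and fronteras as a list of fronts, one per run. A toy call with hypothetical data, just to show the expected shapes:

# Toy call of computeHV (hypothetical data, only to show the expected shapes).
maxMinValues = [[0.0, 10.0], [0.0, 4.0]]   # [min, max] per objective
fronteras = [
    [[1.0, 3.0], [4.0, 2.0], [8.0, 1.0]],  # front of run 0
    [[2.0, 3.5], [5.0, 1.5]],              # front of run 1
]
print(computeHV(maxMinValues, fronteras))  # one HV value per run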
Example #3
def print_front(front, ideal, final=False, type=0):

    plt.scatter(ideal[0], ideal[1], c='r', alpha=0.1, linewidths=0.01)
    plt.scatter(front[0], front[1], c='b', linewidths=0.01)
    front = transform_points(front)
    if final:
        if type != 0:
            nsga_points = get_points("best_pop_cf6_4.out")
        else:
            nsga_points = get_points("best_pop.out")

        plt.scatter(nsga_points[0],
                    nsga_points[1],
                    c="y",
                    alpha=0.8,
                    linewidth=0.01)
        referencePoint = [1, 1]
        hyperVolume = HyperVolume(referencePoint)
        nsga_points = transform_points(nsga_points)
        result = hyperVolume.compute(front)
        result_nsga = hyperVolume.compute(nsga_points)
        coverage_f2_f1 = calculate_coverage(front, nsga_points)
        coverage_f1_f2 = calculate_coverage(nsga_points, front)
        print("Coverage my front over other front = " + str(coverage_f2_f1))
        print("Coverage other front over my front = " + str(coverage_f1_f2))
        print("Hypervolume my solution = " + str(result))
        print("Hypervolume other solution = " + str(result_nsga))
    plt.show()
Example #4
def run_optimizer(optimizer, maxiter, mu, ref):

    # hyper-volume calculation
    hv = HyperVolume(ref)

    # track the population
    pop_track = DataFrame(zeros((maxiter, 2 * mu)), \
        columns=['x^{}_{}'.format(i, j) for i in range(1, mu+1) for j in [1, 2]])
    pop_f_track = DataFrame(zeros((maxiter, 2 * mu)), \
        columns=['f^{}_{}'.format(i, j) for i in range(1, mu+1) for j in [1, 2]])
    dominance_track = DataFrame(zeros((maxiter, mu), dtype='int'),
                                columns=range(1, mu + 1))

    hv_track = zeros(maxiter)
    hist_sigma = zeros((mu, maxiter))

    # recording the trajectories of dominated points
    pop_x_traject = {}
    pop_f_traject = {}

    for i in range(maxiter):
        # invoke the optimizer by step
        ND_idx, fitness = optimizer.step()
        pop = deepcopy(optimizer.pop)
        fitness = deepcopy(fitness)
        fronts_idx = optimizer.fronts

        # compute the hypervolume indicator
        PF = fitness[:, fronts_idx[0]]
        hv_track[i] = hv.compute(PF.T)

        # tracking the whole population
        pop_track.loc[i] = pop.reshape(1, -1, order='F')
        pop_f_track.loc[i] = fitness.reshape(1, -1, order='F')
        for j, f in enumerate(fronts_idx):
            dominance_track.iloc[i, f] = j

        # record the trajectory of dominated points
        for j, idx in enumerate(range(optimizer.mu)):
            if idx in pop_f_traject:
                if any(pop_f_traject[idx][-1, :] != fitness[:, j]):
                    pop_f_traject[idx] = np.vstack(
                        (pop_f_traject[idx], fitness[:, j]))
            else:
                pop_f_traject[idx] = np.atleast_2d(fitness[:, j])

            if idx in pop_x_traject:
                if any(pop_x_traject[idx][-1, :] != pop[:, j]):
                    pop_x_traject[idx] = np.vstack(
                        (pop_x_traject[idx], pop[:, j]))
            else:
                pop_x_traject[idx] = np.atleast_2d(pop[:, j])

    return pop_track, pop_f_track, dominance_track, pop_x_traject, pop_f_traject, \
        hv_track, hist_sigma
Example #5
def evolve_history(dag, keys, hv_only=True):
    hv = HyperVolume(hv_ref)
    d, nz = fetch(dag, keys, "trace", lambda x: x[-1])
    t = {}
    for k, v in d.items():
        best = max(v, key=lambda x: hv.compute(list(map(nz, x[-1]))))
        if hv_only:
            t[k] = [hv.compute(list(map(nz, x))) for x in best]
        else:
            t[k] = best
    return t
Example #6
    def Hypervolume3D(self, front, refpoint):

        # collect the fitness values of the front into a list of tuples
        local_fit = []
        for i in front:
            local_fit.append((i.fitness.values[0], i.fitness.values[1], i.fitness.values[2]))

        # evaluate the hypervolume, normalized by the volume of the reference box
        hyper = HyperVolume(refpoint)
        aux = hyper.compute(local_fit)
        return aux / (refpoint[0] * refpoint[1] * refpoint[2])
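Dividing by refpoint[0]*refpoint[1]*refpoint[2] normalizes the indicator by the volume of the box spanned by the origin and the reference point, so the result lies in [0, 1] for non-negative objectives. The same idea without the DEAP-style individuals, as a sketch:

# Hypothetical helper, not part of the original example.
def normalized_hv3d(points, refpoint):
    hv = HyperVolume(refpoint)
    box = refpoint[0] * refpoint[1] * refpoint[2]  # volume of the reference box
    return hv.compute(points) / box

# e.g. normalized_hv3d([(1.0, 2.0, 3.0), (2.0, 1.0, 2.0)], [4, 4, 4])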
Example #7
def score(chro):
    front = []
    for i in range(len(chro)):
        if (chro[i][747] == 1):
            front.append(chro[i][745:747])
    # remove duplicate points from the front
    arr = np.array(front)
    front = np.array(list(set([tuple(t) for t in arr]))).tolist()
    # score the front by its hypervolume
    referencePoint = [700000, 1000]
    hyperVolume = HyperVolume(referencePoint)
    result = hyperVolume.compute(front)
    return result
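The set-of-tuples round trip above removes duplicate points but discards their order. A NumPy alternative, as a sketch (np.unique with axis=0 sorts the rows):

# Equivalent deduplication with NumPy (note: np.unique sorts the rows).
import numpy as np

arr = np.array([[1.0, 2.0], [1.0, 2.0], [3.0, 0.5]])
front = np.unique(arr, axis=0).tolist()  # [[1.0, 2.0], [3.0, 0.5]]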
Example #8
def recHV(population, refs):
    truefront = emo.selFinal(population, 200)
    if len(truefront) == 1 and not truefront[0].isFeasible:
        return truefront, 0
    else:
        dcfront = copy.deepcopy(truefront)
        tfPoint = []
        for i, ind in enumerate(dcfront):
            if not checkDuplication(ind, dcfront[:i]):
                tfPoint.append([ind.fitness.values[0], ind.fitness.values[1]])
        hy = HyperVolume(refs)
        HV = hy.compute(tfPoint)
        return truefront, HV
Example #9
def calculate_hv(intervals_list, hv_reference_point):
    referencePoint = hv_reference_point
    hyperVolume = HyperVolume(referencePoint)

    front = []
    for interval in intervals_list:
        for solution in interval.current_solutions:
            front.append([
                solution.travel_time / hv_normalization_factor, solution.price
            ])
    front.sort()
    front = [key for key, _ in itertools.groupby(front)]
    result = hyperVolume.compute(front)
    return result
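front.sort() followed by itertools.groupby is another deduplication idiom: sorting brings equal points next to each other and groupby keeps one representative per group. The same two lines in isolation, as a sanity check:

# Sort + groupby deduplication in isolation.
import itertools

front = [[2, 3], [1, 5], [2, 3]]
front.sort()
front = [key for key, _ in itertools.groupby(front)]
print(front)  # [[1, 5], [2, 3]]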
Example #10
def slave():
    """This function performs the actual heavy computation
    """
    # Get the master communicator
    comm = MPI.Comm.Get_parent()
    
    optimizer, prob = comm.bcast(None, root=0)
    
    n_approximation = 1000
    ref = prob.ref
    pareto_f2 = prob.pareto_front()
    hv = HyperVolume(ref)
    
    f1 = np.linspace(0, 1, n_approximation)
    f2 = pareto_f2(f1)
    pareto_front = np.vstack([f1, f2]).T
    
    # run the algorithm
    optimizer.optimize()
        
    front_idx = optimizer.pareto_front
    front = optimizer.fitness[:, front_idx].T
    
    # Compute the performance metrics
    volume = hv.compute(front)
    n_point = len(front_idx)
    
    convergence = 0
    for p in front:
        dis = np.sqrt(np.sum((pareto_front - p) ** 2.0, axis=1))
        convergence += np.min(dis)
    
    convergence /= len(front_idx)
    
    # synchronization...
    comm.Barrier()
    
    output = {
             'hv': volume,
             'convergence': convergence,
             'n_point': n_point
             }
    
    # return performance metrics
    comm.gather(output, root=0)
    
    # free all slave processes
    comm.Disconnect()
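The convergence loop above computes a generational-distance style metric: for each obtained front point, the Euclidean distance to its nearest neighbour on the densely sampled true Pareto front, averaged over the front. The same computation in isolation, as a sketch:

# Generational-distance style metric in isolation (a sketch, not the original API).
import numpy as np

def convergence_metric(front, pareto_front):
    # mean over front points of the distance to the nearest true-front point
    total = 0.0
    for p in front:
        dis = np.sqrt(np.sum((pareto_front - p) ** 2.0, axis=1))
        total += np.min(dis)
    return total / len(front)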
Example #11
def computeHyperVolume(maxMinValues, fronteras, generacion):
	minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
	minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
	#print minObj1, maxObj1, minObj2, maxObj2
	difObj1 = maxObj1 - minObj1
	difObj2 = maxObj2 - minObj2
	# Stored in this list because the results will end up inside the object... in normalizedValues...
	listaHyperVol = []
	normalizados = []
	#print len(fronteras)
	#h = input("")
	for i in range(len(fronteras)):
		normValues = []
		for j, frontera in enumerate(fronteras[i]):
			if j == generacion:
				for elemento in frontera:
					values = []
					cost1 = elemento[0]
					cost2 = elemento[1]
					if difObj1 == 0:
						valueObj1 = 0
					else:
						valueObj1 = (cost1 - minObj1)/difObj1
					if difObj2 == 0:
						valueObj2 = 0
					else:
						valueObj2 = (cost2 - minObj2)/difObj2
					values.append(valueObj1)
					values.append(valueObj2)
					normValues.append(values)
				normalizados.append(normValues)
		#self.normalizedValues.append(normValues)
	#h = input("")
	referencePoint = [2,2]
	for i in range(len(normalizados)):
		hv = HyperVolume(referencePoint)
		volume = hv.compute(normalizados[i])
		listaHyperVol.append(volume)
	print "Valores de HV para cada run: ", listaHyperVol    
	print len(listaHyperVol)
	return listaHyperVol
Example #12
def compute_pyhv(approximation_set, reference_point):
    """
    Returns the hypervolume of the approximation set with respect to the
    reference point, based on Simon Wessing's code.
    """
    #referencePoint = [2, 2, 2]
    hv = HyperVolume(reference_point)
    #front = [[1,0,1], [0,1,0]]
    return hv.compute(approximation_set)
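The commented-out values in compute_pyhv hint at the intended call shape; restated as a runnable call (assuming HyperVolume is importable):

approximation_set = [[1, 0, 1], [0, 1, 0]]  # values taken from the comments above
print(compute_pyhv(approximation_set, [2, 2, 2]))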
Example #13
    def compute_fitness(self):
        # Step 0 - Obtain fevals of front
        front = deepcopy(self.contents)
        nrec = len(front)
        if nrec == 1:
            self.contents[0].fitness = 1
        else:
            fvals = [rec.fx for rec in front]
            nobj = len(front[0].fx)
            # Step 1 - Normalize Objectives
            normalized_fvals = normalize_objectives(fvals)
            # Step 2 - Compute Hypervolume Contribution
            hv = HyperVolume(1.1 * np.ones(nobj))
            base_hv = hv.compute(np.asarray(normalized_fvals))
            for i in range(nrec):
                fval_without = deepcopy(normalized_fvals)
                del fval_without[i]  # drop point i by index, not by value
                new_hv = hv.compute(np.asarray(fval_without))
                hv_contrib = base_hv - new_hv
                self.contents[i].fitness = hv_contrib
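The leave-one-out pattern above (total hypervolume minus the hypervolume without point i) is the exclusive hypervolume contribution. A self-contained sketch on plain lists, assuming objectives normalized into [0, 1] and the same 1.1 reference point as in the method:

# Standalone hypervolume-contribution sketch (hypothetical helper).
def hv_contributions(front, referencePoint):
    hv = HyperVolume(referencePoint)
    base = hv.compute(front)
    return [base - hv.compute(front[:i] + front[i + 1:])
            for i in range(len(front))]

# e.g. hv_contributions([[0.1, 0.9], [0.5, 0.5], [0.9, 0.1]], [1.1, 1.1])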
Example #14
    def Run(self, ngen):
        volume = []
        spea2_count_hypervolume = []
        nsga2_count_hypervolume = []
        referencePoint = [11, 11]
        hv = HyperVolume(referencePoint)
        self.Evaluate()
        gplot = Gnuplot.Gnuplot()
        print("Starting")
        for i in range(ngen):
            print("Iteration ", i)
            self.Evaluate_After()
            d = Gnuplot.Data(self.ApplyFunctions(self.ListNonDominated))
            gplot.plot(d)
            front = self.ApplyFunctions(self.front_0)
            volume.append(hv.compute(front))
            nsga2_count_hypervolume.append([self.nsga2_count, hv.compute(front)])
            spea2_count_hypervolume.append([self.spea2_count, hv.compute(front)])
        return nsga2_count_hypervolume, spea2_count_hypervolume
Example #15
    def compute_hv_fitness(self):
        # Step 0 - Obtain fevals of front
        front = deepcopy(self.F_box)
        nobj, nrec = front.shape
        if nrec == 1:
            self.contents[0].fitness = 1
        else:
            fvals = np.transpose(front)
            fvals = fvals.tolist()
            # Step 1 - Normalize Objectives
            normalized_fvals = normalize_objectives(fvals)
            # Step 2 - Compute Hypervolume Contribution
            hv = HyperVolume(1.1 * np.ones(nobj))
            base_hv = hv.compute(np.asarray(normalized_fvals))
            for i in range(nrec):
                fval_without = deepcopy(normalized_fvals)
                del fval_without[i]  # drop point i by index, not by value
                new_hv = hv.compute(np.asarray(fval_without))
                hv_contrib = base_hv - new_hv
                self.contents[i].fitness = hv_contrib
Example #16
def fetch_rts(dag):
    d = {s: {} for s in rts}
    nzs = {s: Normaliser() for s in rts}
    for k, v in query(dag, ["algorithm", "runtime_scale"], "results"):
        alg, s = k.split("-")
        s = float(s)
        if alg not in d[s]:
            d[s][alg] = [v]
        else:
            d[s][alg].append(v)
        nzs[s].update(v)

    hv = HyperVolume(hv_ref)
    for s, dd in d.items():
        for alg, vs in dd.items():
            best = max(hv.compute(list(map(nzs[s], v))) for v in vs)
            d[s][alg] = best
    res = {}
    for alg in d[rts[0]].keys():
        res[alg] = [d[s][alg] for s in rts]
    return res
Example #17
    def computeHVPO(self, maxMinValues, frontera):
        minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
        minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
        #print minObj1, maxObj1, minObj2, maxObj2
        difObj1 = maxObj1 - minObj1
        difObj2 = maxObj2 - minObj2
        if difObj1 == 0:
            print("The hypervolume value is:", 4.0)
            self.hyperVolumePO.append(4.0)
        # Stored in this list because the results will end up inside the object... in normalizedValues...
        # normalizados = []
        else:

            normValues = []
            for elemento in frontera:

                #print elemento
                values = []
                cost1 = elemento[0]
                cost2 = elemento[1]
                valueObj1 = (cost1 - minObj1) / difObj1
                valueObj2 = (cost2 - minObj2) / difObj2
                values.append(valueObj1)
                values.append(valueObj2)
                normValues.append(values)
            #print "valores normalizados"
            #for value in normValues:
            #   print value
            #normalizados.append(normValues)

            #self.normalizedValues.append(normValues)
            referencePoint = [2, 2]
            #for i in range(len(normalizados)):
            hv = HyperVolume(referencePoint)
            volume = hv.compute(normValues)
            self.hyperVolumePO.append(volume)
        for volume in self.hyperVolumePO:
            print "El HyperVolumen es: ", volume
        return 1
Example #18
def evaluate(j, e, solver, scores1, scores2, data_loader, logdir,
             reference_point, split, result_dict):
    """
    Do one forward pass through the dataloader and log the scores.
    """
    assert split in ['train', 'val', 'test']

    # mode = 'mcr'
    mode = 'pf'

    if mode == 'pf':
        # generate Pareto front
        assert len(scores1) == len(
            scores2
        ) <= 3, "Cannot generate circle points for more than 3 dimensions."
        n_test_rays = 25
        test_rays = utils.circle_points(n_test_rays, dim=len(scores1))
    elif mode == 'mcr':
        # calculate the MCRs using a middle ray
        test_rays = np.ones((1, len(scores1)))
        test_rays /= test_rays.sum(axis=1).reshape(1, 1)
    else:
        raise ValueError()

    print(test_rays[0])

    # we want to calculate the loss and MCR
    score_values1 = np.array([])
    score_values2 = np.array([])

    for k, batch in enumerate(data_loader):
        print(f'eval batch {k+1} of {len(data_loader)}')
        batch = utils.dict_to_cuda(batch)

        # more than one for some solvers
        s1 = []
        s2 = []
        for l in solver.eval_step(batch, test_rays):
            batch.update(l)
            s1.append([s(**batch) for s in scores1])
            s2.append([s(**batch) for s in scores2])
        if score_values1.size == 0:
            score_values1 = np.array(s1)
            score_values2 = np.array(s2)
        else:
            score_values1 += np.array(s1)
            score_values2 += np.array(s2)

    score_values1 /= len(data_loader)
    score_values2 /= len(data_loader)

    hv = HyperVolume(reference_point)

    if mode == 'pf':
        pareto_front = utils.ParetoFront(
            [s.__class__.__name__ for s in scores1], logdir,
            "{}_{:03d}".format(split, e))
        pareto_front.append(score_values1)
        pareto_front.plot()
        volume = hv.compute(score_values1)
    else:
        volume = -1

    result = {
        "scores_loss": score_values1.tolist(),
        "scores_mcr": score_values2.tolist(),
        "hv": volume,
        "task": j,
        # expected by some plotting code
        "max_epoch_so_far": -1,
        "max_volume_so_far": -1,
        "training_time_so_far": -1,
    }

    result.update(solver.log())

    result_dict[f"start_{j}"][f"epoch_{e}"] = result

    with open(pathlib.Path(logdir) / f"{split}_results.json", "w") as file:
        json.dump(result_dict, file)

    return result_dict
Example #19
def evaluate(j, e, method, scores, data_loader, logdir, reference_point, split,
             result_dict):
    assert split in ['train', 'val', 'test']
    global volume_max
    global epoch_max

    score_values = np.array([])
    for batch in data_loader:
        batch = utils.dict_to_cuda(batch)

        # more than one solution for some solvers
        s = []
        for l in method.eval_step(batch):
            batch.update(l)
            s.append([s(**batch) for s in scores])
        if score_values.size == 0:
            score_values = np.array(s)
        else:
            score_values += np.array(s)

    score_values /= len(data_loader)
    hv = HyperVolume(reference_point)

    # Computing hyper-volume for many objectives is expensive
    volume = hv.compute(score_values) if score_values.shape[1] < 5 else -1

    if len(scores) == 2:
        pareto_front = utils.ParetoFront(
            [s.__class__.__name__ for s in scores], logdir,
            "{}_{:03d}".format(split, e))
        pareto_front.append(score_values)
        pareto_front.plot()

    result = {
        "scores": score_values.tolist(),
        "hv": volume,
    }

    if split == 'val':
        if volume > volume_max:
            volume_max = volume
            epoch_max = e

        result.update({
            "max_epoch_so_far": epoch_max,
            "max_volume_so_far": volume_max,
            "training_time_so_far": elapsed_time,
        })
    elif split == 'test':
        result.update({
            "training_time_so_far": elapsed_time,
        })

    result.update(method.log())

    if f"epoch_{e}" in result_dict[f"start_{j}"]:
        result_dict[f"start_{j}"][f"epoch_{e}"].update(result)
    else:
        result_dict[f"start_{j}"][f"epoch_{e}"] = result

    with open(pathlib.Path(logdir) / f"{split}_results.json", "w") as file:
        json.dump(result_dict, file)

    return result_dict
Example #20
				masterValues[i][j] += num
				j += 1
		
		i += 1
	

	f.close()	
	first=False

if (args.hv != ''):
	print('Calculating hypervolume...')
	referencePoint = []	
	for h in range(len(bounds)):
		referencePoint.append(0)		
	
	hyperVolume = HyperVolume(referencePoint)

	newFront = []
	for p in front:
		if (len(p) > 0):
			newFront.append(p)

	front = newFront

	for p in front:
		if (len(p) > 0):		
			if (args.v):
				print ('p: {0}'.format(p))
			for h in range(len(bounds)):
				r = bounds[h]['max'] - bounds[h]['min']
				if (args.v):
Example #21
def best_hvs(dag, keys):
	hv = HyperVolume(hv_ref)
	d, nz = fetch(dag, keys, "results")
	for k, v in d.items():
		d[k] = max(hv.compute(list(map(nz, x))) for x in v)
	return dag, d
Example #22
    def select_points(self, front, xcand_nd, fhvals_nd, indices=None):

        # Use hypervolume contribution to select the next best
        # Step 1 - Normalize Objectives
        (M, l) = xcand_nd.shape
        temp_all = np.vstack((fhvals_nd, front))
        minpt = np.zeros(self.data.nobj)
        maxpt = np.zeros(self.data.nobj)
        for i in range(self.data.nobj):
            minpt[i] = np.min(temp_all[:,i])
            maxpt[i] = np.max(temp_all[:,i])
        normalized_front = np.asarray(normalize_objectives(front, minpt, maxpt))
        (N, temp) = normalized_front.shape
        normalized_cand_fh = np.asarray(normalize_objectives(fhvals_nd.tolist(), minpt, maxpt))

        # Step 2 - Make sure points already selected are not included in new points list
        if indices is not None:
            nd = list(range(N))
            dominated = []
            for index in indices:
                fvals = np.vstack((normalized_front, normalized_cand_fh[index,:]))
                (nd, dominated) = ND_Add(np.transpose(fvals), dominated, nd)
            normalized_front = fvals[nd,:]
            N = len(nd)

        # Step 3 - Compute Hypervolume Contribution
        hv = HyperVolume(1.1*np.ones(self.data.nobj))
        xnew = np.zeros((self.npts, l))
        if indices is None:
            indices = []
        hv_vals = -1*np.ones(M)
        hv_vals[indices] = -2
        for j in range(self.npts):
            # 3.1 - Find point with best HV improvement
            base_hv = hv.compute(normalized_front)
            for i in range(M):
                if hv_vals[i] != 0 and hv_vals[i] != -2:
                    nd = list(range(N))
                    dominated = []
                    fvals = np.vstack((normalized_front, normalized_cand_fh[i,:]))
                    (nd, dominated) = ND_Add(np.transpose(fvals), dominated, nd)
                    if dominated and dominated[0] == N: # Record is dominated
                        hv_vals[i] = 0
                    else:
                        new_hv = hv.compute(fvals[nd,:])
                        hv_vals[i] = new_hv - base_hv
            # vals = np.zeros((M,2))
            # vals[:,0] = xcand_nd[:,0]
            # vals[:,1] = hv_vals
            # print(vals)
            # 3.2 - Update selected candidate list
            index = np.argmax(hv_vals)
            xnew[j,:] = xcand_nd[index,:]
            indices.append(index)
            # 3.3 - Bar point from future selection and update non-dominated set
            hv_vals[index] = -2
            nd = list(range(N))
            dominated = []
            fvals = np.vstack((normalized_front, normalized_cand_fh[index,:]))
            (nd, dominated) = ND_Add(np.transpose(fvals), dominated, nd)
            normalized_front = fvals[nd,:]
            N = len(nd)
        return indices
Example #23
    def step_size_control(self):
        # ------------------------------ IMPORTANT -------------------------------
        # The step-size adaptation is of vital importance here!!!
        # general control rule to improve stability: when a point tries to merge
        # into a front that dominates it in the last iteration, the step-size
        # of this particular point is set to the median step-size of the front.
        if hasattr(self, 'dominance_track_old'):
            for i, rank in enumerate(self.dominance_track):
                if rank != self.dominance_track_old[i]:
                    idx = self.fronts[rank]
                    mean_step_size = np.median(self.individual_step_size[idx])
                    self.individual_step_size[i] = mean_step_size

        self.dominance_track_old = deepcopy(self.dominance_track)

        #==============================================================================
        # step-size control method 1: detection of oscillating HV
        # works in very primitive case, needs more test
        # It requires hypervolume indicator computation, which is time-consuming
        #==============================================================================
        if 11 < 2:
            front = self.fitness[:, self.pareto_front]
            hv = HyperVolume(-self.ref[:, 0])
            self.hv_history[self.itercount % 10] = hv.compute(front.T.tolist())

            if (self.dominated_steer == 'NDS' and len(self.fronts) == 1) or \
                (not self.dominated_steer == 'NDS' and len(self.idx_ZU) == 0):
                if len(np.unique(self.hv_history)) == 2:
                    self.step_size *= 0.8

        #==============================================================================
        # Step size control method 2: cumulative step-size control
        # It works reasonably good among many tests and does not require too much
        # additional computational time
        #==============================================================================
        if self.itercount != 0:
            # the learning rate setting is largely affected by the amount of
            # oscillation. The smaller this value is, the larger the oscillation it
            # can handle; however, a smaller value also implies a slower learning rate.
            alpha = 0.7  # used for general purpose
            # alpha = 0.5     # currently used for Explorative Landscape Analysis
            c = 0.2
            if 11 < 2:
                if self.dominated_steer == 'NDS':
                    control_set = range(self.mu)
                else:
                    control_set = self.idx_P
            else:
                # TODO: verify this: applying the cumulative step-size control to all
                # the search points
                control_set = range(self.mu)

            if 1 < 2:
                from scipy.spatial.distance import cdist
                for idx in control_set:
                    self.inner_product[idx] = (1 - c) * self.inner_product[idx] + \
                        c * np.inner(self.path[:, idx], self.gradient_norm[:, idx])

                    if 11 < 2:
                        # step-size control rule similar to 1/5-rule in ES
                        if self.inner_product[idx] < 0:
                            self.individual_step_size[idx] *= alpha
                        else:
                            step_size_ = self.individual_step_size[idx] / alpha
                            self.individual_step_size[idx] = np.min(
                                [np.inf * self.step_size, step_size_])

                    # control the change rate of the step-size by passing the cumulative
                    # dot product into the exponential function
                    step_size_ = self.individual_step_size[idx] * \
                        np.exp((self.inner_product[idx])*alpha)

                    # put an upper bound on the adaptive step-size to avoid it becoming too
                    # large! The upper bound is calculated as the distance from one point
                    # to its nearest neighbour in the decision space.
                    _ = [
                        i for i, front in enumerate(self.fronts)
                        if idx in front
                    ][0]
                    front = self.fronts[_]
                    if len(front) != 1:
                        __ = list(set(front) - set([idx]))
                        dis = cdist(np.atleast_2d(self.pop[:, idx]),
                                    self.pop[:, __].T)
                        step_size_ub = 0.7 * np.min(dis)
                    else:
                        step_size_ub = 4. * self.step_size

                    self.individual_step_size[idx] = np.min(
                        [step_size_ub, step_size_])
                    self.path[:, idx] = self.gradient_norm[:, idx]

        #==============================================================================
        # step-size control method 3: exploit backtracking line search to find
        # the optimal step-size setting. Works, but requires many more function evaluations.
        #==============================================================================
        if 11 < 2:
            for idx in self.idx_P:
                self.individual_step_size[
                    idx] = self.__backtracing_line_search(idx)
Example #24
def front_objs(dag, keys):
	hv = HyperVolume(hv_ref)
	d, nz = fetch(dag, keys, "results")
	for k, v in d.items():
		d[k] = max(v, key=lambda x: hv.compute(list(map(nz, x))))
	return d
Example #25
class MOO_HyperVolumeGradient:
    def __init__(self,
                 dim_d,
                 dim_o,
                 lb,
                 ub,
                 mu=40,
                 fitness=None,
                 gradient=None,
                 ref=None,
                 initial_step_size=0.1,
                 maximize=True,
                 sampling='uniform',
                 adaptive_step_size=True,
                 verbose=False,
                 **kwargs):
        """
        Hypervolume Indicator Gradient Ascent Algorithm class

        Parameters
        ----------

        dim_d : integer
            decision space dimension

        dim_o : integer
            objective space dimension

        lb : array
            lower bound of the search domain

        ub : array
            upper bound of the search domain

        mu :  integer
            the size of the Pareto approximation set

        fitness : callable or list of callables (functions)
            the (vector-evaluated) objective function

        gradient : callable or list of callables (functions)
            the gradient (Jacobian) of the objective function

        ref : array or list 
            the reference point

        initial_step_size : numeric or string
            the initial step size; it could be a string subject to evaluation
            
        maximize : boolean or list of booleans
            whether the objective functions are subject to maximization. If it is
            a list, it specifies the maximization option per objective dimension

        sampling : string
            the method used in the initial sampling of the approximation set

        adaptive_step_size : boolean
            whether to enable to adaptive control for the step sizes, enabled by default

        verbose : boolean
            controls the verbosity

        kwargs: additional parameters, including:
            dominated_steer : string
                the method to steer (move) the dominated points. Available options are
                'NDS', 'M1', 'M2', 'M3', 'M4', 'M5'. 'NDS' stands for Non-dominated
                Sorting and is enabled by default. For the details of these methods,
                please refer to paper [2] below.

            enable_dominated : boolean
                whether to include dominated points in the population, for test purposes only

            normalize : boolean
                whether the gradient is normalized or not

        References:

        .. [1] Wang H., Deutz A., Emmerich M.T.M. & Bäck T.H.W., Hypervolume Indicator 
            Gradient Ascent Multi-objective Optimization. In Lecture Notes in Computer 
            Science 10173:654-669. DOI: 10.1007/978-3-319-54157-0_44. In book: 
            Evolutionary Multi-Criterion Optimization, pp.654-669.

        .. [2] Wang H., Ren Y., Deutz A. & Emmerich M.T.M., On Steering 
            Dominated Points in Hypervolume Indicator Gradient Ascent for Bi-Objective 
            Optimization. In: Schuetze O., Trujillo L., Legrand P., Maldonado Y. (Eds.) 
            NEO 2015: Results of the Numerical and Evolutionary Optimization Workshop NEO 
            2015 held at September 23-25 2015 in Tijuana, Mexico. no. Studies in 
            Computational Intelligence 663. International Publishing: Springer.
                

        """
        self.dim_d = dim_d
        self.dim_o = dim_o
        self.mu = mu
        self.verbose = verbose

        assert self.mu > 1  # single point is not allowed
        assert sampling in ['uniform', 'LHS', 'grid']
        self.sampling = sampling

        # step-size settings
        self.step_size = eval(initial_step_size) if isinstance(
            initial_step_size, str) else initial_step_size
        self.individual_step_size = np.repeat(self.step_size, self.mu)
        self.adaptive_step_size = adaptive_step_size

        # setup boundary in decision space
        lb, ub = atleast_2d(lb), atleast_2d(ub)
        self.lb = lb.T if lb.shape[1] != 1 else lb
        self.ub = ub.T if ub.shape[1] != 1 else ub

        # are objective functions subject to maximization
        if hasattr(maximize, '__iter__') and len(maximize) != self.dim_o:
            raise ValueError(
                'maximize should have the same length as fitnessfuncs')
        elif isinstance(maximize, bool):
            maximize = [maximize] * self.dim_o
        self.maximize = np.atleast_1d(maximize)

        # setup reference point
        self.ref = np.atleast_2d(ref).reshape(-1, 1)
        self.ref[~self.maximize, 0] = -self.ref[~self.maximize, 0]

        # setup the fitness functions
        if isinstance(fitness, (list, tuple)):
            if len(fitness) != self.dim_o:
                raise ValueError('fitness_grad: shape {} is inconsistent \
                    with dim_o:{}'.format(len(fitness), self.dim_o))
            self.fitness_func = fitness
            self.vec_eval_fitness = False
        elif hasattr(fitness, '__call__'):
            self.fitness_func = fitness
            self.vec_eval_fitness = True
        else:
            raise Exception('fitness should be either a list of functions or \
                a vector evaluated function!')

        # setup fitness gradient functions
        if isinstance(gradient, (list, tuple)):
            if len(gradient) != self.dim_o:
                raise ValueError('fitness_grad: shape {} is inconsistent \
                    with dim_o: {}'.format(len(gradient), self.dim_o))
            self.grad_func = gradient
            self.vec_eval_grad = False
        elif hasattr(gradient, '__call__'):
            self.grad_func = self.__obj_dx(gradient)
            self.vec_eval_grad = True
        else:
            raise Exception(
                'fitness_grad should be either a list of functions or \
                a matrix evaluated function!')

        # setup the performance metric functions for convergence detection
        try:
            self.performance_metric_func = kwargs['performance_metric']
            self.target_perf_metric = kwargs['target']
        except KeyError:
            self.performance_metric_func = None
            self.target_perf_metric = None

        self.normalize = kwargs.get('normalize', True)
        self.enable_dominated = kwargs.get('enable_dominated', True)
        self.maxiter = kwargs.get('maxiter', inf)

        # dominated_steer for moving non-differentiable or zero-derivative points
        try:
            self.dominated_steer = kwargs['dominated_steer']
            assert self.dominated_steer in ['M' + str(i)
                                            for i in range(1, 6)] + ['NDS']
        except KeyError:
            self.dominated_steer = 'NDS'

        self.pop = None
        self.pop_old = None  # for potential rollback

        # create some internal variables
        self.gradient = zeros((self.dim_d, self.mu))
        self.gradient_norm = zeros((self.dim_d, self.mu))

        # list recording on which condition the optimizer terminates
        self.stop_list = []

        # iteration counter
        self.itercount = 0

        # step-size control mechanism
        self.hv_history = np.random.rand(10)
        self.hv = HyperVolume(-self.ref[:, 0])

        # step-size control mechanism
        self.path = zeros((self.dim_d, self.mu))
        self.inner_product = zeros(self.mu)

        # Assuming a smooth landscape that is differentiable almost everywhere,
        # we need to record the extreme points that are non-differentiable
        self.non_diff_point = []

        # dynamic reference points
        self.dynamic_ref = []

        # dominance rank of the points
        self.dominance_track = zeros(self.mu, dtype='int')

        # record the working states of all search points
        self._states = array(['NOT-INIT'] * self.mu, dtype='|S8')  # '|S5' would truncate 'NOT-INIT'

    def __str__(self):
        # TODO: implement this
        pass

    def __obj_dx(self, gradient):
        def obj_dx(x):
            dx = np.atleast_2d(gradient(x))
            dx = dx.T if dx.shape[0] != self.dim_o else dx
            return dx

        return obj_dx

    def init_sample(self, dim, n_sample, x_lb, x_ub, method=None):
        if method is None:
            method = self.sampling

        if method == 'LHS':
            # Latin Hypercube Sampling: get evenly distributed samples in R^dim
            samples = lhs(dim, samples=n_sample).T * (x_ub - x_lb) + x_lb

        elif method == 'uniform':
            samples = np.random.rand(dim, n_sample) * (x_ub - x_lb) + x_lb

        elif method == 'grid':
            n_sample_axis = np.ceil(sqrt(self.mu))
            self.mu = int(n_sample_axis**2)
            x1 = np.linspace(x_lb[0] + 0.05, x_ub[0] - 0.05, n_sample_axis)
            x2 = np.linspace(x_lb[1] + 0.05, x_ub[1] - 0.05, n_sample_axis)
            X1, X2 = np.meshgrid(x1, x2)
            samples = r_[X1.reshape(1, -1), X2.reshape(1, -1)]

        return samples

    def hypervolume_dx(self, positive_set, ref=None):
        if ref is None:
            ref = self.ref

        n_point = len(positive_set)
        pop = self.pop[:, positive_set]
        gradient_decision = zeros((self.dim_d, n_point))
        gradient_objective = self.hypervolume_df(positive_set, ref)

        for k in range(n_point):
            jacobian = self.grad_func(pop[:, k]) if self.vec_eval_grad else \
                array([grad(pop[:, k]) for grad in self.grad_func])

            # gradient vectors need to be reverted under minimization
            jacobian *= (-1)**np.atleast_2d(~self.maximize).T
            gradient_decision[:, k] = np.dot(gradient_objective[:, k],
                                             jacobian)

            # if inner(jacobian[0, :], jacobian[1, :]) / \
            #     (norm(jacobian[0, :]) * norm(jacobian[1, :])) == -1:
            #     self._states[positive_set[k]] = 'INCO'
            # else:
            #     if self._states[positive_set[k]] == 'INCO':
            #         pdb.set_trace()
            #     self._states[positive_set[k]] = 'COM'

        return gradient_decision

    def hypervolume_df(self, positive_set, ref):
        if self.dim_o == 2:
            gradient = self.__2D_hypervolume_df(positive_set, ref)
        else:
            gradient = self.__ND_hypervolume_df(positive_set, ref)

        return gradient

    def __2D_hypervolume_df(self, positive_set, ref):
        n_point = len(positive_set)
        gradient = zeros((self.dim_o, n_point))
        _grad = zeros((self.dim_o, n_point))

        # sort the pareto front with respect to y1
        fitness = self._fitness[:, positive_set]
        idx = argsort(fitness[0, :])

        # sorted Pareto front
        sorted_fitness = fitness[:, idx]

        y1 = sorted_fitness[0, :]
        y2 = sorted_fitness[1, :]
        _grad[0, :] = y2 - r_[y2[1:], ref[1]]
        _grad[1, :] = y1 - r_[ref[0], y1[0:-1]]

        gradient[:, idx] = _grad
        return gradient

    def __ND_hypervolume_df(self, positive_set, ref):
        # TODO: implement hypervolume gradient larger than 3D
        pass

    def check_population(self, fitness):
        n_point = fitness.shape[1]
        # find the pareto front, weakly and strictly dominated set
        weakly_dom_count = zeros(n_point)
        strictly_dom_count = zeros(n_point)

        for i in range(n_point - 1):
            p = fitness[:, i]
            for j in range(i + 1, n_point):
                q = fitness[:, j]
                if all(p > q):
                    strictly_dom_count[j] += 1
                elif all(p >= q) and not all(np.isclose(p, q)):
                    weakly_dom_count[j] += 1
                elif all(p < q):
                    strictly_dom_count[i] += 1
                elif all(p <= q) and not all(np.isclose(p, q)):
                    weakly_dom_count[i] += 1

        pareto_front = set(
            nonzero(
                np.bitwise_and(weakly_dom_count == 0,
                               strictly_dom_count == 0))[0])

        # strictly dominated set
        S = set(nonzero(strictly_dom_count != 0)[0])
        # weakly dominated set
        W = set(
            nonzero(
                np.bitwise_and(strictly_dom_count == 0,
                               weakly_dom_count != 0))[0])

        # find the subset of pareto front with duplicated components
        if self.dim_o == 2:  # simple case in 2-D: duplication is impossible
            N, D = set(pareto_front), set([])
        else:  # otherwise...
            D = set([])
            front_size = len(pareto_front)
            for i in range(front_size - 1):
                p = fitness[:, i]
                for j in range(i + 1, front_size):
                    q = fitness[:, j]
                    if np.any(p == q):
                        D |= set([i, j])
            N = set(pareto_front) - D

        # exterior set
        E = set(nonzero(np.any(fitness < self.ref, axis=0))[0])
        # interior set
        I = set(nonzero(np.all(fitness > self.ref, axis=0))[0])
        # boundary set
        B = set(nonzero(np.any(fitness == self.ref, axis=0))[0]) - E

        Z = E | S  # zero derivative set
        U = D | (W - E) | (B - S)  # undefined derivative set
        P = N & I  # positive derivative set

        return pareto_front, Z, U, P

    def evaluate(self, pop):
        pop = np.atleast_2d(pop)
        pop = pop.T if pop.shape[0] != self.dim_d else pop
        n_point = pop.shape[1]

        if self.vec_eval_fitness:
            fitness = array(
                [self.fitness_func(pop[:, i]) for i in range(n_point)]).T
        else:
            fitness = array([[func(pop[:, i]) for i in range(n_point)] \
                for func in self.fitness_func])
        # fitness values need to be reverted under minimization
        _fitness = fitness * (-1)**np.atleast_2d(~self.maximize).T
        return fitness, _fitness

    def fast_non_dominated_sort(self, fitness):

        fronts = []
        dominated_set = []
        n_domination = zeros(self.mu)

        for i in range(self.mu):
            p = fitness[:, i]
            p_dominated_set = []
            n_p = 0

            for j in range(self.mu):
                q = fitness[:, j]
                if i != j:
                    # TODO: verify this part
                    # check the strict domination
                    # allow for duplication points on the same front
                    if all(p >= q) and not all(p == q):
                        p_dominated_set.append(j)
                    elif all(p <= q) and not all(p == q):
                        n_p += 1

            dominated_set.append(p_dominated_set)
            n_domination[i] = n_p

        # create the first front
        fronts.append(nonzero(n_domination == 0)[0].tolist())
        n_domination[n_domination == 0] = -1

        i = 0
        while True:
            for p in fronts[i]:
                p_dominated_set = dominated_set[p]
                n_domination[p_dominated_set] -= 1

            _front = nonzero(n_domination == 0)[0].tolist()
            n_domination[n_domination == 0] = -1

            if len(_front) == 0:
                break
            fronts.append(_front)
            i += 1

        return fronts

    def steering_dominated(self, idx_ZU):
        # The rest points move along directions aggregated from function
        # gradients by scalarization dominated_steers
        gradient_ZU = zeros((self.dim_d, len(idx_ZU)))

        if self.dominated_steer in ['M4', 'M5']:
            mid_gap, slope = self.__mid_gap_pareto(self.pareto_front)
            __ = list(set(range(self.mu)) - set(self.pareto_front))
            nearst_gap_idx = self.__nearst_gap(mid_gap, __)

        if self.dominated_steer == 'M2' and self.itercount == 0:
            self.weights = np.random.rand(len(idx_ZU))
            self.weights_mapping = {t: i for i, t in enumerate(idx_ZU)}

        for i, k in enumerate(idx_ZU):
            # calculate the objective function gradients
            grads = np.atleast_2d(self.grad_func(self.pop[:, k])).T if self.vec_eval_grad \
                else array([grad(self.pop[:, k]) for grad in self.grad_func]).T
            # gradient vectors need to be reverted under minimization
            grads *= (-1)**np.atleast_2d(~self.maximize)

            # simple objective scalarization with equal weights
            if self.dominated_steer == 'M1':
                gradient_ZU[:, i] = np.sum(grads, axis=1)

            # objective scalarization with random (uniform) weights
            elif self.dominated_steer == 'M2':
                try:
                    idx = self.weights_mapping[k]
                    w = self.weights[idx]
                except KeyError:
                    w = np.random.rand()
                    self.weights_mapping[k] = len(self.weights)
                    self.weights = np.append(self.weights, w)
                # random weights generation per iteration
                if 11 < 2:
                    w = np.random.rand()
                gradient_ZU[:, i] = np.sum(grads * array([w, 1 - w]), axis=1)

            # Lara's method: scalarization after gradient normalization
            elif self.dominated_steer == 'M3':

                length = sqrt(np.sum(grads**2.0, axis=0))
                idx, = nonzero(length != 0)
                grads[:, idx] /= length[idx]
                gradient_ZU[:, i] = np.sum(grads, axis=1)

            # converge to the tangential point on the pareto front with the same
            # slope as the secant of the nearest gap
            elif self.dominated_steer == 'M4':
                assert self.dim_o == 2  # only work in 2-D

                gap_idx = nearst_gap_idx[i]
                m = slope[gap_idx]
                w = -m / (1 - m)
                gradient_ZU[:, i] = np.sum(grads * array([w, 1 - w]), axis=1)

            # converge to the middle point of the chord of the nearest gap
            elif self.dominated_steer == 'M5':
                assert self.dim_o == 2  # only work in 2-D

                if len(nearst_gap_idx) == 0:
                    break
                gap_idx = nearst_gap_idx[i]
                mid = mid_gap[:, gap_idx]
                p = self._fitness[:, k]
                tmp = 2 * array([mid - p])  # remember we need a gradient descent here
                gradient_ZU[:, i] = np.sum(grads * tmp, axis=1)

        return gradient_ZU

    def __nearst_gap(self, mid_gap, idx):
        """
        assign non-differentiable points evenly to the nearest gaps on the
        pareto front
        """
        nearst_gap_idx = []

        if len(mid_gap) != 0:
            if mid_gap.shape[1] > 1:
                if 1 < 2:
                    avail_gap = range(mid_gap.shape[1])
                    for i in idx:
                        if len(avail_gap) == 0:
                            avail_gap = range(len(mid_gap))
                        p = self._fitness[:, i].reshape(-1, 1)
                        dis = np.sum((mid_gap[:, avail_gap] - p)**2.0, axis=0)

                        if len(dis) != 0:
                            nearst_idx = avail_gap[nonzero(
                                dis == min(dis))[0][0]]
                            nearst_gap_idx.append(nearst_idx)

                        avail_gap = list(set(avail_gap) - set(nearst_gap_idx))
                else:
                    for i in idx:
                        p = self._fitness[:, i].reshape(-1, 1)
                        dis = np.sum((mid_gap - p)**2.0, axis=0)
                        if len(dis) != 0:
                            nearst_gap_idx.append(
                                nonzero(dis == min(dis))[0][0])

        return nearst_gap_idx

    def __mid_gap_pareto(self, idx_P):
        front = self._fitness[:, idx_P]

        # sort the front according to the first axis
        idx = argsort(front[0, :])
        sorted_front = front[:, idx]
        n_point = len(idx_P)
        slope = np.zeros(n_point)

        A = sorted_front[:, 0:-1]
        B = sorted_front[:, 1:]
        mid_gap = (A + B) / 2.0

        for i in range(n_point - 1):
            if np.isclose(B[0, i] - A[0, i], 0):
                slope[i] = np.inf
            else:
                slope[i] = (B[1, i] - A[1, i]) / (B[0, i] - A[0, i])

        return mid_gap, slope

    def check_stop_criteria(self):
        # TODO: implement more stop criteria
        if self.itercount >= self.maxiter:
            self.stop_list += ['maxiter']

        if self.itercount > 0:
            # check for stationary necessary condition
            grad_len = sqrt(np.sum(self.gradient**2.0, axis=0))
            if np.all(grad_len < 1e-5):
                self.stop_list += ['stationary']

            # check if the step-size is too small
            if np.all(self.individual_step_size < 1e-5 *
                      np.max(self.ub - self.lb)):
                self.stop_list += ['step-size']

        # check if the performance metric target is reached
        if self.itercount > 0 and self.performance_metric_func is not None:
            PF = self.fitness[:, self.pareto_front].T
            self.perf_metric = self.performance_metric_func(PF)
            if self.perf_metric <= self.target_perf_metric:
                self.stop_list += ['target']

        return self.stop_list

    def __backtracing_line_search(self, idx):
        point = self.pop[:, idx]
        idx_point = self.pareto_front.index(idx)
        pareto = self.fitness[:, self.pareto_front]
        alpha = self.step_size
        beta = 1
        tau = 0.7

        gradient = self.gradient[:, idx]
        f0 = self.hv.compute(pareto.T)
        while True:
            new_point = point + alpha * gradient
            new_fitness, _ = self.evaluate(new_point[:, np.newaxis])
            pareto[:, idx_point] = new_fitness[:, 0]
            fnew = self.hv.compute(pareto.T)

            if fnew >= f0 + alpha * beta * np.linalg.norm(gradient)**2:
                break
            alpha *= tau

        return alpha

    def step_size_control(self):
        # ------------------------------ IMPORTANT -------------------------------
        # The step-size adaptation is of vital importance here!!!
        # general control rule to improve stability: when a point tries to merge
        # into a front that dominates it in the last iteration, the step-size
        # of this particular point is set to the median step-size of the front.
        if hasattr(self, 'dominance_track_old'):
            for i, rank in enumerate(self.dominance_track):
                if rank != self.dominance_track_old[i]:
                    idx = self.fronts[rank]
                    mean_step_size = np.median(self.individual_step_size[idx])
                    self.individual_step_size[i] = mean_step_size

        self.dominance_track_old = deepcopy(self.dominance_track)

        #==============================================================================
        # step-size control method 1: detection of oscillating HV
        # works in very primitive case, needs more test
        # It requires hypervolume indicator computation, which is time-consuming
        #==============================================================================
        if 11 < 2:
            front = self.fitness[:, self.pareto_front]
            hv = HyperVolume(-self.ref[:, 0])
            self.hv_history[self.itercount % 10] = hv.compute(front.T.tolist())

            if (self.dominated_steer == 'NDS' and len(self.fronts) == 1) or \
                (not self.dominated_steer == 'NDS' and len(self.idx_ZU) == 0):
                if len(np.unique(self.hv_history)) == 2:
                    self.step_size *= 0.8

        #==============================================================================
        # Step size control method 2: cumulative step-size control
        # It works reasonably good among many tests and does not require too much
        # additional computational time
        #==============================================================================
        if self.itercount != 0:
            # the learning rate setting is largely affected by the amount of
            # oscillation. The smaller this value is, the larger the oscillation it
            # can handle; however, a smaller value also implies a slower learning rate.
            alpha = 0.7  # used for general purpose
            # alpha = 0.5     # currently used for Explorative Landscape Analysis
            c = 0.2
            if 11 < 2:
                if self.dominated_steer == 'NDS':
                    control_set = range(self.mu)
                else:
                    control_set = self.idx_P
            else:
                # TODO: verify this: applying the cumulative step-size control to all
                # the search points
                control_set = range(self.mu)

            if 1 < 2:
                from scipy.spatial.distance import cdist
                for idx in control_set:
                    self.inner_product[idx] = (1 - c) * self.inner_product[idx] + \
                        c * np.inner(self.path[:, idx], self.gradient_norm[:, idx])

                    if 11 < 2:
                        # step-size control rule similar to 1/5-rule in ES
                        if self.inner_product[idx] < 0:
                            self.individual_step_size[idx] *= alpha
                        else:
                            step_size_ = self.individual_step_size[idx] / alpha
                            self.individual_step_size[idx] = np.min(
                                [np.inf * self.step_size, step_size_])

                    # control the change rate of the step-size by passing the cumulative
                    # dot product into the exponential function
                    step_size_ = self.individual_step_size[idx] * \
                        np.exp((self.inner_product[idx])*alpha)

                    # put an upper bound on the adaptive step-size to avoid it becoming too
                    # large! The upper bound is calculated as the distance from one point
                    # to its nearest neighbour in the decision space.
                    _ = [
                        i for i, front in enumerate(self.fronts)
                        if idx in front
                    ][0]
                    front = self.fronts[_]
                    if len(front) != 1:
                        __ = list(set(front) - set([idx]))
                        dis = cdist(np.atleast_2d(self.pop[:, idx]),
                                    self.pop[:, __].T)
                        step_size_ub = 0.7 * np.min(dis)
                    else:
                        step_size_ub = 4. * self.step_size

                    self.individual_step_size[idx] = np.min(
                        [step_size_ub, step_size_])
                    self.path[:, idx] = self.gradient_norm[:, idx]

        #==============================================================================
        # step-size control method 3: exploit backtracking line search to find
        # the optimal step-size setting. Works, but requires many more function evaluations.
        #==============================================================================
        if 11 < 2:
            for idx in self.idx_P:
                self.individual_step_size[
                    idx] = self.__backtracing_line_search(idx)

    def constraint_handling(self, pop):
        # handling the simple box constraints
        lb, ub = self.lb[:, 0], self.ub[:, 0]
        for i in range(self.mu):
            p = pop[:, i]
            idx1, idx2 = p <= lb, p >= ub
            p[idx1] = lb[idx1]
            p[idx2] = ub[idx2]

    def restart_check(self):
        """
        In general, this function checks if a subset of the approximation set
        should be re-sampled. The re-sampling condition is:
            stationary or non-differentiable but not on the global PF
        This function also checks if the hypervolume indicator gradient at a
        point should be projected with respect to a violated box constraint.
        Perhaps these checks should be moved to a separate function?
        """
        pareto_front = set(self.pareto_front)

        # check for non-differentiable points
        non_diff = set(
            nonzero(
                np.any(np.bitwise_or(np.isnan(self.gradient),
                                     np.isinf(self.gradient)),
                       axis=0))[0])

        # check for boundary points
        on_bound = set(
            nonzero(
                np.any(np.bitwise_or(self.pop == self.lb, self.pop == self.ub),
                       axis=0))[0])

        # TODO: gradient projection
        # if:
        #   1) the point is on the boundary
        #   2) it tries to violate the boundary constraint
        # then set the corresponding gradient component to zero
        if 1 < 2:  # always True: gradient projection is enabled
            ind = list(on_bound)
            for i in ind:
                point = self.pop[:, i]
                for j in range(self.dim_d):
                    if point[j] == self.lb[j] or point[j] == self.ub[j]:
                        self.gradient[j, i] = 0

        # check for zero gradient points
        zero_grad = set(
            nonzero(np.all(np.isclose(self.gradient, 0), axis=0))[0])

        # points on the Pareto front whose gradient is zero are stationary
        stationary_ind = zero_grad & pareto_front
        # re-sample the points that have a zero or invalid gradient and are
        # not on the Pareto front
        restart_ind = (zero_grad | non_diff) - pareto_front - stationary_ind

        return list(restart_ind), list(stationary_ind)

    def duplication_handling(self):
        # check for potential duplication of stationary points in the population
        # and move such points apart to prevent duplicates
        # TODO: verify this! a better dominated_steer possibly exists
        fitness = self.fitness[:, self.pareto_front]
        checked_list = []

        for i, ind in enumerate(self.pareto_front):
            if i in checked_list:
                continue

            p = self.fitness[:, [ind]]
            bol = np.all(p == fitness, axis=0)
            bol[i] = False
            tmp = nonzero(bol)[0]

            # when duplication happens, move one duplicated point slightly
            if len(tmp) != 0:
                for k in tmp:
                    duplication_ind0 = self.pareto_front[i]
                    duplication_ind = self.pareto_front[k]

                    if sum(self.gradient[:, duplication_ind]) == 0 or \
                        sum(self.gradient[:, duplication_ind0]) == 0:

                        # compute its nearest neighbor in the decision space
                        point = self.pop[:, [duplication_ind]]
                        dis = np.sum(
                            (self.pop[:, self.pareto_front] - point)**2.0,
                            axis=0)
                        dis[dis == 0] = np.inf
                        res = nonzero(dis == min(dis))[0][0]
                        nearest_ind = self.pareto_front[res]

                        # move the point halfway towards its nearest neighbor
                        # and penalize its step-size
                        self.pop[:, duplication_ind] = (self.pop[:, nearest_ind] + \
                            self.pop[:, duplication_ind]) / 2.

                        self.individual_step_size[
                            duplication_ind] = self.step_size / 5.
                        checked_list += [k]

    def update_ref_point(self):
        # TODO: verify the performance difference of this to the fixed reference point
        self.dynamic_ref = []
        for front in self.fronts:
            front_fitness = self._fitness[:, front]
            # replicate the per-front minimum and shift it by 10% to obtain
            # the reference point of this front
            ref_front = np.repeat(np.min(front_fitness), self.dim_o)
            ref_front += 0.1 * ref_front
            self.dynamic_ref += [ref_front]

    def step(self):

        # population initialization
        if self.pop is None:
            self.pop = self.init_sample(self.dim_d, self.mu, self.lb, self.ub)
            self._states = array(['INIT'] * self.mu)

        # evaluation
        self.fitness, self._fitness = self.evaluate(self.pop)

        # Combine non-dominated sorting with hypervolume gradient ascent
        if self.dominated_steer == 'NDS':
            self.fronts = self.fast_non_dominated_sort(self._fitness)
            self.pareto_front = self.fronts[0]

            # TODO: implement the dynamic reference point update:
            # self.update_ref_point()

            # compute the hypervolume gradient for each front layer
            for i, front in enumerate(self.fronts):
                self.gradient[:, front] = self.hypervolume_dx(front)

        # partition the collection of vectors according to hypervolume
        # indicator differentiability
        else:
            pareto_front, Z, U, P = self.check_population(self._fitness)
            self.fronts = self.fast_non_dominated_sort(self._fitness)
            self.idx_P, self.idx_ZU = list(P), list(Z | U)
            self.pareto_front = list(pareto_front)

            dominated = list(set(range(self.mu)) - set(self.pareto_front))

            # compute the hypervolume gradient for the differentiable points
            # TODO: check why idx_P was abandoned here
            # gradient_P = self.hypervolume_dx(self.idx_P)
            gradient_P = self.hypervolume_dx(self.pareto_front)
            gradient_ZU = self.steering_dominated(dominated) \
                if self.enable_dominated and len(dominated) != 0 else []

            self.gradient[:, self.pareto_front] = gradient_P
            self.gradient[:, dominated] = gradient_ZU

        # check for stationary points and points that need to be re-sampled
        restart_ind, stationary_ind = self.restart_check()

        if 11 < 2:  # always False: debug toggle
            # do not allow restarts, for debugging purposes
            restart_ind = []

        for j, f in enumerate(self.fronts):
            self.dominance_track[f] = j

        # index set for gradient ascent
        ind = list(
            set(range(self.mu)) - set(restart_ind) - set(stationary_ind))

        # normalization should be performed after gradient correction
        self.gradient_norm[:, :] = self.gradient
        if self.normalize:
            length = sqrt(np.sum(self.gradient_norm**2.0, axis=0))
            idx, = nonzero(length != 0)
            self.gradient_norm[:, idx] /= length[idx]

        # call the step-size control function
        if self.adaptive_step_size:
            self.step_size_control()

        # Prior to gradient ascent, copy the population for potential rollback
        self.pop_old = copy(self.pop)

        # re-sample the point uniformly and reset the corresponding step-size to
        # the initial step-size
        # TODO: better restarting action rules and better implementation
        self.individual_step_size[restart_ind] = self.step_size
        self.pop[:, restart_ind] = self.init_sample(
            self.dim_d, len(restart_ind), self.lb, self.ub, 'uniform')

        # gradient ascending along the normalized gradient
        self.pop[:, ind] += self.individual_step_size[
            ind] * self.gradient_norm[:, ind]

        # constraint handling methods
        self.constraint_handling(self.pop)

        # repair the duplicated points
        self.duplication_handling()

        # increment the iteration counter
        self.itercount += 1

        return self.pareto_front, self.fitness

    def optimize(self):

        # Main iteration
        while not self.check_stop_criteria():
            self.step()

        return self.pareto_front, self.itercount
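
A minimal driver sketch for the optimizer whose methods are shown above. The class name `HVGradientAscent`, the toy objectives, and all constructor values here are illustrative assumptions, not part of the example:

import numpy as np

def fitness(x):
    # hypothetical bi-objective toy problem
    return np.array([np.sum(x ** 2.), np.sum((x - 1.) ** 2.)])

def jacobian(x):
    # its Jacobian, one row per objective
    return np.array([2. * x, 2. * (x - 1.)])

# 'HVGradientAscent' is a placeholder name for the class shown above
opt = HVGradientAscent(dim_d=2, dim_o=2, lb=[-2, -2], ub=[2, 2], mu=20,
                       fitness=fitness, gradient=jacobian, ref=[10, 10],
                       maximize=False, maxiter=100)
pareto_front, itercount = opt.optimize()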
Example #26
0
    def __init__(self,
                 dim_d,
                 dim_o,
                 lb,
                 ub,
                 mu=40,
                 fitness=None,
                 gradient=None,
                 ref=None,
                 initial_step_size=0.1,
                 maximize=True,
                 sampling='uniform',
                 adaptive_step_size=True,
                 verbose=False,
                 **kwargs):
        """
        Hypervolume Indicator Gradient Ascent Algorithm class

        Parameters
        ----------

        dim_d : integer
            decision space dimension

        dim_o : integer
            objective space dimension

        lb : array
            lower bound of the search domain

        ub : array
            upper bound of the search domain

        mu : integer
            the size of the Pareto approximation set

        fitness : callable or list of callables (functions)
            (vector-evaluated) objective function

        gradient : callable or list of callables (functions)
            the gradient (Jacobian) of the objective function

        ref : array or list 
            the reference point

        initial_step_size : numeric or string
            the initial step size; it can be a string, which is then evaluated

        maximize : boolean or list of booleans
            whether the objective functions are subject to maximization. If it
            is a list, it specifies the maximization option per objective
            dimension

        sampling : string
            the method used in the initial sampling of the approximation set

        adaptive_step_size : boolean
            whether to enable to adaptive control for the step sizes, enabled by default

        verbose : boolean
            controls the verbosity

        kwargs: additional parameters, including:
            dominated_steer : string
                the method to steer (move) the dominated points. Available
                options are 'NDS', 'M1', 'M2', 'M3', 'M4', 'M5'. 'NDS' stands
                for Non-dominated Sorting and is enabled by default. For the
                details of these methods, please refer to paper [2] below.

            enable_dominated : boolean
                whether to include the dominated points in the population,
                for testing purposes only

            normalize : boolean
                whether the gradient is normalized

        References:

        .. [1] Wang H., Deutz A., Emmerich M.T.M. & Bäck T.H.W., Hypervolume Indicator 
            Gradient Ascent Multi-objective Optimization. In Lecture Notes in Computer 
            Science 10173:654-669. DOI: 10.1007/978-3-319-54157-0_44. In book: 
            Evolutionary Multi-Criterion Optimization, pp.654-669.

        .. [2] Wang H., Ren Y., Deutz A. & Emmerich M.T.M., On Steering
            Dominated Points in Hypervolume Indicator Gradient Ascent for
            Bi-Objective Optimization. In: Schuetze O., Trujillo L., Legrand P.,
            Maldonado Y. (Eds.) NEO 2015: Results of the Numerical and
            Evolutionary Optimization Workshop NEO 2015, held September 23-25,
            2015 in Tijuana, Mexico. Studies in Computational Intelligence,
            no. 663. Springer International Publishing.
                

        """
        self.dim_d = dim_d
        self.dim_o = dim_o
        self.mu = mu
        self.verbose = verbose

        assert self.mu > 1  # single point is not allowed
        assert sampling in ['uniform', 'LHS', 'grid']
        self.sampling = sampling

        # step-size settings
        self.step_size = eval(initial_step_size) if isinstance(
            initial_step_size, basestring) else initial_step_size
        self.individual_step_size = np.repeat(self.step_size, self.mu)
        self.adaptive_step_size = adaptive_step_size

        # setup boundary in decision space
        lb, ub = atleast_2d(lb), atleast_2d(ub)
        self.lb = lb.T if lb.shape[1] != 1 else lb
        self.ub = ub.T if ub.shape[1] != 1 else ub

        # are objective functions subject to maximization
        if hasattr(maximize, '__iter__') and len(maximize) != self.dim_o:
            raise ValueError(
                'maximize should have the same length as fitnessfuncs')
        elif isinstance(maximize, bool):
            maximize = [maximize] * self.dim_o
        self.maximize = np.atleast_1d(maximize)

        # setup reference point
        self.ref = np.atleast_2d(ref).reshape(-1, 1)
        self.ref[~self.maximize, 0] = -self.ref[~self.maximize, 0]

        # setup the fitness functions
        if isinstance(fitness, (list, tuple)):
            if len(fitness) != self.dim_o:
                raise ValueError('fitness_grad: shape {} is inconsistent \
                    with dim_o:{}'.format(len(fitness), self.dim_o))
            self.fitness_func = fitness
            self.vec_eval_fitness = False
        elif hasattr(fitness, '__call__'):
            self.fitness_func = fitness
            self.vec_eval_fitness = True
        else:
            raise Exception('fitness should be either a list of functions or \
                a vector evaluated function!')

        # setup fitness gradient functions
        if isinstance(gradient, (list, tuple)):
            if len(gradient) != self.dim_o:
                raise ValueError('fitness_grad: shape {} is inconsistent \
                    with dim_o: {}'.format(len(gradient), self.dim_o))
            self.grad_func = gradient
            self.vec_eval_grad = False
        elif hasattr(gradient, '__call__'):
            self.grad_func = self.__obj_dx(gradient)
            self.vec_eval_grad = True
        else:
            raise Exception(
                'fitness_grad should be either a list of functions or \
                a matrix evaluated function!')

        # setup the performance metric functions for convergence detection
        try:
            self.performance_metric_func = kwargs['performance_metric']
            self.target_perf_metric = kwargs['target']
        except KeyError:
            self.performance_metric_func = None
            self.target_perf_metric = None

        self.normalize = kwargs.get('normalize', True)
        self.enable_dominated = kwargs.get('enable_dominated', True)
        self.maxiter = kwargs.get('maxiter', inf)

        # dominated_steer for moving non-differentiable or zero-derivative points
        try:
            self.dominated_steer = kwargs['dominated_steer']
            assert self.dominated_steer in ['M' + str(i)
                                            for i in range(1, 6)] + ['NDS']
        except KeyError:
            self.dominated_steer = 'NDS'

        self.pop = None
        self.pop_old = None  # for potential rollback

        # create some internal variables
        self.gradient = zeros((self.dim_d, self.mu))
        self.gradient_norm = zeros((self.dim_d, self.mu))

        # list recording on which condition the optimizer terminates
        self.stop_list = []

        # iteration counter
        self.itercount = 0

        # step-size control mechanism
        self.hv_history = np.random.rand(10)
        self.hv = HyperVolume(-self.ref[:, 0])

        # evolution path and inner product for the cumulative step-size control
        self.path = zeros((self.dim_d, self.mu))
        self.inner_product = zeros(self.mu)

        # Assuming a smooth landscape that is differentiable almost everywhere,
        # we need to record the extreme points that are non-differentiable
        self.non_diff_point = []

        # dynamic reference points
        self.dynamic_ref = []

        # dominance rank of the points
        self.dominance_track = zeros(self.mu, dtype='int')

        # record the working states of all search points; the string dtype
        # must be wide enough to hold 'NOT-INIT'
        self._states = array(['NOT-INIT'] * self.mu, dtype='|S8')
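
Given the sign handling in this constructor (the reference-point entries of minimized objectives are negated internally), a hedged sketch of a mixed min/max setup; the class name and `f1`, `f2`, `df1`, `df2` are placeholders, not names from the example:

# hypothetical mixed-direction setup: minimize f1, maximize f2
opt = HVGradientAscent(dim_d=3, dim_o=2, lb=[0., 0., 0.], ub=[1., 1., 1.],
                       mu=30, fitness=[f1, f2], gradient=[df1, df2],
                       ref=[10., 0.], maximize=[False, True],
                       dominated_steer='NDS', normalize=True)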
Example #27
0
def computeHyperVolumeIns(maxMinValues, fronteras, instance):
	minObj1, maxObj1 = maxMinValues[0][0], maxMinValues[0][1]
	minObj2, maxObj2 = maxMinValues[1][0], maxMinValues[1][1]
	#print minObj1, maxObj1, minObj2, maxObj2
	difObj1 = maxObj1 - minObj1
	difObj2 = maxObj2 - minObj2
	# Stored in this list because the results will end up inside the object... in normalizedValues...
	listaHyperVol = []
	normalizados = []
	print len(fronteras)
	h = input("")  # debug pause
	listOfIndexes = []
	for i in range(len(fronteras)):
		print i
		if i == instance:
			print "largo", len(fronteras[i])
			#Cada 10 generaciones calculo HV, al final deberia tener 1300 valores de HV
			
			for j in range(1,len(fronteras[i])):
				normValues = []
				listOfIndexes.append(j)
				#print j
				#h = input("DEBERIA SER 13.000 SI ES ASÍ ESTOY BIEN")
				for elemento in fronteras[i][j]:
					#h = input(". . . .")
					values = []
					cost1 = elemento[0]
					cost2 = elemento[1]
					if difObj1 == 0:
						valueObj1 = 0
					else:
						valueObj1 = (cost1 - minObj1)/difObj1
					if difObj2 == 0:
						valueObj2 = 0
					else:
						valueObj2 = (cost2 - minObj2)/difObj2
					values.append(valueObj1), values.append(valueObj2)
					normValues.append(values)
				normalizados.append(normValues)

		#print "valores normalizados"
		#for value in normValues:
		#   print value
				#normalizados.append(normValues)
		#self.normalizedValues.append(normValues)
	#h = input("")
	#en normValues tengo toda la caca
	print "len norm:" ,len(normalizados)
	#for i in range(len(normalizados)):
	##  print normalizados[i]
	# h = input(". . . .")
	referencePoint = [2,2]
	for i in range(len(normalizados)):
		print "Calculando HV . . . .", i 
		hv = HyperVolume(referencePoint)
		volume = hv.compute(normalizados[i])
		listaHyperVol.append(volume)
	#print volume
	#io = input(". . . .")
		
	results = []
	results.append(listOfIndexes)
	results.append(listaHyperVol)
	#print "Valores de HV para cada run: ", listaHyperVol    
	print len(listOfIndexes)
	print len(listaHyperVol)
	return results
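
The normalization step above is self-contained enough to factor out. A sketch of an equivalent helper with the same zero-range guard; the function name is ours, not from the example:

def normalize_front(front, max_min_values):
    # min-max normalize a list of bi-objective points; a zero range maps
    # the coordinate to 0, mirroring the guard in the loop above
    (min1, max1), (min2, max2) = max_min_values
    d1, d2 = max1 - min1, max2 - min2
    return [[0 if d1 == 0 else (c1 - min1) / float(d1),
             0 if d2 == 0 else (c2 - min2) / float(d2)]
            for c1, c2 in front]

# usage sketch: HyperVolume([2, 2]).compute(normalize_front(f, maxMinValues))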
Example #28
0
	if (front != []):	
		if (args.v):
			print 'Front: {0}'.format(i)
			for point in front:
				print(point)
			print 'hvBounds:\n{0}'.format(hvBounds)
	
		i += 1

		referencePoint = [20, 200]
		hyperVolume = HyperVolume(referencePoint)
		hv = hyperVolume.compute(front)
		hvAggr += hv
		hvTotal += 1

		if (args.v):	
			print 'Hypervolume:\n {0}'.format(hv)

if (args.v):
	print 'Average hypervolume (hvAggr/hvTotal):\n {0}/{1} = {2}'.format(hvAggr, hvTotal, hvAggr / hvTotal)

#print 'Results:'
for line in agrLines:
	lineString = ''
Example #29
0
def front_objs(dag, keys):
    # for each key, keep the run whose front attains the largest hypervolume
    hv = HyperVolume(hv_ref)
    d, nz = fetch(dag, keys, "results", lambda x: x)
    for k, v in d.iteritems():
        d[k] = max(v, key=lambda x: hv.compute(map(nz, x)))
    return d
Example #30
0
def best_hvs(dag, keys):
    # for each key, record the best hypervolume value over all runs
    hv = HyperVolume(hv_ref)
    d, nz = fetch(dag, keys, "results", lambda x: x)
    for k, v in d.iteritems():
        d[k] = max(hv.compute(map(nz, x)) for x in v)
    return dag, d
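
Both functions above reduce a collection of runs per key to a hypervolume-based summary. Stripped of the `fetch`/`dag` plumbing and the `nz` post-processing (neither is shown in these fragments), the core pattern is just:

# 'hv_ref' and 'runs' are placeholders; each run is a list of objective vectors
hv = HyperVolume(hv_ref)
best_front = max(runs, key=lambda front: hv.compute(front))   # as in front_objs
best_value = max(hv.compute(front) for front in runs)         # as in best_hvs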
Example #31
0
                              alpha=0.5)
    line10, line11 = ax1.plot([], [],
                              'or', [], [],
                              'ob',
                              ms=8,
                              mec='none',
                              alpha=0.5)

#    line00.set_clip_on(False)
#    line01.set_clip_on(False)
#    line10.set_clip_on(False)
#    line11.set_clip_on(False)

# ------------------------------- The animation ----------------------------------
# hyper-volume calculation
hv = HyperVolume(ref2)

delay = 10
toggle = True
t_start = time.time()
fps_movie = 15


def init():
    fps_text.set_text('')
    hv_text.set_text('')

    if not plot_layers:
        line00.set_data([], [])
        #        line01.set_data([], [])
        line10.set_data([], [])
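
The fragment ends inside init(). A hedged sketch of how such a setup is typically completed with matplotlib's FuncAnimation; everything not visible in the fragment (`fig`, `opt`, the update body) is an assumption:

from matplotlib import animation

def update(frame):
    # hypothetical per-frame step: advance the optimizer and redraw the front
    pareto_front, fitness = opt.step()
    line00.set_data(fitness[0, :], fitness[1, :])
    hv_text.set_text('HV = {0:.4f}'.format(hv.compute(fitness.T.tolist())))
    fps_text.set_text('iteration: {0}'.format(frame))
    return line00, line10, hv_text, fps_text

anim = animation.FuncAnimation(fig, update, init_func=init, frames=200,
                               interval=delay, blit=False)
anim.save('evolution.mp4', fps=fps_movie)  # or plt.show()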