def uniform_weighting(self, n_regions=5, perc=95):
    """Compute per-point weights so that each region of the (u, s) phase
    space has an equal overall contribution to the fit."""
    from numpy import intersect1d as intersect
    from numpy import union1d as union

    u, s = self.u, self.s
    u_b = np.linspace(0, np.percentile(u, perc), n_regions)
    s_b = np.linspace(0, np.percentile(s, perc), n_regions)

    regions, weights = {}, np.ones(len(u))
    for i in range(n_regions):
        if i == 0:
            region = intersect(np.where(u < u_b[i + 1])[0], np.where(s < s_b[i + 1])[0])
        elif i < n_regions - 1:
            lower_cut = union(np.where(u > u_b[i])[0], np.where(s > s_b[i])[0])
            upper_cut = intersect(np.where(u < u_b[i + 1])[0], np.where(s < s_b[i + 1])[0])
            region = intersect(lower_cut, upper_cut)
        else:
            # last region has no upper cut
            region = union(np.where(u > u_b[i])[0], np.where(s > s_b[i])[0])
        regions[i] = region
        if len(region) > 0:
            # weight each region inversely to its size so every region
            # contributes equally overall
            weights[region] = n_regions / len(region)

    self.weights = weights * len(u) / np.sum(weights)
    self.u_b, self.s_b = u_b, s_b
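# A minimal usage sketch for uniform_weighting, assuming it is callable as a
# module-level function and that the bound object only needs `u` and `s`
# arrays. The `_PhaseData` container below is hypothetical, not part of the
# original source; the sketch shows that the weights are normalized to sum
# to the number of points, with sparse phase-plane regions upweighted.
import numpy as np

class _PhaseData:
    """Hypothetical stand-in for the object uniform_weighting is bound to."""
    def __init__(self, u, s):
        self.u, self.s = u, s

rng = np.random.default_rng(0)
demo = _PhaseData(u=rng.gamma(2.0, 1.0, 1000), s=rng.gamma(2.0, 1.0, 1000))
uniform_weighting(demo, n_regions=5, perc=95)
assert np.isclose(np.sum(demo.weights), len(demo.u))  # weights sum to n points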
def metrics_inversion_violations(
    self,
    ref_point,
    volume_max,
    num_fronts,
    num_exec,
    name_var,
    violations,
):
    """Extract metrics from the Pareto front only, revert the sign
    inversion that converted the maximization objective into a
    minimization, and organize the metrics and data for visualization.

    Returns:
        list: Array with metrics:
            "Hypervolume"
            Solution X:
                "X Total throughput [kg]", "X Max total backlog [kg]",
                "X Mean total backlog [kg]", "X Median total backlog [kg]",
                "X Min total backlog [kg]", "X P(total backlog ≤ 0 kg)",
                "X Max total inventory deficit [kg]",
                "X Mean total inventory deficit [kg]",
                "X Median total inventory deficit [kg]",
                "X Min total inventory deficit [kg]"
            Solution Y:
                "Y Total throughput [kg]", "Y Max total backlog [kg]",
                "Y Mean total backlog [kg]", "Y Median total backlog [kg]",
                "Y Min total backlog [kg]", "Y P(total backlog ≤ 0 kg)",
                "Y Max total inventory deficit [kg]",
                "Y Mean total inventory deficit [kg]",
                "Y Median total inventory deficit [kg]",
                "Y Min total inventory deficit [kg]"
            Pareto front
    """
    # Indices of feasible (violation-free) solutions on the Pareto front.
    try:
        ix_vio = np.where(violations == 0)[0]
        ix_par = np.where(self.fronts == 0)[0]
        ix_pareto = np.intersect1d(ix_vio, ix_par)
    except (TypeError, ValueError):
        ix_pareto = np.where(self.fronts == 0)[0]

    # Calculate the normalized hypervolume.
    try:
        hv = hypervolume(points=self.objectives_raw[ix_pareto])
        hv_vol_norma = hv.compute(ref_point) / volume_max
    except Exception as e:
        print(e, "setting hv_vol_norma = 0")
        hv_vol_norma = 0
    metrics_exec = [num_exec, name_var, hv_vol_norma]

    # Revert the throughput sign, which was negated for minimization.
    self.objectives_raw[:, 0] = self.objectives_raw[:, 0] * (-1.0)

    # Metrics for the extreme throughput solutions on the Pareto front.
    ix_best_min = np.argmin(self.objectives_raw[:, 0][ix_pareto])
    ix_best_max = np.argmax(self.objectives_raw[:, 0][ix_pareto])
    metrics_id = [
        self.extract_metrics(ix_best_min, num_fronts, num_exec, "X", name_var, ix_pareto)
    ]
    metrics_id.append(
        self.extract_metrics(ix_best_max, num_fronts, num_exec, "Y", name_var, ix_pareto)
    )

    # Plot data.
    metrics_exec.append(self.objectives_raw[ix_pareto])
    return metrics_exec, metrics_id
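# A small sketch of the hypervolume convention used above, assuming
# `hypervolume` is pygmo's implementation (the import is not shown in this
# excerpt). Throughput is negated so that both objectives are minimized, and
# the reference point must be worse than every Pareto point; the front values
# and volume_max below are purely illustrative.
import numpy as np
import pygmo as pg

front = np.array([[-10.0, 3.0],   # [negated throughput, backlog]
                  [-8.0, 2.0],
                  [-6.0, 1.0]])
ref_point = [0.0, 5.0]            # dominated by (worse than) every front point
volume_max = 100.0                # illustrative normalization volume
hv_vol_norma = pg.hypervolume(front).compute(ref_point) / volume_max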
def apply_rules123(vstate, cons, bonds, bos):
    """Iteratively deduce bond orders by applying three rules.

    Rule 1: For each atom in a bond, once the bond order bo is determined,
        con is decremented by 1 and av is decremented by bo. (Since no bond
        orders are known at the start, we do not begin by looping through
        the bonds.)
    Rule 2: For an atom whose con equals its av, the bond orders of its
        unassigned bonds are set to 1.
    Rule 3: For an atom whose con equals 1, the bond order of its last
        bond is set to its av.
    """
    processed = np.zeros(bos.shape[0], dtype=bool)
    while True:
        # Apply rule 2: unassigned bonds (bo == 0) of atoms with con == av
        # are set to single bonds.
        where_con_is_av = np.where((vstate == cons) & (vstate != 0))[0]
        bos[np.intersect1d(find_bonds(where_con_is_av), np.where(bos == 0)[0])] = 1

        # Apply rule 3: unassigned bonds (bo == 0) of atoms with con == 1
        # are set to the atom's remaining valence (av).
        where_con_is_one = np.where(cons == 1)[0]
        bonds_where_con1 = find_bonds(where_con_is_one)
        bos[bonds_where_con1] = vstate[bonds_where_con1]

        # Apply rule 1: for bonds whose bo has just been determined,
        # subtract bo from av (vstate) and 1 from con at both atoms.
        # Only newly determined bonds are processed, so each bond is
        # subtracted exactly once; stop when a pass deduces nothing new
        # (convergence criterion assumed here).
        where_bo_known = np.where((bos != 0) & ~processed)[0]
        if where_bo_known.size == 0:
            break
        sub_bos_by_av, sub_con_by_1 = find_atoms(vstate.shape[0],
                                                 bonds[where_bo_known],
                                                 bos[where_bo_known])
        vstate -= sub_bos_by_av
        cons -= sub_con_by_1
        processed[where_bo_known] = True
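# The helpers `find_bonds` and `find_atoms` are not part of this excerpt.
# Below is a minimal, hypothetical sketch of the contracts assumed above;
# the real implementations likely differ (e.g., the original find_bonds
# appears to read `bonds` from an enclosing scope rather than a parameter).
import numpy as np

def find_bonds(atom_idx, bonds):
    """Return indices of bonds (rows of the (n_bonds, 2) array `bonds`)
    that involve any of the given atoms."""
    return np.where(np.isin(bonds, atom_idx).any(axis=1))[0]

def find_atoms(n_atoms, known_bonds, known_bos):
    """Accumulate, per atom, the total known bond order (to subtract from
    the available valence) and the number of known bonds (to subtract
    from the connectivity count)."""
    sub_av = np.zeros(n_atoms, dtype=known_bos.dtype)
    sub_con = np.zeros(n_atoms, dtype=int)
    for (a, b), bo in zip(known_bonds, known_bos):
        sub_av[a] += bo
        sub_av[b] += bo
        sub_con[a] += 1
        sub_con[b] += 1
    return sub_av, sub_con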
def solve(self, time_points, tor_min, tor_max):
    from scipy.stats import norm  # replaces MATLAB's norminv

    # Check that the model has first been calibrated.
    if not hasattr(self.variables, 'meanHead_calib'):
        raise RuntimeError('The model does not appear to have first been calibrated. '
                           'Please calibrate the model before running a simulation.')

    # Check the time points are all unique.
    if len(np.unique(time_points)) != len(time_points):
        raise ValueError('The time points for simulation must be unique.')

    # Create a logical vector indicating whether each time point is an
    # observation. If true, the data point is used to update the
    # exponential smoothing. Else, a forecast is made using the
    # exponential smoothing with the smoothed terms from the previous
    # observation.
    # To calculate this vector, the following steps are undertaken:
    # 1. Unique time points are derived from the simulation time points
    #    and the observed time points within the calibration period.
    # 2. Find the time points within the unique list that are observations.
    # 3. Create a logical vector with the time points from 2 as true.
    # 4. Assign the vector from 3 to the object for access within the
    #    objective function.
    time_points_all = np.concatenate([time_points, self.variables.calibration_time_points])
    time_points_all = np.unique(time_points_all)
    _, ind, _ = np.intersect1d(time_points_all, self.variables.calibration_time_points,
                               return_indices=True)
    self.variables.isObsTimePoints = np.zeros(np.shape(time_points_all), dtype=bool)
    self.variables.isObsTimePoints[ind] = True

    # Create a vector of the time steps for only the time points with
    # observed heads.
    self.variables.delta_t = np.diff(time_points_all[self.variables.isObsTimePoints]) / 365.0
    self.variables.meanDelta_t = np.mean(self.variables.delta_t)

    # Convert logical to double for MEX input.
    self.variables.isObsTimePoints = self.variables.isObsTimePoints.astype(np.double)

    # Set percentile for noise.
    Pnoise = 0.95

    # Calc deterministic component of the head at 'time_points_all'.
    params = getParameters(self)
    self.variables.doingCalibration = False
    _, headtmp, self.variables.h_forecast = objectivedef(self, params, time_points_all)

    # Filter 'head' to only those time points input to the function.
    _, ind, _ = np.intersect1d(time_points_all, time_points, return_indices=True)
    headtmp = np.column_stack([time_points, headtmp[ind, :]])

    if np.shape(params)[1] > 1:
        head = np.zeros([np.shape(headtmp)[0], np.shape(headtmp)[1], np.shape(params)[1]])
        noise = np.zeros([np.shape(headtmp)[0], 3, np.shape(params)[1]])
        head[:, :, 0] = headtmp
        for ii in range(np.shape(params)[1]):
            # Calc deterministic component of the head at 'time_points_all'
            # for this parameter set.
            self.variables.doingCalibration = False
            _, headtmp, self.variables.h_forecast = objectivedef(self, params[:, ii],
                                                                 time_points_all)

            # Filter 'head' to only those time points input to the function.
            _, ind, _ = np.intersect1d(time_points_all, time_points, return_indices=True)
            head[:, :, ii] = np.column_stack([time_points, headtmp[ind, :]])

            # Create noise component output.
            if hasattr(self.variables, 'sigma_n'):
                noise[:, :, ii] = np.column_stack([
                    head[:, 0, ii],
                    np.ones([np.shape(head)[0], 2]) * norm.ppf(Pnoise)
                    * self.variables.sigma_n[ii],
                ])
            else:
                noise[:, :, ii] = np.column_stack([head[:, 0, ii],
                                                   np.zeros([np.shape(head)[0], 2])])
    else:
        head = headtmp
        # Create noise component output.
        if hasattr(self.variables, 'sigma_n'):
            noise = np.column_stack([
                head[:, 0],
                norm.ppf(Pnoise) * self.variables.sigma_n * np.ones([np.shape(head)[0], 2]),
            ])
        else:
            noise = np.column_stack([head[:, 0], np.zeros([np.shape(head)[0], 2])])

    # Assign column names.
    colnames = ['time', 'h_star']

    return head, colnames, noise
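# The MATLAB idiom `[~, ind] = intersect(a, b)` used by the original model
# maps onto NumPy's intersect1d with return_indices=True, as in solve()
# above. A quick self-contained check:
import numpy as np

a = np.array([0.0, 1.5, 3.0, 4.5])   # e.g., all simulation time points
b = np.array([1.5, 4.5])             # e.g., observed (calibration) time points
common, ind_a, ind_b = np.intersect1d(a, b, return_indices=True)
# common == [1.5, 4.5]; ind_a == [1, 3] are the positions of the observed
# values within `a`, which is how `ind` marks observation times in solve().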