# numpy is assumed imported as np; rbm, pu and pf refer to the project's
# rule-base, partitioning and pertinence helper modules (imported elsewhere).
def retrain(self, data):
    # Reset the rule base
    self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)
    # Regenerate partitions
    data_range = np.max(data) - np.min(data)
    if self.bound_type == 'min-max':
        self.partitions = pu.generate_t_partitions(
            self.nsets,
            np.min(data) - data_range * 0.1,
            np.max(data) + data_range * 0.1)
    else:  # self.bound_type == 'mu-sigma'
        self.partitions = pu.generate_t_partitions(
            self.nsets,
            np.mean(data) - np.std(data) * self.sigma_multiplier,
            np.mean(data) + np.std(data) * self.sigma_multiplier)
    # Populate the rule base from sliding windows of length order + 1
    for i in range(len(data) - self.order):
        window = data[i:(i + self.order + 1)]
        # Get pertinences
        pertinence_list = pf.t_pertinence_list(window, self.partitions)
        # Build rule
        rule = rbm.generate_rule(pertinence_list)
        # Add rule to the rule base
        rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
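# Hypothetical illustration (not part of the original code): the retrain loop
# above slides a window of length order + 1 over the series; the first `order`
# values act as the rule antecedent and the final value as the consequent.
def _sketch_rule_windows(data, order):
    """Return (antecedent, consequent) pairs, mirroring the loop in retrain."""
    pairs = []
    for i in range(len(data) - order):
        window = data[i:(i + order + 1)]
        pairs.append((window[:-1], window[-1]))
    return pairs
# Example: _sketch_rule_windows([1, 2, 3, 4], 2) -> [([1, 2], 3), ([2, 3], 4)]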
def fit(self, data):
    x = data[-1]
    # Verify the universe of discourse
    if self.bound_type == 'min-max':
        data_range = self.max_val - self.min_val
        lb = self.min_val - data_range * 0.1
        ub = self.max_val + data_range * 0.1
    elif self.bound_type == 'mu-sigma':
        lb = self.mu - self.sigma * self.sigma_multiplier
        ub = self.mu + self.sigma * self.sigma_multiplier
    if not self.partitions:
        self.partitions = pu.generate_t_partitions(self.nsets, lb, ub)
        self.centers = [c[1] for c in self.partitions]
    else:
        # Update centers
        if self.partitionner == 'knn':
            # Find the center closest to the new sample
            closest = np.argmin([np.abs(x - c) for c in self.centers])
            # Update that center with the running mean of its cluster
            self.centers[closest] = (
                self.centers[closest] * self.samples_per_cluster[closest] + x
            ) / (self.samples_per_cluster[closest] + 1)
            self.samples_per_cluster[closest] += 1
        elif self.partitionner == 'uniform':
            mock_partitions = pu.generate_t_partitions(self.nsets, lb, ub)
            self.centers = [c[1] for c in mock_partitions]
        # Now we have two options:
        # 1) generate new partitions and translate the rules
        if self.mod == 'translate':
            self.translate(data)
        # 2) generate new partitions and leave the rule base as-is
        else:
            self.partitions = pu.generate_t_partitions_from_centers(
                self.centers)
    # Now we have three options:
    # 1) do nothing, 2) delete old rules, 3) delete bad rules
    if self.del_strategy == 'delete_bad':
        self.delete_bad(x)
    # elif self.del_strategy == 'delete_old':
    #     self.delete_old()
    # Get pertinences
    pertinence_list = pf.t_pertinence_list(data, self.partitions)
    # Build rule
    rule = rbm.generate_rule(pertinence_list)
    # Add rule to the rule base
    rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
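# Hedged sketch of the 'knn' center update above: each center is the running
# mean of the samples assigned to it, so a new sample x moves its closest
# center by the incremental-mean formula c_new = (c * n + x) / (n + 1).
# Standalone version (numpy assumed imported as np):
def _sketch_knn_center_update(centers, counts, x):
    """Update the center closest to x in place, as a running cluster mean."""
    closest = int(np.argmin([abs(x - c) for c in centers]))
    centers[closest] = (centers[closest] * counts[closest] + x) / (counts[closest] + 1)
    counts[closest] += 1
    return closest
# Example: centers = [0.0, 10.0]; counts = [1, 1]
# _sketch_knn_center_update(centers, counts, 4.0) returns 0; centers[0] == 2.0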
def translate(self, data):
    if not self.rule_base:
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)
    if self.bound_type == 'min-max':
        data_range = np.max(data) - np.min(data)
        lb = np.min(data) - data_range * 0.5
        ub = np.max(data) + data_range * 0.5
    else:  # self.bound_type == 'mu-sigma'
        lb = np.mean(data) - np.std(data) * self.sigma_multiplier
        ub = np.mean(data) + np.std(data) * self.sigma_multiplier
    if self.partitions:
        old_partitions = self.partitions[:]
    else:
        old_partitions = pu.generate_t_partitions(self.nsets, lb, ub)
    # 1) Compute the new partitions
    self.partitions = pu.generate_t_partitions(self.nsets, lb, ub)
    # 2) Verify the pertinence of the old set centers with respect to the
    #    new partitions
    old_centers = [p[1] for p in old_partitions]
    f = fuzzify_x_list_t(old_centers, self.partitions)
    # 3) Compute the final set of partitions: keep every old partition whose
    #    center is not covered by the new ones (fuzzification returned < 0)
    up_partitions = self.partitions + [
        old_partitions[i] for i in range(len(f)) if f[i] < 0
    ]
    up_partitions = sorted(up_partitions, key=lambda n_p: n_p[1])
    self.partitions = up_partitions
    self.nsets = len(self.partitions)
    # 4) Compute the mapping required to update the rule base
    map_old_new = fuzzify_x_list_t(old_centers, self.partitions)
    # 5) Update the rules
    self.rule_base = rbm.update_rule_base(self.rule_base, map_old_new,
                                          np.arange(len(self.partitions)),
                                          self.order)
    # 6) Add a new rule from the most recent window
    # Get pertinences
    pertinence_list = pf.t_pertinence_list(
        data[(len(data) - self.order - 1):], self.partitions)
    # Build rule
    rule = rbm.generate_rule(pertinence_list)
    # Add rule to the rule base
    rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
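# Hedged sketch of the mapping step above, assuming fuzzify_x_list_t returns,
# for each value, the index of the partition with maximal pertinence, and a
# negative value when no partition covers it (consistent with the f[i] < 0
# test in translate). Toy triangular version, not the actual implementation:
def _sketch_fuzzify_x_list_t(values, partitions):
    """Map each value to the index of the best-matching triangular partition,
    or -1 if it falls outside every (lower, center, upper) triple."""
    out = []
    for x in values:
        best, best_p = -1, 0.0
        for i, (lo, c, up) in enumerate(partitions):
            if lo <= x <= up:
                p = (x - lo) / (c - lo) if x <= c else (up - x) / (up - c)
                if p > best_p:
                    best, best_p = i, p
        out.append(best)
    return out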
def fit(self, data):
    # Reset the rule base
    self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)
    # Compute the universe of discourse
    data_range = np.max(data) - np.min(data)
    if self.bound_type == 'min-max':
        lb = np.min(data) - data_range * 0.1
        ub = np.max(data) + data_range * 0.1
    else:  # self.bound_type == 'mu-sigma'
        lb = np.mean(data) - np.std(data) * self.sigma_multiplier
        ub = np.mean(data) + np.std(data) * self.sigma_multiplier
    # Regenerate partitions
    if self.partitionner == 'uniform':
        self.partitions = pu.generate_t_partitions(self.nsets, lb, ub)
    else:  # self.partitionner == 'knn'
        self.partitions = pu.generate_t_partitions_knn(data, self.nsets,
                                                       lb, ub)
    # Populate the rule base
    for i in range(len(data) - self.order):
        window = data[i:(i + self.order + 1)]
        # Get pertinences
        pertinence_list = pf.t_pertinence_list(window, self.partitions)
        # Build rule
        rule = rbm.generate_rule(pertinence_list)
        # Add rule to the rule base
        rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
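# Hedged sketch of what pu.generate_t_partitions presumably returns, inferred
# from the surrounding code (centers are read as partition[1] and partitions
# are sorted by that entry): nsets evenly spaced triangular sets over
# [lb, ub], each encoded as a (lower, center, upper) triple. An assumption,
# not the actual pu implementation:
def _sketch_generate_t_partitions(nsets, lb, ub):
    """Evenly spaced triangular partitions over [lb, ub] (nsets >= 2)."""
    centers = np.linspace(lb, ub, nsets)
    step = (ub - lb) / (nsets - 1)
    return [(c - step, c, c + step) for c in centers]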
def __init__(self, nsets, order, lb, ub):
    self.nsets = nsets  # number of fuzzy sets
    self.order = order  # model order (length of the rule antecedent)
    self.lb = lb        # lower bound of the universe of discourse
    self.ub = ub        # upper bound of the universe of discourse
    self.fuzzy_sets = np.arange(self.nsets)
    self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)
    self.partitions = pu.generate_t_partitions(nsets, lb, ub)
    self.alpha_cut = 0
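# Hypothetical usage sketch; `FTS` is a placeholder name, since the class
# these methods belong to is not shown in this excerpt:
#
#     import numpy as np
#     series = np.sin(np.linspace(0, 10, 200))
#     model = FTS(nsets=7, order=1, lb=series.min(), ub=series.max())
#     model.fit(series)      # train partitions and rule base on the series
#     model.retrain(series)  # rebuild partitions and rules from scratch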