def forecast(self, ndata, **kwargs):
    """Produce one-step-ahead point forecasts for ``ndata``.

    For every sliding window of ``self.max_lag`` values the matching
    rules are looked up in ``self.flrgs``; unknown rules fall back to
    the centroid of their last left-hand-side set.  Each forecast is
    the NaN-aware mean of the collected midpoints.  Input shorter than
    (or equal to) the lag window is returned unchanged.
    """
    horizon = len(ndata)
    if horizon <= self.max_lag:
        return ndata
    forecasts = []
    for end in np.arange(self.max_lag, horizon + 1):
        window = ndata[end - self.max_lag:end]
        midpoints = []
        for flrg in self.generate_lhs_flrg(window):
            key = flrg.get_key()
            if key in self.flrgs:
                midpoints.append(self.flrgs[key].get_midpoint(self.sets))
            elif len(flrg.LHS) > 0:
                # Naive fallback: centroid of the last LHS fuzzy set
                midpoints.append(self.sets[flrg.LHS[-1]].centroid)
        forecasts.append(np.nanmean(midpoints))
    return forecasts
def forecast(self, ndata, **kwargs):
    """Point forecast with optional step-by-step explanation.

    Keyword arguments:
      explain   -- when True, print each fuzzyfication/rule-matching
                   step and forecast only a single horizon.
      fuzzyfied -- when True, ``ndata`` is already fuzzyfied and the
                   fuzzyfied rule generator is used.

    Unknown rules fall back to the centroid of their last LHS set;
    each output value is the NaN-aware mean of the rule midpoints.
    """
    explain = kwargs.get('explain', False)
    fuzzyfied = kwargs.get('fuzzyfied', False)
    ret = []
    # In explain mode only one forecasting step is performed
    l = self.max_lag + 1 if explain else len(ndata)
    if l < self.max_lag:
        return ndata
    for k in np.arange(self.max_lag, l + 1):
        if explain:
            print("Fuzzyfication \n")
        window = ndata[k - self.max_lag: k]
        if fuzzyfied:
            flrgs = self.generate_lhs_flrg_fuzzyfied(window, explain)
        else:
            flrgs = self.generate_lhs_flrg(window, explain)
        if explain:
            print("Rules:\n")
        midpoints = []
        for flrg in flrgs:
            key = flrg.get_key()
            if key in self.flrgs:
                rule = self.flrgs[key]
                mp = rule.get_midpoint(self.partitioner.sets)
                midpoints.append(mp)
                if explain:
                    print("\t {} \t Midpoint: {}\n".format(str(rule), mp))
            elif len(flrg.LHS) > 0:
                # Unknown rule: naive centroid of the last LHS set
                mp = self.partitioner.sets[flrg.LHS[-1]].centroid
                midpoints.append(mp)
                if explain:
                    print("\t {} -> {} (Naïve)\t Midpoint: {}\n".format(str(flrg.LHS),
                                                                        flrg.LHS[-1], mp))
        final = np.nanmean(midpoints)
        ret.append(final)
        if explain:
            print("Deffuzyfied value: {} \n".format(final))
    return ret
def generate_flrg_fuzzyfied(self, data):
    """Train the rule base from data that is already fuzzyfied.

    Every lag window of ``self.max_lag`` entries yields one or more
    left-hand-side rules; each fuzzy set of the next entry is appended
    to the matching rule's right-hand side.
    """
    for k in np.arange(self.max_lag, len(data)):
        if self.dump:
            print("FLR: " + str(k))
        lhs_sample = data[k - self.max_lag:k]
        rhs = data[k]
        for flrg in self.generate_lhs_flrg_fuzzyfied(lhs_sample):
            key = flrg.get_key()
            if key not in self.flrgs:
                self.flrgs[key] = flrg
            rule = self.flrgs[key]
            for st in rhs:
                rule.append_rhs(st)
def generate_flrg(self, data):
    """Train the rule base from crisp data.

    For each window of ``self.order`` values, the next value is
    fuzzyfied via ``self.fuzzyfication`` and appended to the
    right-hand side of every matching left-hand-side rule.
    """
    for k in np.arange(self.order, len(data)):
        if self.dump:
            print("FLR: " + str(k))
        lhs_sample = data[k - self.order:k]
        # Fuzzyfy the crisp right-hand-side value into fuzzy sets
        rhs = self.fuzzyfication(data[k])
        for flrg in self.generate_lhs_flrg(lhs_sample):
            key = flrg.get_key()
            if key not in self.flrgs:
                self.flrgs[key] = flrg
            rule = self.flrgs[key]
            for st in rhs:
                rule.append_rhs(st)
def generate_flrg(self, data):
    """Train the rule base from crisp data using the partitioner.

    For each window of ``self.max_lag`` values, the next value is
    fuzzyfied with ``FuzzySet.fuzzyfy`` (mode="sets", pruned by
    ``self.alpha_cut``) and appended to the right-hand side of every
    matching left-hand-side rule in ``self.flrgs``.
    """
    l = len(data)
    for k in np.arange(self.max_lag, l):
        # NOTE: removed an unused local `lags = {}` that was rebuilt
        # on every iteration but never read.
        if self.dump:
            print("FLR: " + str(k))
        sample = data[k - self.max_lag: k]
        rhs = FuzzySet.fuzzyfy(data[k], partitioner=self.partitioner,
                               mode="sets", alpha_cut=self.alpha_cut)
        flrgs = self.generate_lhs_flrg(sample)
        for flrg in flrgs:
            if flrg.get_key() not in self.flrgs:
                self.flrgs[flrg.get_key()] = flrg
            for st in rhs:
                self.flrgs[flrg.get_key()].append_rhs(st)
def generate_flrg(self, data):
    """Train the rule base from crisp data via alpha-cut membership.

    The right-hand side of each rule is the list of partitioner set
    names whose membership for the next value exceeds
    ``self.alpha_cut``, kept in the partitioner's ordering.
    """
    for k in np.arange(self.max_lag, len(data)):
        if self.dump:
            print("FLR: " + str(k))
        lhs_sample = data[k - self.max_lag:k]
        point = data[k]
        # Set names whose membership for `point` survives the alpha cut
        rhs = [name for name in self.partitioner.ordered_sets
               if self.sets[name].membership(point) > self.alpha_cut]
        for flrg in self.generate_lhs_flrg(lhs_sample):
            rule = self.flrgs.setdefault(flrg.get_key(), flrg)
            for st in rhs:
                rule.append_rhs(st)
def generate_flrg(self, data):
    """Train the rule base for multi-step-ahead forecasting.

    With ``self.standard_horizon == h``, the right-hand side of each
    rule is the fuzzyfication of the value ``h - 1`` steps beyond the
    end of the lag window, so the model learns to predict that
    horizon directly.
    """
    ahead = self.standard_horizon - 1
    for k in np.arange(self.max_lag, len(data) - ahead):
        if self.dump:
            print("FLR: " + str(k))
        window = data[k - self.max_lag:k]
        # Fuzzyfy the target value `ahead` steps past the window
        rhs = self.partitioner.fuzzyfy(data[k + ahead], mode="sets",
                                       alpha_cut=self.alpha_cut)
        for flrg in self.generate_lhs_flrg(window):
            key = flrg.get_key()
            if key not in self.flrgs:
                self.flrgs[key] = flrg
            for st in rhs:
                self.flrgs[key].append_rhs(st)
def forecast(self, ndata, **kwargs):
    """Point forecast combining rule midpoints per ``self.defuzzy``.

    For each window of ``self.order`` values the matching rules are
    collected; their midpoints are combined either by a
    membership-weighted average ('weighted') or a NaN-aware mean
    ('mean').  Input no longer than the lag window is returned as-is.
    """
    ret = []
    l = len(ndata)
    if l <= self.order:
        return ndata
    for k in np.arange(self.order, l + 1):
        sample = ndata[k - self.order:k]
        flrgs = self.generate_lhs_flrg(sample)
        memberships = []
        midpoints = []
        for flrg in flrgs:
            if flrg.get_key() not in self.flrgs:
                # Unknown rule: naive fallback to the centroid of the last LHS set.
                # NOTE(review): assumes flrg.LHS is non-empty here — sibling
                # forecast methods guard with len(flrg.LHS) > 0; confirm.
                midpoints.append(self.sets[flrg.LHS[-1]].centroid)
            else:
                f = self.flrgs[flrg.get_key()]
                if f.midpoint is None:
                    # Lazily cache the rule midpoint (mutates the stored rule)
                    f.midpoint = np.nanmean(f.get_midpoints(self.sets), axis=0)
                midpoints.append(f.get_midpoint(self.sets))
            if self.defuzzy == 'weighted':
                # Rule weight: product of per-lag memberships of the sample
                mvs = []
                for i in np.arange(self.order):
                    mvs.append(self.sets[flrg.LHS[i]].membership(sample[i]))
                memberships.append(np.prod(mvs))
        if self.defuzzy == 'weighted':
            # Membership-weighted average of the collected midpoints
            mv_midps = [x * y for x, y in zip(midpoints, memberships)]
            ret.append(np.sum(mv_midps, axis=0) / np.sum(memberships))
        elif self.defuzzy == 'mean':
            ret.append(np.nanmean(midpoints, axis=0))
    return ret
def forecast(self, ndata, **kwargs):
    """Point forecast combining rule midpoints per ``self.defuzzy``.

    'mean' takes the NaN-aware mean of the matched rule midpoints;
    any other value produces a membership-weighted average.  Unknown
    rules fall back to the centroid and membership of their last
    left-hand-side set.
    """
    predictions = []
    for k in np.arange(self.order, len(ndata) + 1):
        sample = ndata[k - self.order:k]
        midpoints = []
        memberships = []
        for flrg in self.generate_lhs_flrg(sample):
            key = flrg.get_key()
            if key in self.flrgs:
                rule = self.flrgs[key]
                midpoints.append(rule.get_midpoint(self.partitioner.sets))
                memberships.append(rule.get_membership(sample, self.partitioner.sets))
            elif len(flrg.LHS) > 0:
                # Naive fallback on the last LHS fuzzy set
                naive = self.partitioner.sets[flrg.LHS[-1]]
                midpoints.append(naive.centroid)
                memberships.append(naive.membership(sample[-1]))
        if self.defuzzy == "mean":
            final = np.nanmean(midpoints)
        else:
            weighted = [mp * mv for mp, mv in zip(midpoints, memberships)]
            final = np.nansum(weighted, axis=0) / np.nansum(memberships)
        predictions.append(final)
    return predictions
def forecast(self, ndata, **kwargs):
    """Point forecast with optional explanation logging.

    Keyword arguments:
      explain   -- log each rule match and the defuzzyfication via
                   ``self.append_log`` and forecast a single step.
      fuzzyfied -- input is already fuzzyfied; memberships are not
                   computed and defuzzyfication falls back to the mean.
      mode      -- 'mean' (default) averages midpoints; anything else
                   weights them by rule membership.
    """
    explain = kwargs.get('explain', False)
    fuzzyfied = kwargs.get('fuzzyfied', False)
    mode = kwargs.get('mode', 'mean')
    ret = []
    # Explain mode restricts the run to one forecasting step
    l = self.max_lag + 1 if explain else len(ndata)
    if l < self.max_lag:
        return ndata
    if l == self.max_lag:
        l += 1
    for k in np.arange(self.max_lag, l):
        sample = ndata[k - self.max_lag:k]
        if fuzzyfied:
            flrgs = self.generate_lhs_flrg_fuzzyfied(sample, explain)
        else:
            flrgs = self.generate_lhs_flrg(sample, explain)
        midpoints = []
        memberships = []
        for flrg in flrgs:
            key = flrg.get_key()
            if key in self.flrgs:
                rule = self.flrgs[key]
                mp = rule.get_midpoint(self.partitioner.sets)
                mv = None if fuzzyfied else rule.get_membership(sample, self.partitioner.sets)
                midpoints.append(mp)
                memberships.append(mv)
                if explain:
                    self.append_log("Rule Matching",
                                    "{}, Midpoint: {} Membership: {}".format(rule.get_key(), mp, mv))
            elif len(flrg.LHS) > 0:
                # Unknown rule: naive fallback on the last LHS set
                naive = self.partitioner.sets[flrg.LHS[-1]]
                mp = naive.centroid
                mv = None if fuzzyfied else naive.membership(sample[-1])
                midpoints.append(mp)
                memberships.append(mv)
                if explain:
                    self.append_log("Rule Matching",
                                    "{} -> {} (Naïve) Midpoint: {}".format(str(flrg.LHS), flrg.LHS[-1], mp))
        if mode == "mean" or fuzzyfied:
            final = np.nanmean(midpoints)
            if explain:
                self.append_log("Deffuzyfication", "By Mean: {}".format(final))
        else:
            final = np.dot(midpoints, memberships) / np.nansum(memberships)
            if explain:
                self.append_log("Deffuzyfication", "By Memberships: {}".format(final))
        ret.append(final)
    return ret