# Example 1
    def predict(self, x):
        """Forecast the next value from observation *x*, updating model state.

        While the sliding window is still filling up, the model persists:
        it stores *x*, refreshes the running statistics and returns *x*
        itself as the forecast. Once the window is full it optionally
        prunes rules that produced a wrong linguistic value, slides the
        window, refits, and returns a weighted-average forecast.
        """
        if len(self.window) < self.window_size:
            # Not enough history yet: persist the input as the forecast.
            self.window.append(x)
            self.n += 1
            self.update_mu_and_sigma(x)
            self.update_min_and_max(x)
            self.last_forecast = x
            return x

        if self.deletion and self.partitions:
            # Compare the linguistic value of the observation with the one
            # of the last forecast; on a mismatch, find the rules that led
            # to the wrong prediction and empty them out.
            observed = fuzzify_x_list_t([x], self.partitions)[0]
            predicted = fuzzify_x_list_t([self.last_forecast], self.partitions)[0]
            if observed != predicted:
                bad_rules = find_inappropriate_rules(self.window[1:], self.alpha_cut,
                                                     self.partitions, self.nsets,
                                                     self.order)
                for rule in bad_rules:
                    self.rule_base[1][rule] = set()

        # Slide the window and refresh the running statistics.
        self.window.pop(0)
        self.window.append(x)
        self.n += 1
        self.update_mu_and_sigma(x)
        self.update_min_and_max(x)

        # Refit on the updated window and produce the forecast.
        self.fit(self.window)
        forecast = forecast_weighted_average_t_sets(self.window[1:], self.rule_base,
                                                    self.alpha_cut, self.partitions,
                                                    self.nsets, self.order)
        self.last_forecast = forecast
        return forecast
# Example 2
 def clean_up(self, x):
     """Remove rules invalidated by the last forecast, if deletion is enabled.

     When the fuzzified linguistic value of the new observation *x*
     differs from that of the last forecast, the rules responsible for
     the wrong prediction are located over the last ``order`` window
     entries and emptied out of the rule base.
     """
     if not (self.deletion and self.partitions):
         return
     observed = fuzzify_x_list_t([x], self.partitions)[0]
     predicted = fuzzify_x_list_t([self.last_forecast], self.partitions)[0]
     if observed == predicted:
         # The model got the last linguistic value right: nothing to prune.
         return
     tail = self.window[len(self.window) - self.order:]
     for rule in find_inappropriate_rules(tail, self.alpha_cut,
                                          self.partitions, self.nsets,
                                          self.order):
         self.rule_base[1][rule] = set()
# Example 3
    def translate(self, data):
        """Re-partition the universe of discourse from *data* and migrate rules.

        Recomputes the triangular partitions from fresh data bounds, keeps
        any old set centers that no longer fuzzify into the new partitions,
        remaps the rule base onto the merged partition indices, and finally
        adds a rule built from the most recent observations.

        Parameters
        ----------
        data : array-like
            Numeric series used to derive the new bounds and the new rule.
        """
        if not self.rule_base:
            self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)

        # Hoist the summary statistics: each was previously computed twice.
        if self.bound_type == 'min-max':
            d_min, d_max = np.min(data), np.max(data)
            data_range = d_max - d_min
            lb = d_min - data_range * 0.5
            ub = d_max + data_range * 0.5
        else:  # any other value falls back to 'mu_sigma' bounds
            mu, sigma = np.mean(data), np.std(data)
            lb = mu - sigma * self.sigma_multiplier
            ub = mu + sigma * self.sigma_multiplier

        if self.partitions:
            old_partitions = self.partitions[:]
        else:
            old_partitions = pu.generate_t_partitions(self.nsets, lb, ub)

        # 1) Compute the new partitions over the updated bounds.
        self.partitions = pu.generate_t_partitions(self.nsets, lb, ub)

        # 2) Fuzzify the old set centers against the new partitions; a
        #    negative index means the center is no longer covered.
        old_centers = [p[1] for p in old_partitions]
        f = fuzzify_x_list_t(old_centers, self.partitions)

        # 3) Merge: keep the old partitions whose centers fell outside the
        #    new ones, then sort every partition by its center value.
        up_partitions = self.partitions + [
            old_partitions[i] for i in range(len(f)) if f[i] < 0
        ]
        self.partitions = sorted(up_partitions, key=lambda part: part[1])
        self.nsets = len(self.partitions)

        # 4) Mapping from old set indices to the merged partition indices.
        map_old_new = fuzzify_x_list_t(old_centers, self.partitions)

        # 5) Rewrite the rule base in terms of the new indices.
        self.rule_base = rbm.update_rule_base(self.rule_base, map_old_new,
                                              np.arange(len(self.partitions)),
                                              self.order)

        # 6) Build a rule from the last (order + 1) observations and add it.
        pertinence_list = pf.t_pertinence_list(
            data[(len(data) - self.order - 1):], self.partitions)
        rule = rbm.generate_rule(pertinence_list)
        rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
# Example 4
    def translate(self):
        """Rebuild the partitions from the current centers and remap the rules.

        Generates fresh triangular partitions from ``self.centers``, sorts
        them by center value, then maps the previous set centers onto the
        new partition indices (using the translation threshold) and rewrites
        the rule base accordingly.
        """
        previous = self.partitions[:]

        # 1) New partitions derived from the current set centers, ordered
        #    by their center value.
        new_partitions = pu.generate_t_partitions_from_centers(self.centers)
        self.partitions = sorted(new_partitions, key=lambda part: part[1])
        self.nsets = len(self.partitions)

        # 2) Map each previous center onto the new partition indices.
        previous_centers = [part[1] for part in previous]
        map_old_new = fuzzify_x_list_t(previous_centers, self.partitions,
                                       self.translation_threshold)

        # 3) Rewrite the rule base in terms of the new indices.
        self.rule_base = rbm.update_rule_base(self.rule_base, map_old_new,
                                              np.arange(len(self.partitions)),
                                              self.order)