Code example #1
0
    def fit(self, data):
        """Fit the model: rebuild the rule base and partitions from *data*."""
        # Start from an empty rule base
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)

        # Universe-of-discourse bounds
        spread = np.max(data) - np.min(data)
        if self.bound_type == 'min-max':
            lower = np.min(data) - spread * 0.1
            upper = np.max(data) + spread * 0.1
        else:  # self.bound_type == 'mu-sigma'
            mean, std = np.mean(data), np.std(data)
            lower = mean - std * self.sigma_multiplier
            upper = mean + std * self.sigma_multiplier

        # Regenerate the fuzzy partitions over [lower, upper]
        if self.partitionner == 'uniform':
            self.partitions = pu.generate_t_partitions(self.nsets, lower, upper)
        else:  # self.partitionner == 'knn'
            self.partitions = pu.generate_t_partitions_knn(data, self.nsets, lower, upper)

        # Populate the rule base: one rule per sliding window of
        # length order + 1
        for start in range(len(data) - self.order):
            window = data[start:start + self.order + 1]
            memberships = pf.t_pertinence_list(window, self.partitions)
            rule = rbm.generate_rule(memberships)
            rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
Code example #2
0
File: fts_stream.py  Project: rcpsilva/InFTS2018-12
    def retrain(self, data):
        """Discard the current model and refit rule base and partitions on *data*."""
        # Fresh, empty rule base
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)

        # Recompute universe-of-discourse bounds and regenerate partitions
        spread = np.max(data) - np.min(data)
        if self.bound_type == 'min-max':
            lower = np.min(data) - spread * 0.1
            upper = np.max(data) + spread * 0.1
        else:  # self.bound_type == 'mu-sigma'
            lower = np.mean(data) - np.std(data) * self.sigma_multiplier
            upper = np.mean(data) + np.std(data) * self.sigma_multiplier
        self.partitions = pu.generate_t_partitions(self.nsets, lower, upper)

        # Repopulate the rule base: one rule per sliding window of
        # length order + 1
        for start in range(len(data) - self.order):
            window = data[start:start + self.order + 1]
            memberships = pf.t_pertinence_list(window, self.partitions)
            rule = rbm.generate_rule(memberships)
            rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
Code example #3
0
File: fts_concrete.py  Project: rcpsilva/InFTS2018-12
    def __init__(self, nsets, order, lb, ub):
        """Concrete FTS model over a fixed universe of discourse [lb, ub].

        Args:
            nsets: number of fuzzy sets (partitions).
            order: model order (length of the rule antecedent).
            lb: lower bound of the universe of discourse.
            ub: upper bound of the universe of discourse.
        """
        self.nsets = nsets
        self.order = order
        self.lb = lb
        self.ub = ub

        # Fuzzy-set labels are simply the partition indices 0..nsets-1
        self.fuzzy_sets = np.arange(nsets)
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, order)
        self.partitions = pu.generate_t_partitions(nsets, lb, ub)

        # No alpha-cut filtering by default
        self.alpha_cut = 0
Code example #4
0
File: fts_stream.py  Project: rcpsilva/InFTS2018-12
    def translate(self, data):
        """Shift/extend the partitions to cover *data* and remap the rule base.

        New partitions are generated over the updated universe of discourse;
        old partitions whose centers are not covered by the new ones are kept,
        so the number of sets can grow. The rule base is remapped onto the
        merged partitions and one new rule is extracted from the tail of
        *data*.
        """

        # Lazily create the rule base on first call
        if not self.rule_base:
            self.rule_base = rbm.init_rule_base(self.fuzzy_sets, self.order)

        # New universe-of-discourse bounds.
        # NOTE(review): the min-max margin here is 0.5 * range, while
        # fit/retrain use 0.1 — confirm the asymmetry is intentional.
        if self.bound_type == 'min-max':
            data_range = np.max(data) - np.min(data)
            lb = np.min(data) - data_range * 0.5
            ub = np.max(data) + data_range * 0.5
        else:  # self.bound_type == 'mu-sigma'
            lb = np.mean(data) - np.std(data) * self.sigma_multiplier
            ub = np.mean(data) + np.std(data) * self.sigma_multiplier

        # Snapshot the current partitions (shallow copy) so they can be
        # merged with the regenerated ones below
        if self.partitions:
            old_partitions = self.partitions[:]
        else:
            old_partitions = pu.generate_t_partitions(self.nsets, lb, ub)

        # 1) Compute the new partitions
        self.partitions = pu.generate_t_partitions(self.nsets, lb, ub)

        # 2) Verify the pertinence of the old sets' centers with respect to
        #    the new partitions (each partition is indexed as p[1] = center)
        old_centers = [p[1] for p in old_partitions]
        f = fuzzify_x_list_t(old_centers, self.partitions)

        # 3) Compute the final set of partitions: the new ones plus every old
        #    partition whose center was not matched (f[i] < 0 — presumably
        #    "no covering set"; verify against fuzzify_x_list_t), sorted by
        #    center value
        up_partitions = self.partitions + [
            old_partitions[i] for i in range(len(f)) if f[i] < 0
        ]
        up_partitions = sorted(up_partitions, key=lambda n_p: n_p[1])
        self.partitions = up_partitions
        self.nsets = len(self.partitions)

        # 4) Compute the mappings required to update the rule base
        #    (old set index -> index in the merged partition list)
        map_old_new = fuzzify_x_list_t(old_centers, self.partitions)

        # 5) Update rules so they refer to the merged set of partitions
        self.rule_base = rbm.update_rule_base(self.rule_base, map_old_new,
                                              np.arange(len(self.partitions)),
                                              self.order)

        # 6) Add a new rule from the last (order + 1)-long window of data
        # Get pertinences
        pertinence_list = pf.t_pertinence_list(
            data[(len(data) - self.order - 1):], self.partitions)

        # Build rule
        rule = rbm.generate_rule(pertinence_list)

        # Add rule to the rule base
        rbm.add_rule(self.rule_base, rule, self.nsets, self.order)
Code example #5
0
    def __init__(self, nsets, order, window_size, bound_type='min-max', partitionner='uniform'):
        """Sliding-window FTS model.

        Args:
            nsets: number of fuzzy sets (partitions).
            order: model order (length of the rule antecedent).
            window_size: number of recent samples kept for refitting.
            bound_type: 'min-max' or 'mu-sigma' universe-of-discourse bounds.
            partitionner: 'uniform' or 'knn' partition generator.
        """
        # Model configuration
        self.nsets = nsets
        self.order = order
        self.window_size = window_size
        self.bound_type = bound_type
        self.partitionner = partitionner

        # Fuzzy-set labels are the partition indices 0..nsets-1
        self.fuzzy_sets = np.arange(nsets)
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, order)

        self.window = []      # stores the last "order" data points
        self.partitions = []  # built when the model is fitted
        self.alpha_cut = 0    # no alpha-cut filtering by default

        # Spread multiplier (~2.7 std devs) for 'mu-sigma' bounds
        self.sigma_multiplier = 2.698
Code example #6
0
    def __init__(self,
                 nsets,
                 order,
                 bound_type,
                 mod='translate',
                 del_strategy='delete_bad',
                 partitionner='uniform',
                 translation_threshold=0.5):
        """Incremental FTS model with translation and deletion strategies.

        Args:
            nsets: number of fuzzy sets (partitions).
            order: model order (length of the rule antecedent).
            bound_type: 'min-max' or 'mu-sigma' universe-of-discourse bounds.
            mod: adaptation mode (default 'translate').
            del_strategy: deletion strategy (default 'delete_bad').
            partitionner: 'uniform' or 'knn' partition generator.
            translation_threshold: threshold that triggers a translation.
        """
        # Model configuration
        self.nsets = nsets
        self.order = order
        self.bound_type = bound_type
        self.mod = mod
        self.del_strategy = del_strategy
        self.partitionner = partitionner
        self.translation_threshold = translation_threshold

        # Universe of discourse — unknown until data is seen
        self.lb = None
        self.ub = None

        # Fuzzy sets, rule base and partitions
        self.fuzzy_sets = np.arange(nsets)
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, order)
        self.partitions = None

        self.alpha_cut = 0    # no alpha-cut filtering by default
        self.window = []      # stores the last "order" data points
        self.window_size = order + 1

        # Per-cluster bookkeeping (presumably used by the knn
        # partitionner — verify against callers)
        self.centers = []
        self.samples_per_cluster = np.ones(nsets)

        # Running statistics of the stream (maintained elsewhere)
        self.mu = 0
        self.sigma = 0
        self.n = 0            # number of samples seen
        self.sigma_multiplier = 2.698
        self.min_val = 0
        self.max_val = 0

        self.last_forecast = []
        self.delete_count = 0
Code example #7
0
    def __init__(self, nsets, order, bound_type):
        """Basic incremental FTS model.

        Args:
            nsets: number of fuzzy sets (partitions).
            order: model order (length of the rule antecedent).
            bound_type: 'min-max' or 'mu-sigma' universe-of-discourse bounds.
        """
        # Model configuration
        self.nsets = nsets
        self.order = order
        self.bound_type = bound_type

        # Universe of discourse — unknown until data is seen
        self.lb = None
        self.ub = None

        # Fuzzy sets, rule base and partitions
        self.fuzzy_sets = np.arange(nsets)
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, order)
        self.partitions = None

        self.alpha_cut = 0    # no alpha-cut filtering by default
        self.window = []      # stores the last "order" data points
        self.window_size = order + 1

        # Running statistics of the stream (maintained elsewhere)
        self.mu = 0
        self.sigma = 0
        self.n = 0            # number of samples seen
        self.sigma_multiplier = 2.698
        self.min_val = 0
        self.max_val = 0
Code example #8
0
    def __init__(self, nsets, order, deletion, bound_type='min-max', translation_threshold=0.5):
        """Incremental FTS model with an optional deletion policy.

        Args:
            nsets: number of fuzzy sets (partitions).
            order: model order (length of the rule antecedent).
            deletion: deletion policy flag/strategy.
            bound_type: 'min-max' or 'mu-sigma' universe-of-discourse bounds.
            translation_threshold: threshold that triggers a translation.
        """
        # Model configuration
        self.deletion = deletion
        self.nsets = nsets
        self.order = order
        self.bound_type = bound_type
        self.translation_threshold = translation_threshold

        # Fuzzy sets, rule base and partitions
        self.fuzzy_sets = np.arange(nsets)
        self.rule_base = rbm.init_rule_base(self.fuzzy_sets, order)
        self.partitions = None

        self.alpha_cut = 0    # no alpha-cut filtering by default
        self.window = []      # stores the last "order" data points
        self.window_size = order + 1

        # Running statistics of the stream (maintained elsewhere)
        self.mu = 0
        self.sigma = 0
        self.n = 0            # number of samples seen
        self.sigma_multiplier = 2.698
        self.last_forecast = None
        self.min_val = 0
        self.max_val = 0