def searchForBestAntecedent(self, example, clas):
    """Build a Rule whose antecedent picks, for each variable, the fuzzy
    label with the highest membership degree for *example*.

    Args:
        example: sequence of attribute values, one per variable
            (assumed numeric — TODO confirm against caller).
        clas: class label assigned to the generated rule.

    Returns:
        A Rule with class ``clas`` and one cloned label per variable.

    Raises:
        SystemExit: if some variable has zero membership for every
            label, i.e. the example cannot be covered at all.
    """
    rule_instance = Rule()
    rule_instance.setTwoParameters(self.n_variables, self.compatibilityType)
    rule_instance.setClass(clas)
    for i in range(0, self.n_variables):
        # `best_degree` replaces the original local `max`, which
        # shadowed the builtin of the same name.
        best_degree = 0.0
        etq = -1  # index of the best label; -1 means none found yet
        for j in range(0, self.n_labels):
            per = self.dataBase.membershipFunction(i, j, example[i])
            if per > best_degree:
                best_degree = per
                etq = j
        if best_degree == 0.0:
            # No label covers this value: report and abort, as before.
            print("There was an Error while searching for the antecedent of the rule")
            print("Example: ")
            for j in range(0, self.n_variables):
                # str() fixes the original TypeError when example
                # values are not strings (e.g. floats).
                print(str(example[j]) + "\t")
            print("Variable " + str(i))
            # Equivalent to exit(1) without depending on the `site`
            # interactive helper.
            raise SystemExit(1)
        rule_instance.antecedent[i] = self.dataBase.clone(i, etq)
    return rule_instance
def generate_negative_rules(self, train, confident_value_pass, zone_confident_pass):
    """Derive "negative" rules from the positive rule base.

    For every positive rule, try each other class value; keep the
    resulting rule when both its confident value and zone-confident
    value exceed the given thresholds and it is not a duplicate.
    Kept rules are appended to ``self.negative_rule_base_array``.

    Args:
        train: training data set, used to collect class values and
            prepare data rows.
        confident_value_pass: minimum confident value (exclusive).
        zone_confident_pass: minimum zone-confident value (exclusive).
    """
    class_value_arr = self.get_class_value_array(train)
    self.prepare_data_rows(train)
    for i in range(0, len(self.rule_base_array)):
        positive_rule_class_value = self.rule_base_array[i].get_class()
        print("the positive rule class value is " + str(positive_rule_class_value) + " ,the i is :" + str(i))
        for j in range(0, len(class_value_arr)):
            class_type = int(class_value_arr[j])
            if positive_rule_class_value == class_type:
                continue  # need a *different* class for a negative rule
            # BUG FIX: the original reused one mutable Rule across all
            # candidate classes, so rules already appended to
            # negative_rule_base_array were corrupted by later
            # setClass() calls. Build a fresh Rule per candidate.
            rule_negative = Rule(self.data_base)
            rule_negative.antecedent = self.rule_base_array[i].antecedent
            rule_negative.setClass(class_type)
            rule_negative.calculate_confident_support(self.data_row_array)
            print("Negative rule's confident value is :" + str(rule_negative.confident_value))
            if rule_negative.confident_value > confident_value_pass and rule_negative.zone_confident > zone_confident_pass:
                rule_negative.weight = rule_negative.confident_value
                if not self.duplicated_negative_rule(rule_negative):
                    for k in range(0, len(rule_negative.antecedent)):
                        # BUG FIX: original indexed antecedent[j]
                        # (class loop index) instead of antecedent[k].
                        print("antecedent L_ " + str(rule_negative.antecedent[k]))
                    self.negative_rule_base_array.append(rule_negative)
def searchForBestAntecedent(self, example, clas):
    """Build a Rule whose antecedent picks, for each variable, the fuzzy
    label with the highest membership degree for *example*, and attach
    a data_row recording the class, feature values and chosen labels.

    Args:
        example: sequence of attribute values, one per variable
            (assumed numeric — TODO confirm against caller).
        clas: class label assigned to the generated rule.

    Returns:
        A Rule with class ``clas``, one cloned label per variable, and
        ``data_row_here`` set to the matching data_row.

    Raises:
        SystemExit: if some variable has zero membership for every
            label, i.e. the example cannot be covered at all.
    """
    rule_instance = Rule()
    rule_instance.setTwoParameters(self.n_variables, self.compatibilityType)
    rule_instance.setClass(clas)
    # Snapshot the feature values used, for the data_row bookkeeping.
    example_feature_array = [example[f_variable] for f_variable in range(0, self.n_variables)]
    label_array = []  # best label index chosen for each variable
    for i in range(0, self.n_variables):
        # `best_degree` replaces the original local `max`, which
        # shadowed the builtin of the same name.
        best_degree = 0.0
        etq = -1  # index of the best label; -1 means none found yet
        for j in range(0, self.n_labels):
            per = self.dataBase.membershipFunction(i, j, example[i])
            if per > best_degree:
                best_degree = per
                etq = j
        if best_degree == 0.0:
            for j in range(0, self.n_variables):
                # str() fixes the original TypeError when example
                # values are not strings (e.g. floats).
                print(str(example[j]) + "\t")
            print("Variable " + str(i))
            # Equivalent to exit(1) without depending on the `site`
            # interactive helper.
            raise SystemExit(1)
        rule_instance.antecedent[i] = self.dataBase.clone(i, etq)
        label_array.append(etq)
    data_row_temp = data_row()
    data_row_temp.set_three_parameters(clas, example_feature_array, label_array)
    rule_instance.data_row_here = data_row_temp
    return rule_instance
def generate_negative_rules(self, train, confident_value_pass):
    """Derive "negative" rules from the positive rule base.

    For every positive rule, try each other class value; keep the
    resulting rule when its confident value reaches the threshold and
    it is not a duplicate. Kept rules are appended to
    ``self.negative_rule_base_array``.

    Args:
        train: training data set, used to collect class values.
        confident_value_pass: minimum confident value (inclusive).
    """
    class_value_arr = self.get_class_value_array(train)
    for i in range(0, len(self.ruleBase)):
        positive_rule_class_value = self.ruleBase[i].get_class()
        print("the positive rule class value is " + str(positive_rule_class_value) + " ,the i is :" + str(i))
        for j in range(0, len(class_value_arr)):
            class_type = int(class_value_arr[j])
            if positive_rule_class_value == class_type:
                continue  # need a *different* class for a negative rule
            # BUG FIX: the original reused one mutable Rule across all
            # candidate classes, so rules already appended to
            # negative_rule_base_array were corrupted by later
            # setClass() calls. Build a fresh Rule per candidate.
            rule_negative = Rule()
            rule_negative.antecedent = self.ruleBase[i].antecedent
            rule_negative.setClass(class_type)
            confident_value = rule_negative.calculate_confident(self.data_row_array)
            print("The calculation confident value is :" + str(confident_value))
            if confident_value >= confident_value_pass:
                rule_negative.weight = confident_value
                if not self.duplicated_negative_rule(rule_negative):
                    for k in range(0, len(rule_negative.antecedent)):
                        # BUG FIX: original indexed antecedent[j]
                        # (class loop index) instead of antecedent[k].
                        print("antecedent L_ " + str(rule_negative.antecedent[k].label))
                    print("class value " + str(rule_negative.get_class()))
                    print(" weight " + str(rule_negative.weight))
                    print("positive_rule_class_value" + str(positive_rule_class_value))
                    print("class_type" + str(class_type))
                    self.negative_rule_base_array.append(rule_negative)