def Apriori_LERS(FILENAME, iter1, iter2, minsup, minconf):
    # rule induction (load cached rules if present, otherwise induce them by Apriori)
    fullpath_filename = '/data/uci/'+FILENAME+'/apriori/'+'rules_'+str(iter1)+'-'+str(iter2)+'-'+str(minsup)+'-'+str(minconf)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else apriori.getRulesByApriori(FILENAME, iter1, iter2, minsup, minconf)
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # test data setup
    filepath = '/data/uci/'+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table_test = mlem2.getDecisionTable(filepath)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    filepath = '/data/uci/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)
    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # compute the accuracy
    accuracy = accuracy_score(list(map(str, decision_class)), predictions)
    #print('{FILENAME} : {iter1} {iter2}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2))
    # log the result
    logging.basicConfig(filename=os.path.dirname(os.path.abspath("__file__"))+'/'+FILENAME+'.log',
                        format='%(asctime)s,%(message)s', level=logging.DEBUG)
    logging.info('Apriori_LERS,{FILENAME},{iter1},{iter2},{acc},{minsup},{minconf}'.format(
        FILENAME=FILENAME, iter1=iter1, iter2=iter2, acc=accuracy, minsup=minsup, minconf=minconf))
    return accuracy
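# Illustrative driver (a sketch, not part of the original script): evaluate
# Apriori_LERS over the 10x10 cross-validation folds used elsewhere in this
# file and report the mean accuracy. The function name is a placeholder; the
# default minsup/minconf values are taken from the main block below.
def run_Apriori_LERS_all_folds(FILENAME, minsup=10, minconf=1.0):
    accuracies = [Apriori_LERS(FILENAME, iter1, iter2, minsup, minconf)
                  for iter1 in range(1, 11) for iter2 in range(1, 11)]
    return sum(accuracies) / len(accuracies)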
def MLEM2_LERS(FILENAME, iter1, iter2):
    # rule induction (load cached rules if present, otherwise induce them by MLEM2)
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules/'+'rules_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # test data setup
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table_test = mlem2.getDecisionTable(filepath)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)
    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # compute the accuracy
    accuracy = accuracy_score(decision_class, predictions)
    #print('{FILENAME} : {iter1} {iter2}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2))
    #logging.info('MLEM2_LERS,1,{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2,acc=accuracy))
    # append the result to a CSV file
    savepath = DIR_UCI+'/'+FILENAME+'/MLEM2_LERS.csv'
    with open(savepath, "a") as f:
        f.writelines('MLEM2_LERS,1,{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME, iter1=iter1, iter2=iter2, acc=accuracy)+"\n")
    return accuracy
def MLEM2_LERS(FILENAME, iter1, iter2):
    # rule induction and rule save
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules/'+'rules_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # test data setup
    decision_table_test, decision_class = getData(FILENAME, iter1, iter2, T="test")
    list_judgeNominal = getJudgeNominal(decision_table_test, FILENAME)
    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # compute the accuracy
    accuracy = accuracy_score(decision_class, predictions)
    # number of rules
    num = len(rules)
    # number of rules per decision class
    num_class = strNumClassRules(rules)
    # mean rule length
    mean_length = mlem2.getMeanLength(rules)
    # mean support and mean confidence (measured on the training data)
    decision_table_train, decision_class = getData(FILENAME, iter1, iter2, T="train")
    list_judgeNominal = getJudgeNominal(decision_table_train, FILENAME)
    mean_support, mean_conf = LERS.getSupportConfidenceRules(rules, decision_table_train, list_judgeNominal)
    # accuracy and recall of the rules
    acc_recall = LERS.getAccurayRecall(rules, decision_table_train, list_judgeNominal)
    # append the result to a CSV file
    savepath = DIR_UCI+'/'+FILENAME+'/fairness/00_normal/MLEM2_LERS.csv'
    with open(savepath, "a") as f:
        f.writelines('MLEM2_LERS,{FILENAME},{iter1},{iter2},{acc},{num},{num_class},{mean_length},{mean_support},{mean_conf},{acc_recall}'.format(
            FILENAME=FILENAME, iter1=iter1, iter2=iter2, acc=accuracy, num=num, num_class=num_class,
            mean_length=mean_length, mean_support=mean_support, mean_conf=mean_conf,
            acc_recall=strAccRecall(rules, acc_recall)) + "\n")
    return 0
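# The functions above and below call getData and getJudgeNominal, whose
# definitions are not shown here. A minimal sketch of what they plausibly do,
# reconstructed from the inline test-data setup used in the other functions in
# this file (the actual definitions may differ):
def getData(FILENAME, iter1, iter2, T="train"):
    # load the train/test decision table for the given fold and split off the decision class
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'-'+T+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table = mlem2.getDecisionTable(filepath)
    decision_table = decision_table.dropna()
    decision_class = decision_table[decision_table.columns[-1]].values.tolist()
    return decision_table, decision_class

def getJudgeNominal(decision_table, FILENAME):
    # load the nominal-attribute list and build the nominal/numeric judgement map
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    return mlem2.getJudgeNominal(decision_table, list_nominal)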
def MLEM2_OnlyK_LERS(FILENAME, iter1, iter2, k):
    print("START iter1 iter2 k : " + str(iter1) + "," + str(iter2) + "," + str(k))
    # rule induction
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules/'+'rules_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # only-k rule filter: keep rules supported by at least k training examples
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules_onlyK/'+'rules-'+str(k)+'_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else [r for r in rules if len(r.getSupport()) >= k]
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # test data setup
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table_test = mlem2.getDecisionTable(filepath)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)
    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # compute the accuracy
    accuracy = accuracy_score(decision_class, predictions)
    #print('{FILENAME} : {iter1} {iter2}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2))
    #logging.info('MLEM2_OnlyK_LERS,{k},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,k=k,iter1=iter1,iter2=iter2,acc=accuracy))
    # append the result to a CSV file
    savepath = DIR_UCI+'/'+FILENAME+'/MLEM2_OnlyK_LERS.csv'
    with open(savepath, "a") as f:
        f.writelines('MLEM2_OnlyK_LERS,{k},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME, k=k, iter1=iter1, iter2=iter2, acc=accuracy)+"\n")
    #print("END iter1 iter2 k : " + str(iter1) + "," + str(iter2) + "," + str(k))
    return accuracy
def MLEM2_RuleClusteringBySameCondition_LERS(FILENAME, iter1, iter2, k):
    # rule induction
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules/'+'rules_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # rule clustering
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules_cluster_same_condition/'+'rules-'+str(k)+'_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else clustering.getRuleClusteringBySameCondition(rules, k=k)
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # test data setup
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table_test = mlem2.getDecisionTable(filepath)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)
    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # compute the accuracy
    accuracy = accuracy_score(decision_class, predictions)
    #logging.info('MLEM2_RuleClusteringBySameCondition_LERS,{k},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,k=k,iter1=iter1,iter2=iter2,acc=accuracy))
    # append the result to a CSV file
    savepath = DIR_UCI+'/'+FILENAME+'/MLEM2_RuleClusteringBySameCondition_LERS.csv'
    with open(savepath, "a") as f:
        f.writelines('MLEM2_RuleClusteringBySameCondition_LERS,{k},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME, k=k, iter1=iter1, iter2=iter2, acc=accuracy)+"\n")
    return accuracy
def MLEM2_RuleClusteringByConsistentTimesSimExceptMRule_LERS(FILENAME, iter1, iter2, k, m):
    # rule induction
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules/'+'rules_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # rule clustering (the training data is needed for the similarity computation)
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table = mlem2.getDecisionTable(filepath)
    colnames = mlem2.getColNames(decision_table)
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table, list_nominal)
    # note: the cached filename encodes k only, not m
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules_cluster_consistent_times_sim_except_mrule/'+'rules-'+str(k)+'_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else clustering.getRuleClusteringByConsistentTimesSimilarityExceptMRule(rules, colnames, list_judgeNominal, k=k, m=m)
    # rule save
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # test data setup
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table_test = mlem2.getDecisionTable(filepath)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    filepath = DIR_UCI+'/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)
    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # compute the accuracy
    accuracy = accuracy_score(decision_class, predictions)
    #print('{FILENAME} : {iter1} {iter2}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2))
    #logging.info('MLEM2_RuleClusteringByConsistentSimExceptMRule_LERS,{k},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,k=k,iter1=iter1,iter2=iter2,acc=accuracy))
    # append the result to a CSV file
    savepath = DIR_UCI+'/'+FILENAME+'/MLEM2_RuleClusteringByConsistentTimesSimExceptMRule_LERS.csv'
    with open(savepath, "a") as f:
        f.writelines('MLEM2_RuleClusteringByConsistentTimesSimExceptMRule_LERS,{k},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME, k=k, iter1=iter1, iter2=iter2, acc=accuracy)+"\n")
    return accuracy
#rules_new = getRuleClusteringBySimilarity(rules, colnames, list_judgeNominal, k=3)
#rules_new = getRuleClusteringByRandom(rules, k=3)
#rules_new = getRuleClusteringBySameCondition(rules, k=3)
#rules_new = getRuleClusteringByConsistentSimilarity(rules, colnames, list_judgeNominal, k=3)
#rules_new = getRuleClusteringByConsistentSimilarityExceptMRule(rules, colnames, list_judgeNominal, k=3, m=3)
#rules_new = getRuleClusteringByConsistentTimesSimilarityExceptMRule(rules, colnames, list_judgeNominal, k=3, m=3)
rules_new = getRuleClusteringBySimilarityExceptMRule(rules, colnames, list_judgeNominal, k=3, m=3)
rules_new = getRuleClusteringByConsistentExceptMRule(rules, colnames, list_judgeNominal, k=3, m=3)

# predict by LERS
filepath = '/mnt/data/uci/'+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
decision_table_test = mlem2.getDecisionTable(filepath)
decision_table_test = decision_table_test.dropna()
decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
predictions = LERS.predictByLERS(rules_new, decision_table_test, list_judgeNominal)
print(accuracy_score(decision_class, predictions))

# check over all cross-validation folds
#for iter1 in range(1,11):
#    for iter2 in range(1,11):
#        print('i1:{iter1} i2:{iter2}'.format(iter1=iter1,iter2=iter2))
#        rules = mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
#        filepath = '/data/uci/'+FILENAME+'/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.tsv'
#        decision_table = mlem2.getDecisionTable(filepath)
#        colnames = mlem2.getColNames(decision_table)
#        filepath = '/data/uci/'+FILENAME+'/'+FILENAME+'.nominal'
#        list_nominal = mlem2.getNominalList(filepath)
#        list_judgeNominal = mlem2.getJudgeNominal(decision_table, list_nominal)
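# A sketch (not part of the original scratch code) for comparing a couple of
# the clustering variants listed above on the same fold. It assumes the
# rules / colnames / list_judgeNominal / decision_table_test / decision_class
# variables prepared in this scratch block, and uses the same unqualified
# clustering helpers called above.
for name, rules_variant in [
        ("SameCondition", getRuleClusteringBySameCondition(rules, k=3)),
        ("SimilarityExceptMRule", getRuleClusteringBySimilarityExceptMRule(rules, colnames, list_judgeNominal, k=3, m=3)),
]:
    predictions = LERS.predictByLERS(rules_variant, decision_table_test, list_judgeNominal)
    print(name, accuracy_score(decision_class, predictions))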
# ========================================
# main
# ========================================
if __name__ == "__main__":

    FILENAME = 'hayes-roth'
    iter1 = 4
    iter2 = 5
    minsup = 10
    minconf = 1.0

    rules = getRulesByApriori(FILENAME, iter1, iter2, minsup, minconf)

    # test data setup
    filepath = '/data/uci/'+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table_test = mlem2.getDecisionTable(filepath)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    filepath = '/data/uci/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)

    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)

    # compute the accuracy
    accuracy = accuracy_score(list(map(str, decision_class)), predictions)
    print(accuracy)
def MLEM2_delEAlphaRule_LERS(FILENAME, iter1, iter2, DELFUN, CLASS, ATTRIBUTE_VALUE, alpha):
    print(datetime.now().strftime('%Y/%m/%d %H:%M:%S') + ' ' + FILENAME + ' ' + str(iter1) + ' ' + str(iter2) + ' ' +
          DELFUN.__name__ + ' ' + strAttributeValue(ATTRIBUTE_VALUE) + ' ' + str(alpha) + ' ' + "START")
    # rule induction and rule save
    fullpath_filename = DIR_UCI+'/'+FILENAME+'/rules/'+'rules_'+str(iter1)+'-'+str(iter2)+'.pkl'
    rules = mlem2.loadPickleRules(fullpath_filename) if os.path.isfile(fullpath_filename) else mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
    if not os.path.isfile(fullpath_filename):
        mlem2.savePickleRules(rules, fullpath_filename)
    # train data setup
    decision_table_train, decision_class = getData(FILENAME, iter1, iter2, T="train")
    list_judgeNominal = getJudgeNominal(decision_table_train, FILENAME)
    # delete the elementary conditions of alpha-discriminatory rules, or delete the rules themselves (DELFUN decides)
    if CLASS != "ALL":
        rules_target = mlem2.getRulesClass(rules, CLASS)
        rules_nontarget = mlem2.getRulesClass(rules, CLASS, judge=False)
        for attr in ATTRIBUTE_VALUE:
            for e in ATTRIBUTE_VALUE[attr]:
                rules_target = DELFUN(rules_target, attr, e, decision_table_train, list_judgeNominal, alpha)
        rules_target.extend(rules_nontarget)
        rules = rules_target
    else:
        for attr in ATTRIBUTE_VALUE:
            for e in ATTRIBUTE_VALUE[attr]:
                rules = DELFUN(rules, attr, e, decision_table_train, list_judgeNominal, alpha)
    print(datetime.now().strftime('%Y/%m/%d %H:%M:%S') + ' ' + FILENAME + ' ' + str(iter1) + ' ' + str(iter2) + ' ' +
          DELFUN.__name__ + ' ' + strAttributeValue(ATTRIBUTE_VALUE) + ' ' + str(alpha) + ' ' + "RULES")
    # test data setup
    decision_table_test, decision_class = getData(FILENAME, iter1, iter2, T="test")
    list_judgeNominal = getJudgeNominal(decision_table_test, FILENAME)
    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)
    # compute the accuracy
    accuracy = accuracy_score(decision_class, predictions)
    # number of rules
    num = len(rules)
    # number of rules per decision class
    num_class = strNumClassRules(rules)
    # mean rule length
    mean_length = mlem2.getMeanLength(rules)
    # mean support and mean confidence (measured on the training data)
    list_judgeNominal = getJudgeNominal(decision_table_train, FILENAME)
    mean_support, mean_conf = LERS.getSupportConfidenceRules(rules, decision_table_train, list_judgeNominal)
    # accuracy and recall of the rules
    acc_recall = LERS.getAccurayRecall(rules, decision_table_train, list_judgeNominal)
    # append the result to a CSV file
    savepath = DIR_UCI+'/'+FILENAME+'/fairness/02_alpha_preserve/MLEM2_delEAlphaRule_LERS.csv'
    with open(savepath, "a") as f:
        f.writelines('MLEM2_delEAlphaRule_LERS,{DELFUN},{CLASS},{FILENAME},{ATTRIBUTE_VALUE},{alpha},{iter1},{iter2},{acc},{num},{num_class},{mean_length},{mean_support},{mean_conf},{acc_recall}'.format(
            DELFUN=DELFUN.__name__, CLASS=CLASS, FILENAME=FILENAME,
            ATTRIBUTE_VALUE=strAttributeValue(ATTRIBUTE_VALUE), alpha=alpha,
            iter1=iter1, iter2=iter2, acc=accuracy, num=num, num_class=num_class,
            mean_length=mean_length, mean_support=mean_support, mean_conf=mean_conf,
            acc_recall=strAccRecall(rules, acc_recall)) + "\n")
    print(datetime.now().strftime('%Y/%m/%d %H:%M:%S') + ' ' + FILENAME + ' ' + str(iter1) + ' ' + str(iter2) + ' ' +
          DELFUN.__name__ + ' ' + strAttributeValue(ATTRIBUTE_VALUE) + ' ' + str(alpha) + ' ' + "END")
    return 0
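# Illustrative call shape for MLEM2_delEAlphaRule_LERS (kept as a comment; all
# concrete values are hypothetical placeholders, not values used above).
# ATTRIBUTE_VALUE maps each sensitive attribute to the list of its values to
# treat as discriminatory, matching the nested
# "for attr in ATTRIBUTE_VALUE: for e in ATTRIBUTE_VALUE[attr]" loop above;
# DELFUN is the deletion function applied to each (attribute, value) pair;
# CLASS selects one decision class, or "ALL" to process every rule.
#
#   MLEM2_delEAlphaRule_LERS('some-dataset', 1, 1,
#                            DELFUN=some_delete_function,           # placeholder
#                            CLASS='ALL',
#                            ATTRIBUTE_VALUE={'some_attribute': ['some_value']},
#                            alpha=1.0)                             # placeholder threshold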