def friedman_posthoc_tests(experiment_pivot_df):
    """Returns p-value tables for various Friedman posthoc tests.

    Results should be considered only if the Friedman test rejects the
    null hypothesis.
    """
    posthoc_tests = {}
    posthoc_tests['conover'] = sp.posthoc_conover_friedman(experiment_pivot_df)
    posthoc_tests['nemenyi'] = sp.posthoc_nemenyi_friedman(experiment_pivot_df)
    return posthoc_tests
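# --- Usage sketch (not from the original source) ---
# A minimal example of calling friedman_posthoc_tests. It assumes `sp` is
# scikit_posthocs and that experiment_pivot_df has one row per block/subject
# and one column per condition, which is the layout the *_friedman posthoc
# functions expect. The column names and values below are hypothetical.
import pandas as pd
import scipy.stats as stats
import scikit_posthocs as sp

example_pivot = pd.DataFrame({
    'cond_a': [0.71, 0.68, 0.80, 0.75, 0.73],
    'cond_b': [0.65, 0.60, 0.72, 0.70, 0.66],
    'cond_c': [0.55, 0.58, 0.66, 0.61, 0.59],
})

# Run the omnibus Friedman test first; read the posthoc tables only if it rejects.
_, friedman_p = stats.friedmanchisquare(*(example_pivot[c] for c in example_pivot))
if friedman_p < 0.05:
    tables = friedman_posthoc_tests(example_pivot)
    print(tables['conover'])
    print(tables['nemenyi'])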
_, norm_p2 = stats.shapiro(inttype_accuracy.CONTROL)
_, norm_p3 = stats.shapiro(inttype_accuracy.BUTTON)
_, norm_p4 = stats.shapiro(inttype_accuracy.TOUCH)
_, var_p = stats.levene(inttype_accuracy.CONTROL,
                        inttype_accuracy.BUTTON,
                        inttype_accuracy.TOUCH,
                        center='median')

# If any condition deviates from normality, fall back to the non-parametric
# Friedman test; otherwise run a repeated-measures ANOVA.
if norm_p2 < 0.05 or norm_p3 < 0.05 or norm_p4 < 0.05:
    _, anova_p = stats.friedmanchisquare(inttype_accuracy.CONTROL,
                                         inttype_accuracy.BUTTON,
                                         inttype_accuracy.TOUCH)
    if anova_p < 0.05:
        print("conover test anova_result:", anova_p,
              sp.posthoc_conover_friedman(inttype_accuracy))
else:
    melted_df = pd.melt(inttype_accuracy.reset_index(),
                        id_vars="subject",
                        var_name="experiment_type",
                        value_name="accuracy")
    anova_result = stats_anova.AnovaRM(melted_df, "accuracy", "subject",
                                       ["experiment_type"])
    print("repeated measures ANOVA: ", anova_result.fit())

    melted_df = pd.melt(inttype_accuracy,
                        var_name="experiment_type",
                        value_name="accuracy")
    print("levene result", var_p)
    # gamesHowellTest(melted_df, "experiment_type", "accuracy")
    multicomp_result = multicomp.MultiComparison(
        np.array(melted_df.dropna(how='any').accuracy, dtype="float64"),
        melted_df.dropna(how='any').experiment_type)
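# --- Assumed imports (sketch, not part of the original snippet) ---
# The names used above are consistent with the following modules; this is an
# assumption, since the snippet does not show its import block.
import numpy as np
import pandas as pd
import scipy.stats as stats
import scikit_posthocs as sp
from statsmodels.stats import anova as stats_anova
from statsmodels.stats import multicomp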
]: print("Testing using different OWA weights") results[emo].append( main(data_frame=dat, mlm_method=current_methods[emo](owa), evaluation=5)[0]) results[emo].append( main(data_frame=dat, mlm_method=recomended_methods[emo], evaluation=5)[0]) resultsdf = pd.DataFrame(results) friedman = st.friedmanchisquare(*resultsdf.values) print("The statistic for FRNN") print(friedman) if friedman.pvalue < 0.05: p_values = hocs.posthoc_conover_friedman(resultsdf.T, p_adjust="holm") p_values.to_excel(r"Final Output\results_OWA_operators_FRNN_p_values.xlsx") ranks = pd.DataFrame(columns=resultsdf.keys()) for key in resultsdf.keys(): ranks[key] = resultsdf[key].rank(ascending=False) resultsdf["Totals"] = resultsdf.sum(axis=1) / 4 resultsdf["Ranks"] = ranks.mean(axis=1) resultsdf["Nearest Neighbours"] = [ "Strict", "Invadd", "Additive", "Exponential", "Mean", "Trimmed", "Recommended" ] resultsdf.to_excel(r"Final Output\results_OWA_operators_FRNN.xlsx") ## # For the baseline methods determine optimal k, using the optimal weighting schemes for FRNN per dataset. methods = [
medias = df2.mean()
aver1 = [medias[0], medias[2], medias[4], medias[6], medias[8]]
aver2 = [medias[1], medias[3], medias[5], medias[7], medias[9]]
# plt.plot([5,10,15,20,25], aver1, [5,10,15,20,25], aver2,)
print(medias)
# sns.boxplot(data=df, x="Cases", y="TTotal", hue=df.Cases.tolist())

fig, ax = plt.subplots(1, 1, figsize=(10, 5))
part = [
    23.035425186157227, 9.456007719039917, 8.742131471633911,
    10.28268098831176, 11.558128356933594
]
# ax.plot([5,10,15,20,25], aver1, marker='o')
ax.plot([5, 10, 15, 20, 25], part, marker='o')
ax.set_xlabel("K", fontsize=15)
ax.set_ylabel("sec.", fontsize=15)
ax.xaxis.set_major_locator(mpl.ticker.FixedLocator([5, 10, 15, 20, 25]))
plt.show()

print(pg.friedman(data=df2))
t = sp.posthoc_conover_friedman(a=df2)
print(t)
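# --- Assumed setup for the block above (sketch) ---
# The snippet appears to rely on the libraries below; `df2` is assumed to be
# a wide DataFrame with one column per condition (K value) and one row per
# repetition, a layout accepted by both pg.friedman and
# sp.posthoc_conover_friedman.
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import pingouin as pg
import scikit_posthocs as sp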
print("Task1 Completion Time:\n", task1_time_avg) print("Task1 Completion Time 95\% CI:\n", task1_time_ci) task1_acc_detail = [[ np.average(task1_acc[:, i, k, j]) for i in range(8) for j in range(2) ] for k in range(7)] task1_time_detail = [[ np.average(task1_time[:, i, k, j]) for i in range(8) for j in range(2) ] for k in range(7)] acc_fm_res = stats.friedmanchisquare(*task1_acc_detail) time_fm_res = stats.friedmanchisquare(*task1_time_detail) print(acc_fm_res) print( sp.posthoc_conover_friedman(np.array(task1_acc_detail).T, p_adjust=None)) print(time_fm_res) print(sp.posthoc_conover_friedman(np.array(task1_time_detail).T)) print() print("E2") # Task 2 - density_2 task2_acc = np.zeros((total_obtain_answers, 8, 7, 2)).astype(np.int) task2_time = np.zeros((total_obtain_answers, 8, 7, 2)).astype(np.float) learning_effect_acc = np.zeros( (total_obtain_answers, 8, 14)).astype(np.float) learning_effect_time = np.zeros( (total_obtain_answers, 8, 14)).astype(np.float) task2_personal = np.zeros((total_obtain_answers)).astype(np.float) offset = 2 + 9 * 2 * 7 + 20
# inttype_accuracy = inttype_accuracy.append(buf, ignore_index=True)
inttype_accuracy.mean()
inttype_accuracy.std()
# inttype_accuracy_cross = pd.crosstab(inttype_accuracy.experiment, inttype_accuracy.result)
# stats.chi2_contingency(inttype_accuracy_cross)

_, norm_p1 = stats.shapiro(inttype_accuracy.BASELINE)
_, norm_p2 = stats.shapiro(inttype_accuracy.CONTROL)
_, norm_p3 = stats.shapiro(inttype_accuracy.BUTTON)
_, norm_p4 = stats.shapiro(inttype_accuracy.TOUCH)
_, var_p = stats.levene(inttype_accuracy.BASELINE,
                        inttype_accuracy.CONTROL,
                        inttype_accuracy.BUTTON,
                        inttype_accuracy.TOUCH,
                        center='median')

# If any condition deviates from normality, fall back to the non-parametric
# Friedman test; otherwise run a repeated-measures ANOVA.
if norm_p1 < 0.05 or norm_p2 < 0.05 or norm_p3 < 0.05 or norm_p4 < 0.05:
    _, anova_p = stats.friedmanchisquare(inttype_accuracy.BASELINE,
                                         inttype_accuracy.CONTROL,
                                         inttype_accuracy.BUTTON,
                                         inttype_accuracy.TOUCH)
    if anova_p < 0.05:
        print("conover test anova_result:", anova_p,
              sp.posthoc_conover_friedman(inttype_accuracy))
else:
    melted_df = pd.melt(inttype_accuracy.reset_index(),
                        id_vars="subject",
                        var_name="experiment_type",
                        value_name="accuracy")
    anova_result = stats_anova.AnovaRM(melted_df, "accuracy", "subject",
                                       ["experiment_type"])
    print("repeated measures ANOVA: ", anova_result.fit())

    melted_df = pd.melt(inttype_accuracy,
                        var_name="experiment_type",
                        value_name="accuracy")
    print("levene result", var_p)
    # gamesHowellTest(melted_df, "experiment_type", "accuracy")
    multicomp_result = multicomp.MultiComparison(
        np.array(melted_df.dropna(how='any').accuracy, dtype="float64"),
        melted_df.dropna(how='any').experiment_type)
    print(multicomp_result.tukeyhsd().summary())

subject_accuracy = inttype_accuracy.T
_, anova_p = stats.friedmanchisquare(
    subject_accuracy.ando,
    subject_accuracy.aso,
    subject_accuracy.hikosaka,