def main():
    """Compare cloud configuration quality against a meta scenario.

    Command-line arguments:
        --conf/-c: one or more configuration scenario folders.
        --bench/-b: one or more benchmark folders for the cloud runs.
        --meta/-m: the meta scenario folder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf', '-c', nargs='+')
    parser.add_argument('--bench', '-b', nargs='+')
    parser.add_argument('--meta', '-m')
    args = parser.parse_args()

    # Read in benchmark coefficient of variance for the cloud runs.
    cloud_stats = boxplot_benchmark.benchmark_stats(args.bench)
    cloud_coef = np.mean(
        np.array(cloud_stats['stds']) / np.array(cloud_stats['means']))

    # Same coefficient for the meta benchmark run.
    # NOTE(review): meta_coef is computed but never reported below — confirm
    # whether it should appear in the output or be dropped.
    meta_stats = boxplot_benchmark.benchmark_stats(['bench-' + args.meta])
    meta_coef = np.mean(
        np.array(meta_stats['stds']) / np.array(meta_stats['means']))

    # Read in configuration quality across all VMs.
    _, data = paths.sorted_scenario_data(args.conf)
    vm_qualities = np.hstack(data)

    # Read in meta scenario quality.
    _, meta_data = paths.sorted_scenario_data([args.meta])
    meta_qualities = np.hstack(meta_data)

    # Difference between mean cloud quality and mean meta quality.
    mean_diff = np.mean(vm_qualities) - np.mean(meta_qualities)

    # Emit a CSV-style header row followed by the values.
    print(', '.join(['Scenario',
                     'Total difference between cloud and meta',
                     'Mean coefficient of variance']))
    print(', '.join(map(str, [
        os.path.basename(os.path.dirname(os.path.abspath(args.meta))),
        mean_diff,
        cloud_coef])))
def compare_validation_groups(folders):
    """Compare scenario groups separated by literal 'vs.' markers.

    The flat ``folders`` list is split into groups at each occurrence of
    the string ``'vs.'``.  The first group is then compared against every
    following group with a hybrid permutation test.

    Args:
        folders: list of folder names with ``'vs.'`` entries delimiting
            consecutive groups.

    Returns:
        1 on usage error (no ``'vs.'`` delimiter present), otherwise None.
    """
    groups = []
    rest = folders
    # Split the flat argument list into groups at each 'vs.' marker;
    # list.index raises ValueError once no marker remains.
    try:
        while True:
            delimiter = rest.index('vs.')
            groups.append(rest[:delimiter])
            rest = rest[delimiter + 1:]
    except ValueError:
        # No further delimiter: the remainder forms the last group.
        groups.append(rest)
    if len(groups) == 1:
        # Fixed typo in the user-facing message ('Seperate' -> 'Separate').
        logging.critical('Separate groups by vs.')
        return 1
    data = (paths.sorted_scenario_data(group)[1] for group in groups)
    prev = None
    # Bonferroni-style correction for the number of pairwise comparisons.
    print('Adapted significance level {:0.4f}'.format(
        adapted_significance(len(groups) - 1)))
    for current in data:
        # Bug fix: the original 'if not prev:' re-assigned prev whenever the
        # first group's data was falsy (e.g. an empty list) and is ambiguous
        # for numpy arrays; test identity against None instead.
        if prev is None:
            prev = current
            continue
        p, diff = test.hybrid_permutation(np.hstack(prev), np.hstack(current))
        print('Significance level {:0.4f} with mean difference {}'.format(
            p, diff))
def compare_validation_results(folders):
    """Load the scenario data for *folders* and compare it by factor."""
    scenarios, results = paths.sorted_scenario_data(folders)
    compare_validation_factor(scenarios, results)