def run(g, samsize, interval, outdir):
    """
    Run the Spectral Test on the given generator and save the report.

    :param g: the random number generator.
    :param samsize: (int) the sample size.
    :param interval: the observation interval.
    :param outdir: (str) path to the output directory.
    """
    logger.info(
        "Spectral Test for Modulus {} Multiplier {} Samsize {} Interval {}".format(
            g.get_modulus(), g.get_multiplier(), samsize, interval))

    filename = path.join(
        outdir, "mod{}_mul{}".format(g.get_modulus(), g.get_multiplier()))

    # Statistics: [(u1, u2), (u2, u3), ..., (un-1, un)]
    test.statistics(filename + ".csv", g, samsize, interval)

    # Report
    r = SimpleReport("SPECTRAL TEST")
    r.add("Generator", "Class", g.__class__.__name__)
    r.add("Generator", "Modulus", g.get_modulus())
    r.add("Generator", "Multiplier", g.get_multiplier())
    r.add("Generator", "Seed", g.get_initial_seed())
    r.add("Test Parameters", "Sample Size", samsize)
    r.add("Test Parameters", "Interval", interval)
    r.save_txt(filename + "_report.txt")
    r.save_csv(filename + "_report.csv")

    logger.info("Report:\n{}".format(r))

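# A minimal sketch (not part of the module) of the generator interface that
# run() assumes: any object exposing get_modulus(), get_multiplier() and
# get_initial_seed() will do. This toy Lehmer generator is illustrative only;
# the repository's real generator class is richer (streams, jumpers, etc.).

class _ToyLehmer:
    """Toy Lehmer generator: x_{i+1} = (a * x_i) mod m."""

    def __init__(self, modulus=2147483647, multiplier=48271, seed=1):
        self._modulus = modulus        # the classic 2^31 - 1 prime modulus
        self._multiplier = multiplier  # a well-known full-period multiplier
        self._iseed = seed
        self._state = seed

    def get_modulus(self):
        return self._modulus

    def get_multiplier(self):
        return self._multiplier

    def get_initial_seed(self):
        return self._iseed

    def rnd(self):
        # Next pseudo-random value in (0, 1).
        self._state = (self._multiplier * self._state) % self._modulus
        return self._state / self._modulus
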
def run(modulus, multiplier, outdir=DEFAULT_OUTDIR):
    """
    Check whether the given multiplier is full-period (FP) and/or
    modulus-compatible (MC) for the given modulus, and save the report.

    :param modulus: (int) the modulus.
    :param multiplier: (int) the multiplier to check.
    :param outdir: (str) path to the output directory.
    """
    filename = path.join(outdir, "mod{}_mul{}".format(modulus, multiplier))

    logger.info("MC Check for Multiplier {} Modulus {}".format(multiplier, modulus))
    is_mcm = multiplier_check.is_mc_multiplier(multiplier, modulus)

    logger.info("FP Check for Multiplier {} Modulus {}".format(multiplier, modulus))
    is_fpm = multiplier_check.is_fp_multiplier(multiplier, modulus)

    logger.info("FP/MC Check for Multiplier {} Modulus {}".format(multiplier, modulus))
    is_fpmcm = is_mcm and is_fpm

    # Report
    r = SimpleReport("MULTIPLIER CHECK")
    r.add("General", "Modulus", modulus)
    r.add("General", "Multiplier", multiplier)
    r.add("Result", "FP", is_fpm)
    r.add("Result", "MC", is_mcm)
    r.add("Result", "FP/MC", is_fpmcm)
    r.save_txt(filename + "_report.txt")

    print(r)

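# Minimal sketches of the two predicates used above, following the textbook
# definitions in Leemis & Park; multiplier_check's actual implementation may
# differ (e.g. a faster order computation for the full-period check).

def _is_mc_multiplier_sketch(multiplier, modulus):
    # Modulus-compatible: a * (m mod a) < m, so that a * x mod m can be
    # computed without overflow via approximate factorization.
    return multiplier * (modulus % multiplier) < modulus

def _is_fp_multiplier_sketch(multiplier, modulus):
    # Full-period: the sequence x_{i+1} = a * x_i mod m visits every value
    # in 1..m-1 before repeating, i.e. a is a primitive root of the prime
    # modulus m. This brute-force walk is O(m): fine for small moduli only.
    x, period = multiplier, 1
    while x != 1:
        x = (multiplier * x) % modulus
        period += 1
    return period == modulus - 1
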
def run(bits, outdir=DEFAULT_OUTDIR):
    """
    Find a modulus for the given number of bits.

    :param bits: (int) number of bits; must be positive.
    :param outdir: (str) path to the output directory.
    """
    logger.info("Computing modulus for Bits {}".format(bits))

    filename = path.join(outdir, "mod{}.txt".format(bits))

    modulus = modulus_finder.find_modulus(bits)

    # Report
    r = SimpleReport("MODULUS")
    r.add("General", "Bits", bits)
    r.add("Result", "Modulus", modulus)
    r.save_txt(filename)

    print(r)

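# A minimal sketch of the modulus search, assuming find_modulus returns the
# largest prime representable in a signed word of the given size, i.e. the
# largest prime <= 2^(bits - 1) - 1; modulus_finder may use a different bound
# or a faster primality test.

def _find_modulus_sketch(bits):
    def _is_prime(n):
        if n < 2:
            return False
        if n % 2 == 0:
            return n == 2
        i = 3
        while i * i <= n:
            if n % i == 0:
                return False
            i += 2
        return True

    candidate = 2 ** (bits - 1) - 1  # largest signed value on `bits` bits
    while not _is_prime(candidate):
        candidate -= 2  # 2^(bits-1) - 1 is odd, so candidates stay odd
    return candidate
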
def run(modulus, outdir=DEFAULT_OUTDIR):
    """
    Compute all FP, MC and FP/MC multipliers for the given modulus,
    and save both the raw data and the report.

    :param modulus: (int) the modulus.
    :param outdir: (str) path to the output directory.
    """
    filename = path.join(outdir, "mod{}".format(modulus))

    logger.info("Computing MC Multipliers for Modulus {}".format(modulus))
    mc_multipliers = multiplier_check.get_mc_multipliers(modulus)

    logger.info("Computing FP Multipliers for Modulus {}".format(modulus))
    fp_multipliers = multiplier_check.get_fp_multipliers(modulus)

    logger.info("Computing FP/MC Multipliers for Modulus {}".format(modulus))
    # Set membership keeps the intersection linear rather than quadratic.
    mc_multipliers_set = set(mc_multipliers)
    fpmc_multipliers = [c for c in fp_multipliers if c in mc_multipliers_set]

    logger.info("Computing smallest/largest FP/MC Multipliers for Modulus {}".format(modulus))
    smallest_fpmc_multiplier = min(fpmc_multipliers, default=None)
    largest_fpmc_multiplier = max(fpmc_multipliers, default=None)

    # Save raw data
    save_list_of_numbers(filename + "_mc.txt", mc_multipliers)
    save_list_of_numbers(filename + "_fp.txt", fp_multipliers)
    save_list_of_numbers(filename + "_fpmc.txt", fpmc_multipliers)

    # Report
    r = SimpleReport("MULTIPLIERS")
    r.add("General", "Modulus", modulus)
    r.add("Multipliers", "FP", len(fp_multipliers))
    r.add("Multipliers", "MC", len(mc_multipliers))
    r.add("Multipliers", "FP/MC", len(fpmc_multipliers))
    r.add("Multipliers (%)", "FP", round(100 * len(fp_multipliers) / (modulus - 1), 3))
    r.add("Multipliers (%)", "MC", round(100 * len(mc_multipliers) / (modulus - 1), 3))
    r.add("Multipliers (%)", "FP/MC", round(100 * len(fpmc_multipliers) / (modulus - 1), 3))
    r.add("Result", "Smallest FP/MC Multiplier", smallest_fpmc_multiplier)
    r.add("Result", "Largest FP/MC Multiplier", largest_fpmc_multiplier)
    r.save_txt(filename + "_report.txt")

    print(r)

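# Cross-check (a known result, see Leemis & Park): for a prime modulus m there
# are exactly phi(m - 1) full-period multipliers, where phi is Euler's totient.
# This sketch can be used to sanity-check len(fp_multipliers).

def _count_fp_multipliers(modulus):
    n = modulus - 1
    phi = n
    p = 2
    while p * p <= n:
        if n % p == 0:
            phi -= phi // p  # phi *= (1 - 1/p)
            while n % p == 0:
                n //= p
        p += 1
    if n > 1:
        phi -= phi // n  # leftover prime factor
    return phi
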
def run(g, samsize, bins, confidence, d, outdir):
    """
    Run the Test of Extremes on the given generator and save the report.

    :param g: the random number generator.
    :param samsize: (int) the sample size.
    :param bins: (int) the number of bins.
    :param confidence: (float) the confidence level, in (0, 1).
    :param d: (int) the number of pseudo-random values per extreme.
    :param outdir: (str) path to the output directory.
    """
    logger.info(
        "Extremes Test for Modulus {} Multiplier {} Streams {} Jumper {} Bins {} Samsize {} D {} Confidence {}"
        .format(g.get_modulus(), g.get_multiplier(), g.get_nstreams(),
                g.get_jumper(), bins, samsize, d, confidence))

    filename = path.join(
        outdir, "mod{}_mul{}_str{}".format(g.get_modulus(), g.get_multiplier(),
                                           g.get_nstreams()))

    # Statistics: [(stream_1, chi_1), (stream_2, chi_2), ..., (stream_n, chi_n)]
    data = test.statistics(g, samsize, bins, d)
    save_csv(filename + ".csv", ["stream", "value"], data, empty=True)

    # Critical bounds
    mn = test.critical_min(bins, confidence)
    mx = test.critical_max(bins, confidence)

    # Theoretical/empirical error
    err = test.error(data, mn, mx, confidence)

    # Result
    success = err["err_emp"] <= err["err_thr"]
    sugg_confidence = 1 - err["err_emp_perc"]

    # Report
    r = SimpleReport("TEST OF EXTREMES")
    r.add("Generator", "Class", g.__class__.__name__)
    r.add("Generator", "Streams", g.get_nstreams())
    r.add("Generator", "Modulus", g.get_modulus())
    r.add("Generator", "Multiplier", g.get_multiplier())
    r.add("Generator", "Jumper", g.get_jumper())
    r.add("Generator", "Seed", g.get_initial_seed())
    r.add("Test Parameters", "Sample Size", samsize)
    r.add("Test Parameters", "Bins", bins)
    r.add("Test Parameters", "Confidence", round(confidence * 100, 3))
    r.add("Test Parameters", "D", d)
    r.add("Critical Bounds", "Lower Bound", mn)
    r.add("Critical Bounds", "Upper Bound", mx)
    r.add("Error", "Theoretical",
          "{} ({} %)".format(err["err_thr"], round(err["err_thr_perc"] * 100, 3)))
    r.add("Error", "Empirical",
          "{} ({} %)".format(err["err_emp"], round(err["err_emp_perc"] * 100, 3)))
    r.add("Error", "Empirical Lower Bound",
          "{} ({} %)".format(err["err_mn"], round(err["err_mn_perc"] * 100, 3)))
    r.add("Error", "Empirical Upper Bound",
          "{} ({} %)".format(err["err_mx"], round(err["err_mx_perc"] * 100, 3)))
    r.add("Result", "Suggested Confidence", round(sugg_confidence * 100, 3))
    r.add("Result", "Success", success)
    r.save_txt(filename + "_report.txt")
    r.save_csv(filename + "_report.csv")

    logger.info("Report:\n{}".format(r))

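# A minimal sketch of the critical bounds used above, assuming the test module
# computes two-sided chi-square critical values with bins - 1 degrees of
# freedom; the real implementation may rely on an analytical approximation
# instead of SciPy.
from scipy.stats import chi2

def _critical_bounds_sketch(bins, confidence):
    df = bins - 1
    lower = chi2.ppf((1.0 - confidence) / 2.0, df)  # e.g. 0.025 quantile at 95%
    upper = chi2.ppf((1.0 + confidence) / 2.0, df)  # e.g. 0.975 quantile at 95%
    return lower, upper
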
def generate_report(self):
    """
    Generate the simulation report.

    :return: (SimpleReport) the simulation report.
    """
    r = Report(self.name)
    alpha = 1.0 - self.confidence

    # Report - General
    r.add("general", "mode", self.mode.name)
    if self.mode is SimulationMode.TRANSIENT_ANALYSIS:
        r.add("general", "t_stop", self.t_stop)
    elif self.mode is SimulationMode.PERFORMANCE_ANALYSIS:
        r.add("general", "batches", self.batches)
        r.add("general", "batchdim", self.batchdim)
    else:
        raise RuntimeError(
            "The current version supports only TRANSIENT_ANALYSIS and PERFORMANCE_ANALYSIS")
    r.add("general", "confidence", self.confidence)

    # Report - Randomization
    r.add("randomization", "generator", self.rndgen.__class__.__name__)
    r.add("randomization", "iseed", self.rndgen.get_initial_seed())
    r.add("randomization", "modulus", self.rndgen.get_modulus())
    r.add("randomization", "multiplier", self.rndgen.get_multiplier())
    r.add("randomization", "streams", self.rndgen.get_nstreams())

    # Report - Arrivals
    for tsk in TaskScope.concrete():
        r.add("arrival", "arrival_{}_dist".format(tsk.name.lower()), Variate.EXPONENTIAL.name)
        r.add("arrival", "arrival_{}_rate".format(tsk.name.lower()), self.taskgen.rates[tsk])
    for tsk in TaskScope.concrete():
        r.add("arrival", "generated_{}".format(tsk.name.lower()), self.taskgen.generated[tsk])

    # Report - System/Cloudlet
    r.add("system/cloudlet", "n_servers", self.system.cloudlet.n_servers)
    r.add("system/cloudlet", "controller_algorithm",
          self.system.cloudlet.controller.controller_algorithm.name)
    if self.system.cloudlet.controller.controller_algorithm is ControllerAlgorithm.ALGORITHM_2:
        r.add("system/cloudlet", "threshold", self.system.cloudlet.threshold)
    for tsk in TaskScope.concrete():
        r.add(
            "system/cloudlet",
            "service_{}_dist".format(tsk.name.lower()),
            self.system.cloudlet.rndservice.var[tsk].name,
        )
        if self.system.cloudlet.rndservice.var[tsk] is Variate.EXPONENTIAL:
            r.add(
                "system/cloudlet",
                "service_{}_rate".format(tsk.name.lower()),
                1.0 / self.system.cloudlet.rndservice.par[tsk]["m"],
            )
        else:
            for p in self.system.cloudlet.rndservice.par[tsk]:
                r.add(
                    "system/cloudlet",
                    "service_{}_param_{}".format(tsk.name.lower(), p),
                    self.system.cloudlet.rndservice.par[tsk][p],
                )

    # Report - System/Cloud
    for tsk in TaskScope.concrete():
        r.add("system/cloud", "service_{}_dist".format(tsk.name.lower()),
              self.system.cloud.rndservice.var[tsk].name)
        if self.system.cloud.rndservice.var[tsk] is Variate.EXPONENTIAL:
            r.add(
                "system/cloud",
                "service_{}_rate".format(tsk.name.lower()),
                1.0 / self.system.cloud.rndservice.par[tsk]["m"],
            )
        else:
            for p in self.system.cloud.rndservice.par[tsk]:
                r.add(
                    "system/cloud",
                    "service_{}_param_{}".format(tsk.name.lower(), p),
                    self.system.cloud.rndservice.par[tsk][p],
                )
    for tsk in TaskScope.concrete():
        r.add("system/cloud", "setup_{}_dist".format(tsk.name.lower()),
              self.system.cloud.rndsetup.var[tsk].name)
        for p in self.system.cloud.rndsetup.par[tsk]:
            r.add(
                "system/cloud",
                "setup_{}_param_{}".format(tsk.name.lower(), p),
                self.system.cloud.rndsetup.par[tsk][p],
            )

    # Report - Execution
    r.add("execution", "clock", self.calendar.get_clock())
    r.add("execution", "collected_samples", self.metrics.n_samples)
    r.add("execution", "collected_batches", self.metrics.n_batches)

    # Report - State
    for sys in sorted(self.system.state, key=lambda x: x.name):
        for tsk in sorted(self.system.state[sys], key=lambda x: x.name):
            r.add("state", "{}_{}".format(sys.name.lower(), tsk.name.lower()),
                  self.system.state[sys][tsk])

    # Report - Statistics
    for metric in sorted(self.metrics.performance_metrics.__dict__):
        for sys in sorted(SystemScope, key=lambda x: x.name):
            for tsk in sorted(TaskScope, key=lambda x: x.name):
                r.add(
                    "statistics",
                    "{}_{}_{}_mean".format(metric, sys.name.lower(), tsk.name.lower()),
                    getattr(self.metrics.performance_metrics, metric)[sys][tsk].mean(),
                )
                r.add(
                    "statistics",
                    "{}_{}_{}_sdev".format(metric, sys.name.lower(), tsk.name.lower()),
                    getattr(self.metrics.performance_metrics, metric)[sys][tsk].sdev(),
                )
                r.add(
                    "statistics",
                    "{}_{}_{}_cint".format(metric, sys.name.lower(), tsk.name.lower()),
                    getattr(self.metrics.performance_metrics, metric)[sys][tsk].cint(alpha),
                )

    return r

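# A minimal sketch of the confidence half-interval reported above, assuming
# cint(alpha) is the Student-t half-width over n batch means; the actual
# statistics class may compute it differently.
from math import sqrt
from scipy.stats import t

def _cint_sketch(batch_means, alpha):
    n = len(batch_means)
    mean = sum(batch_means) / n
    sdev = sqrt(sum((v - mean) ** 2 for v in batch_means) / (n - 1))
    return t.ppf(1.0 - alpha / 2.0, n - 1) * sdev / sqrt(n)
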
def run(g, test_name, test_params, outdir=DEFAULT_OUTDIR):
    """
    Run the Kolmogorov-Smirnov Test on the per-stream chi-square statistics
    produced by the given chi-square test, and save the report.

    :param g: the random number generator.
    :param test_name: (str) the name of the underlying chi-square test.
    :param test_params: (dict) the parameters of the underlying chi-square test.
    :param outdir: (str) path to the output directory.
    """
    logger.info(
        "Kolmogorov-Smirnov Test ({}) for Modulus {} Multiplier {} Streams {} Jumper {}"
        .format(test_name, g.get_modulus(), g.get_multiplier(), g.get_nstreams(),
                g.get_jumper()))

    filename = path.join(
        outdir, "mod{}_mul{}_str{}".format(g.get_modulus(), g.get_multiplier(),
                                           g.get_nstreams()))

    if test_name == "uniformity_u":
        raise NotImplementedError(
            "Kolmogorov-Smirnov on {} is not yet implemented".format(test_name))
        # chi_square_statistics = uniformity_univariate.statistics(generator, streams, samsize, bins)
    elif test_name == "uniformity_b":
        raise NotImplementedError(
            "Kolmogorov-Smirnov on {} is not yet implemented".format(test_name))
        # chi_square_statistics = uniformity_bivariate.statistics(generator, streams, samsize, bins)
    elif test_name == "extremes":
        chi_square_statistics = extremes.statistics(g, test_params["samsize"],
                                                    test_params["bins"],
                                                    test_params["d"])
    elif test_name == "runsup":
        raise NotImplementedError(
            "Kolmogorov-Smirnov on {} is not yet implemented".format(test_name))
        # chi_square_statistics = runsup.statistics(generator, streams, samsize, bins)
    elif test_name == "gap":
        raise NotImplementedError(
            "Kolmogorov-Smirnov on {} is not yet implemented".format(test_name))
        # chi_square_statistics = gap.statistics(generator, streams, samsize, bins, test_params["a"], test_params["b"])
    elif test_name == "permutation":
        raise NotImplementedError(
            "Kolmogorov-Smirnov on {} is not yet implemented".format(test_name))
        # chi_square_statistics = permutation.statistics(generator, streams, samsize, bins, test_params["t"])
    else:
        raise ValueError("{} is not a valid test name".format(test_name))

    save_csv(filename + ".csv", ["stream", "value"], chi_square_statistics, empty=True)

    # KS statistic
    ks_distances = test.compute_ks_distances(chi_square_statistics, test_params["bins"])
    ks_statistic = test.compute_ks_statistic(ks_distances)
    ks_point = test.compute_ks_point(ks_distances)

    # KS critical distance
    ks_critical_distance = test.compute_ks_critical_distance(
        g.get_nstreams(), test_params["confidence"])

    # Result
    success = ks_statistic < ks_critical_distance

    # Report
    r = SimpleReport("TEST OF KOLMOGOROV-SMIRNOV")
    r.add("Generator", "Class", g.__class__.__name__)
    r.add("Generator", "Streams", g.get_nstreams())
    r.add("Generator", "Modulus", g.get_modulus())
    r.add("Generator", "Multiplier", g.get_multiplier())
    r.add("Generator", "Jumper", g.get_jumper())
    r.add("Generator", "Seed", g.get_initial_seed())
    r.add("Test Parameters", "ChiSquare Test", test_name)
    r.add("Test Parameters", "Sample Size", test_params["samsize"])
    r.add("Test Parameters", "Bins", test_params["bins"])
    r.add("Test Parameters", "Confidence", round(test_params["confidence"] * 100, 3))
    if test_name == "extremes":
        r.add("Test Parameters", "D", test_params["d"])
    elif test_name == "gap":
        r.add("Test Parameters", "A", test_params["a"])
        r.add("Test Parameters", "B", test_params["b"])
    elif test_name == "permutation":
        r.add("Test Parameters", "T", test_params["t"])
    r.add("KS", "KS Statistic", round(ks_statistic, 3))
    r.add("KS", "KS Point X", round(ks_point, 3))
    r.add("KS", "KS Critical Distance", round(ks_critical_distance, 3))
    r.add("Result", "Success", success)
    r.save_txt(filename + "_report.txt")
    r.save_csv(filename + "_report.csv")

    logger.info("Report:\n{}".format(r))

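# A minimal sketch of the KS machinery used above, assuming the empirical CDF
# of the per-stream chi-square values is compared against the theoretical
# chi-square CDF with bins - 1 degrees of freedom; the test module's real
# implementation (and its critical-distance table) may differ.
from scipy.stats import chi2

def _ks_statistic_sketch(chi_square_statistics, bins):
    values = sorted(value for _, value in chi_square_statistics)
    n = len(values)
    # D_n = sup |F_empirical - F_theoretical|, evaluated at the step points.
    return max(
        max(abs(chi2.cdf(v, bins - 1) - i / n),
            abs(chi2.cdf(v, bins - 1) - (i + 1) / n))
        for i, v in enumerate(values)
    )
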
def validate(analytical_result_path, simulation_result_path):
    """
    Validate the simulation result against the analytical result.

    :param analytical_result_path: (string) the path of the analytical result file.
    :param simulation_result_path: (string) the path of the simulation result file.
    :return: (Report) the validation report.
    """
    analytical_result = read_csv(analytical_result_path)[0]
    simulation_result = read_csv(simulation_result_path)[0]

    __verify_model_settings(analytical_result, simulation_result)

    statistics_matching = {}
    statistics_not_matching = {}

    for index in INDICES:
        for system_scope in SYSTEM_SCOPES:
            for task_scope in TASK_SCOPES:
                statistic = "{}_{}_{}".format(index, system_scope, task_scope)
                mean_key = "statistics_{}_mean".format(statistic)
                cint_key = "statistics_{}_cint".format(statistic)
                analytical_mean = float(analytical_result[mean_key])
                simulation_mean = float(simulation_result[mean_key])
                simulation_cint = float(simulation_result[cint_key])
                simulation_lower_bound = simulation_mean - simulation_cint
                simulation_upper_bound = simulation_mean + simulation_cint
                # Distance from the simulation mean, as a fraction of the
                # confidence semi-interval.
                delta = (analytical_mean - simulation_mean) / simulation_cint
                if simulation_lower_bound <= analytical_mean <= simulation_upper_bound:
                    statistics_matching[statistic] = delta
                else:
                    statistics_not_matching[statistic] = delta

    report = Report("VALIDATION-CLOUD-CLOUDLET")

    for section, statistics, relation in (
            ("matching", statistics_matching, "inside"),
            ("not matching", statistics_not_matching, "outside")):
        for statistic, delta in sorted(statistics.items(),
                                       key=lambda item: abs(item[1]), reverse=True):
            mean_key = "statistics_{}_mean".format(statistic)
            cint_key = "statistics_{}_cint".format(statistic)
            analytical_mean = float(analytical_result[mean_key])
            simulation_mean = float(simulation_result[mean_key])
            simulation_cint = float(simulation_result[cint_key])
            simulation_lower_bound = simulation_mean - simulation_cint
            simulation_upper_bound = simulation_mean + simulation_cint
            report.add(
                section,
                statistic,
                "{} {} [{},{}], at {}% of the semi-interval from the simulation mean {}".format(
                    analytical_mean, relation, simulation_lower_bound,
                    simulation_upper_bound, delta * 100, simulation_mean),
            )

    return report

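# Worked example of the acceptance rule above: with simulation mean 0.50,
# confidence semi-interval 0.04 and analytical mean 0.53, the bounds are
# [0.46, 0.54] and delta = (0.53 - 0.50) / 0.04 = 0.75, so the analytical
# value lies inside the interval at 75% of the semi-interval from the
# simulation mean, and the statistic is classified as matching.
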
def run(modulus, multiplier, streams, outdir=DEFAULT_OUTDIR):
    """
    Compute the jumpers for the given modulus, multiplier and number of
    streams, and save both the raw data and the report.

    :param modulus: (int) the modulus.
    :param multiplier: (int) the multiplier.
    :param streams: (int) the number of streams.
    :param outdir: (str) path to the output directory.
    """
    logger.info(
        "Computing Jumpers for Modulus {} Multiplier {} Streams {}".format(
            modulus, multiplier, streams))

    filename = path.join(
        outdir, "mod{}_mul{}_str{}".format(modulus, multiplier, streams))

    # Jumpers: [(jumper_1, jumpsize_1), (jumper_2, jumpsize_2), ...]
    jumpers = jumper_finder.find_jumpers(modulus, multiplier, streams)
    jmax = max(jumpers, key=lambda item: item[1])

    # Save raw data
    save_list_of_pairs(filename + ".csv", jumpers)

    # Report
    r = SimpleReport("JUMPER")
    r.add("General", "Modulus", modulus)
    r.add("General", "Multiplier", multiplier)
    r.add("General", "Streams", streams)
    r.add("Result", "Jumpers", len(jumpers))
    r.add("Result", "Best Jumper", jmax[0])
    r.add("Result", "Best Jump Size", jmax[1])
    r.save_txt(filename + "_report.txt")

    print(r)

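# A minimal sketch of the jumper search, under the assumption that a jumper is
# a^k mod m for some jump size k <= (m - 1) / streams and that only
# modulus-compatible jumpers are kept (so jumping ahead stays overflow-safe);
# jumper_finder's actual selection criteria may differ.

def _find_jumpers_sketch(modulus, multiplier, streams):
    jumpers = []
    jumper = multiplier  # a^1 mod m
    for jumpsize in range(1, (modulus - 1) // streams + 1):
        if jumper * (modulus % jumper) < modulus:  # modulus-compatible
            jumpers.append((jumper, jumpsize))
        jumper = (jumper * multiplier) % modulus
    return jumpers
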
def generate_report(self):
    """
    Generate the solver report.

    :return: (SimpleReport) the solver report.
    """
    r = Report("ANALYTICAL-SOLUTION")

    # Report - Arrivals
    for tsk in TaskScope.concrete():
        r.add("arrival", "arrival_{}_dist".format(tsk.name.lower()), Variate.EXPONENTIAL.name)
        r.add("arrival", "arrival_{}_rate".format(tsk.name.lower()), self.arrival_rates[tsk])

    # Report - System/Cloudlet
    r.add("system/cloudlet", "n_servers", self.clt_n_servers)
    r.add("system/cloudlet", "controller_algorithm", self.clt_controller_algorithm.name)
    if self.clt_controller_algorithm is ControllerAlgorithm.ALGORITHM_2:
        r.add("system/cloudlet", "threshold", self.clt_threshold)
    for tsk in TaskScope.concrete():
        r.add("system/cloudlet", "service_{}_dist".format(tsk.name.lower()),
              Variate.EXPONENTIAL.name)
        r.add(
            "system/cloudlet",
            "service_{}_rate".format(tsk.name.lower()),
            self.service_rates[SystemScope.CLOUDLET][tsk],
        )

    # Report - System/Cloud
    for tsk in TaskScope.concrete():
        r.add("system/cloud", "service_{}_dist".format(tsk.name.lower()),
              Variate.EXPONENTIAL.name)
        r.add("system/cloud", "service_{}_rate".format(tsk.name.lower()),
              self.service_rates[SystemScope.CLOUD][tsk])
    r.add("system/cloud", "setup_{}_dist".format(TaskScope.TASK_2.name.lower()),
          Variate.EXPONENTIAL.name)
    r.add("system/cloud", "setup_{}_param_m".format(TaskScope.TASK_2.name.lower()),
          self.t_setup)

    # Report - Statistics
    for performance_metric in sorted(self.solution.performance_metrics.__dict__):
        for sys in sorted(SystemScope, key=lambda x: x.name):
            for tsk in sorted(TaskScope, key=lambda x: x.name):
                r.add(
                    "statistics",
                    "{}_{}_{}_mean".format(performance_metric, sys.name.lower(),
                                           tsk.name.lower()),
                    getattr(self.solution.performance_metrics, performance_metric)[sys][tsk],
                )

    # Report - States Probability
    for state in sorted(self.states_probabilities):
        r.add("states probability", state, self.states_probabilities[state])

    # Report - Routing Probability
    for probability, value in self.routing_probabilities.items():
        r.add("routing probability", probability, value)

    return r

class ReportTest(unittest.TestCase):

    def setUp(self):
        """
        The test setup.

        :return: None
        """
        self.r = SimpleReport("SAMPLE REPORT")
        self.r.add("Section-1", "1st Value", 1)
        self.r.add("Section-1", "2nd Value", 2.123)
        self.r.add("Section-1", "3rd Value", "Hello World")
        self.r.add("Section-2/Subsection-1", "1st Value", 1)
        self.r.add("Section-2/Subsection-1", "2nd Value", 2.123)
        self.r.add("Section-2/Subsection-1", "3rd Value", "Hello World")
        self.r.add("Section-3", "1st Value", 1)
        self.r.add("Section-3", "2nd Value", 2.123)
        self.r.add("Section-3", "3rd Value", "Hello World")

        self.file_txt = "test.txt"
        self.file_csv = "test.csv"

    def test_string_representation(self):
        """
        Test the report string representation.

        :return: None
        """
        s = "\n"
        s += "==================================================\n"
        s += "                  SAMPLE REPORT                   \n"
        s += "==================================================\n"
        s += "\n"
        s += "                    Section-1                     \n"
        s += "1st Value........................................1\n"
        s += "2nd Value....................................2.123\n"
        s += "3rd Value..............................Hello World\n"
        s += "\n"
        s += "              Section-2/Subsection-1              \n"
        s += "1st Value........................................1\n"
        s += "2nd Value....................................2.123\n"
        s += "3rd Value..............................Hello World\n"
        s += "\n"
        s += "                    Section-3                     \n"
        s += "1st Value........................................1\n"
        s += "2nd Value....................................2.123\n"
        s += "3rd Value..............................Hello World\n"

        self.assertEqual(s, str(self.r), "String representation is not correct.")

    def test_save_txt(self):
        """
        Test the report saving to a TXT file.

        :return: None
        """
        s = "\n"
        s += "==================================================\n"
        s += "                  SAMPLE REPORT                   \n"
        s += "==================================================\n"
        s += "\n"
        s += "                    Section-1                     \n"
        s += "1st Value........................................1\n"
        s += "2nd Value....................................2.123\n"
        s += "3rd Value..............................Hello World\n"
        s += "\n"
        s += "              Section-2/Subsection-1              \n"
        s += "1st Value........................................1\n"
        s += "2nd Value....................................2.123\n"
        s += "3rd Value..............................Hello World\n"
        s += "\n"
        s += "                    Section-3                     \n"
        s += "1st Value........................................1\n"
        s += "2nd Value....................................2.123\n"
        s += "3rd Value..............................Hello World\n"

        self.r.save_txt(self.file_txt)

        with open(self.file_txt, "r") as f:
            actual = f.read()

        self.assertEqual(s, actual, "TXT file representation is not correct.")

    def test_save_csv(self):
        """
        Test the report saving to a CSV file.

        :return: None
        """
        s = "name,section-1.1st_value,section-1.2nd_value,section-1.3rd_value,section-2/subsection-1.1st_value,section-2/subsection-1.2nd_value,section-2/subsection-1.3rd_value,section-3.1st_value,section-3.2nd_value,section-3.3rd_value\n"
        s += "SAMPLE REPORT,1,2.123,Hello World,1,2.123,Hello World,1,2.123,Hello World\n"

        self.r.save_csv(self.file_csv)

        with open(self.file_csv, "r") as f:
            actual = f.read()

        self.assertEqual(s, actual, "CSV file representation is not correct.")

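# Standard unittest entry point, so the suite can be run directly
# (e.g. python test_report.py).
if __name__ == "__main__":
    unittest.main()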