Example #1
    def get_sweep_results(self, ref_path_prop, out_file=None):
        """
        Get sweep results for a sweep run, dump all simulation info to file if desired

        :param ref_path_prop: path to the reference PropertyReport.json; property report data is only collected if this file exists
        :param out_file: optional file to dump the list of simulation directories to
        :return: (collected inset chart data, collected property report data)
        """
        all_data = []
        all_data_prop = []
        sim_dirs = []

        for reg_thread in ru.reg_threads:
            sim_dir = os.path.join(reg_thread.sim_root, reg_thread.sim_timestamp)
            sim_dirs.append(sim_dir)
            icj_filename = os.path.join(sim_dir, "output", "InsetChart.json")
            icj_json = ru.load_json(icj_filename)
            all_data.append(icj_json)
            if os.path.exists(ref_path_prop):
                prj_json = ru.load_json(os.path.join(sim_dir, "output", "PropertyReport.json"))
                all_data_prop.append(prj_json)

        if out_file:
            with open(out_file, "w") as outputs:
                outputs.write(json.dumps(sim_dirs, indent=4, sort_keys=True))

        return (all_data, all_data_prop)
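The out_file dump written above is plain JSON, so the list of simulation directories can be read back with the standard library alone. A minimal sketch, assuming a hypothetical dump file named sweep_dirs.json was passed as out_file:

import json

# "sweep_dirs.json" is a hypothetical name passed as out_file to get_sweep_results
with open("sweep_dirs.json") as handle:
    sim_dirs = json.load(handle)  # list of simulation directory paths

for sim_dir in sim_dirs:
    print(sim_dir)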
Example #2
    def plot_sweep_results(self, sweep_path, sweep_out_file=None):
        """
        Get sweep results and plot them

        :param sweep_path: regression simulation directory
        :param sweep_out_file: if given, dump the list of simulation directories to this file
        """
        print("Plot sweep results...\n")

        ref_json_prop = {}
        ref_path_prop = os.path.join(sweep_path, "output", "PropertyReport.json")
        if os.path.exists(ref_path_prop):
            ref_json_prop = ru.load_json(os.path.join(self.cache_cwd, ref_path_prop))

        ref_path = os.path.join(sweep_path, "output", "InsetChart.json")
        ref_json = ru.load_json(os.path.join(self.cache_cwd, ref_path))

        (all_data, all_data_prop) = self.get_sweep_results(ref_path_prop, sweep_out_file)

        param_counts = []

        for param_name in self.param_names:
            param_counts.append(str(len(self.param_values[param_name])))

        plot_title = "Sweep over " + ", ".join(self.param_names).replace(':', '_') + " (" + ", ".join(param_counts) + ") values"
        os.chdir(self.cache_cwd)
        # local import: plotAllCharts (and its plotting backend) is only needed when plotting
        import plotAllCharts
        plotAllCharts.plotBunch(all_data, plot_title, ref_json)

        if os.path.exists(ref_path_prop):
            plotAllCharts.plotBunch(all_data_prop, plot_title, ref_json_prop)
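The plot title is assembled from the sweep parameter names and the number of values per parameter. A small self-contained illustration with made-up parameter names and values standing in for self.param_names / self.param_values:

# Hypothetical sweep parameters standing in for self.param_names / self.param_values
param_names = ["Base_Infectivity", "x_Temporary_Larval_Habitat"]
param_values = {"Base_Infectivity": [0.1, 0.2, 0.3], "x_Temporary_Larval_Habitat": [1, 10]}

param_counts = [str(len(param_values[name])) for name in param_names]
plot_title = "Sweep over " + ", ".join(param_names).replace(':', '_') + \
             " (" + ", ".join(param_counts) + ") values"
print(plot_title)  # Sweep over Base_Infectivity, x_Temporary_Larval_Habitat (3, 2) values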
Example #3
    def doSchemaTest(self):
        # print( "Testing schema generation..." )
        test_schema_path = "test-schema.json"
        with open(os.devnull, "w") as devnull:
            subprocess.call(
                [self.params.executable_path, "--get-schema", "--schema-path", test_schema_path],
                stdout=devnull)
        try:
            ru.load_json(test_schema_path)
            print("schema works.")
            os.remove(test_schema_path)
            return "passed"
        except Exception as ex:
            print(str(ex))

        return "failed"  # Well, it doesn't seem to have passed...
Example #4
    def run_test(self, sim_path, param_overrides=None):
        """
        Run a test given a sim_path

        :param sim_path: path to the regression simulation directory
        :param param_overrides: map of param_names -> param_values to override in config (if present)
        :return: simulation id
        """
        os.chdir(self.cache_cwd)

        sim_id = self.sim_id_generator.get_simulation_id()

        configjson = get_test_config(sim_path, self.test_type, self.report)

        if configjson:
            if self.check_constraints(configjson):
                # override param name/value for sweeps
                if param_overrides:
                    for param_name, param_value in param_overrides.items():
                        TestRunner.override_config_value(
                            configjson, param_name, param_value)

                campjson = flatten_campaign(sim_path, self.test_type,
                                            self.report)

                # add campaign to config
                if campjson is None:
                    campaign_file = None
                    if "Campaign_Filename" in configjson["parameters"]:
                        campaign_file = os.path.join(
                            sim_path,
                            configjson["parameters"]["Campaign_Filename"])
                    configjson["campaign_json"] = load_campjson_from_campaign(
                        sim_path,
                        configjson["parameters"]["Enable_Interventions"],
                        self.runner, campaign_file)
                else:
                    configjson["campaign_json"] = str(campjson)

                # add custom reports, if they exist
                custom_reports_path = os.path.join(sim_path, "custom_reports.json")
                if os.path.exists(custom_reports_path):
                    configjson["custom_reports_json"] = str(ru.load_json(custom_reports_path))

                thread = self.runner.commissionFromConfigJson(
                    sim_id, configjson, sim_path, self.report,
                    self.scenario_type)
                ru.reg_threads.append(thread)
        else:
            print("Error flattening config.  Skipping " + sim_path)
            ru.final_warnings += "Error flattening config.  Skipped " + sim_path + "\n"

        return sim_id
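TestRunner.override_config_value is not shown in these examples. A minimal sketch of the idea it is used for here, assuming flat parameter names under the conventional "parameters" key (the real method may also handle nested names):

def override_config_value(configjson, param_name, param_value):
    # Illustration only: overwrite a single flat parameter for one sweep point
    configjson["parameters"][param_name] = param_value

config = {"parameters": {"Simulation_Duration": 365}}
override_config_value(config, "Simulation_Duration", 730)
print(config["parameters"]["Simulation_Duration"])  # 730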
Example #5
    def run_test(self, sim_path, param_overrides=None):
        """
        Run a test given a sim_path

        :param sim_path: path to the regression simulation directory
        :param param_overrides: map of param_names -> param_values to override in config (if present)
        :return: simulation id
        """

        os.chdir(self.cache_cwd)

        sim_id = self.sim_id_generator.get_simulation_id()

        configjson = get_test_config(sim_path, self.test_type, self.report)
        if configjson is None:
            print("Misconfigured test: " + sim_path)
        elif "parameters" not in configjson:  # pymod configs, but could be used elsewhere
            # normalize the config so downstream code can rely on the conventional top-level "parameters" key
            configjson = {"parameters": configjson}

        if configjson:
            if self.check_constraints(configjson):
                # override param name/value for sweeps
                if param_overrides:
                    for param_name, param_value in param_overrides.items():
                        TestRunner.override_config_value(configjson, param_name, param_value)

                campjson = flatten_campaign(sim_path, self.test_type, self.report)

                # add campaign to config
                if campjson is None:
                    campaign_file = None
                    params = configjson["parameters"]
                    if "Campaign_Filename" in params:
                        campaign_file = os.path.join(sim_path, params["Campaign_Filename"])
                    ei = params.get("Enable_Interventions", False)
                    configjson["campaign_json"] = load_campjson_from_campaign(sim_path, ei, self.runner, campaign_file)
                else:
                    configjson["campaign_json"] = str(campjson)

                # add custom reports, if they exist
                if os.path.exists(os.path.join(sim_path, "custom_reports.json")):
                    configjson["custom_reports_json"] = str(ru.load_json(os.path.join(sim_path, "custom_reports.json")))

                thread = self.runner.commissionFromConfigJson(sim_id, configjson, sim_path, self.report, self.scenario_type)
                ru.reg_threads.append(thread)
        else:
            print("Error flattening config.  Skipping " + sim_path)
            ru.final_warnings += "Error flattening config.  Skipped " + sim_path + "\n"

        return sim_id
Example #6
def add_tests_from_file(regression_list_json, filename):
    """
    Load regression tests from a file and add them to an existing list

    :param regression_list_json: existing list of regression tests, modified by this function
    :param filename: suite filename (e.g. generic.json)
    """
    tests = ru.load_json(filename)
    test_type = reglist_test_type(tests)
    if test_type:
        add_regression_tests(regression_list_json, tests, test_type)
    else:
        print("Warning: failed to add any regression tests from file {0}. Expected either 1) a regression_test "
              "suite file path that points to a collection of tests, or 2) a directory path containing "
              "a single test.".format(filename))
Example #7
def get_test_config(path, test_type, report):
    """
    Return the test config for a given test path

    :param path: regression test path
    :param test_type: e.g. "tests", "science", etc.
    :param report: regression report (for keeping track of errors in loading)
    :return: json regression test config
    """
    param_overrides_filename = os.path.join(path, "param_overrides.json")
    if os.path.exists(param_overrides_filename):
        return flatten_test_config(param_overrides_filename, test_type, report)
    else:
        print(
            "Warning: no param_overrides.json file available, config will not be flattened"
        )
        return ru.load_json(os.path.join(path, "config.json"))
Example #8
def load_campjson_from_campaign(simcfg_path, enable_interventions, runner, campaign_filename=None):
    """
    Load campaign json

    :param simcfg_path: regression test path (directory root)
    :param enable_interventions: campaign data is only loaded when this equals 1
    :param runner: runner object; its campaign_filename attribute is set as a side effect
    :param campaign_filename: campaign filename specified from config json
    :return: json campaign data
    """
    if enable_interventions == 1:
        if not campaign_filename or not os.path.exists(campaign_filename):
            try:
                campaign_filename = glob.glob(os.path.join(simcfg_path, "campaign_*.json"))[0]
            except Exception as ex:
                print(str(ex) + " while processing " + simcfg_path)
        runner.campaign_filename = os.path.basename(campaign_filename)
        campjson = ru.load_json(campaign_filename, lambda x: x.replace("u'", "'").replace("'", '"').strip('"'))
        return str(campjson)
    else:
        return {'Events': []}
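When the config does not name a usable campaign file, the code above falls back to the first campaign_*.json it can glob from the test directory. A standalone sketch of that fallback, with a hypothetical directory name and an explicit guard for the empty-glob case that the example only reports with a print:

import glob
import os

simcfg_path = "Regression/my_test"  # hypothetical test directory
candidates = glob.glob(os.path.join(simcfg_path, "campaign_*.json"))
campaign_filename = candidates[0] if candidates else None
if campaign_filename is None:
    print("No campaign_*.json found in " + simcfg_path)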
Example #9
def get_test_config(path, test_type, report):
    """
    Return the test config for a given test path

    :param path: regression test path
    :param test_type: e.g. "tests", "science", etc.
    :param report: regression report (for keeping track of errors in loading)
    :return: json regression test config
    """
    if test_type == "pymod":
        return setup_pymod_directory(os.path.dirname(path))
    else:
        param_overrides_filename = os.path.join(path, "param_overrides.json")
        if os.path.exists(param_overrides_filename):
            return flatten_test_config(param_overrides_filename, test_type, report)
        else:
            #print("Warning: no param_overrides.json file available, config will not be flattened")
            try:
                return ru.load_json(os.path.join(path, "config.json"))
            except Exception as ex:
                print("Malformed config.json in: " + path + " (" + str(ex) + ")")

    return None
Example #10
    def compareJsonOutputs(self, sim_dir, report_name, ref_path, test_path, failures):
        fail_validation = False
        failure_txt = ""

        try:
            ru.load_json(os.path.join(ru.cache_cwd, ref_path))
        except Exception:
            print("Exception {0} {1} loading json file: {2}.".format(
                sys.exc_info()[0],
                sys.exc_info()[1], (os.path.join(ru.cache_cwd, ref_path))))
            return

        ref_json = ru.load_json(os.path.join(sim_dir, ref_path))

        if "Channels" not in ref_json.keys():
            ref_md5 = ru.md5_hash_of_file(ref_path)
            test_md5 = ru.md5_hash_of_file(test_path)
            if ref_md5 == test_md5:
                return False, ""
            else:
                print(self.scenario_path +
                      " completed but did not match reference! (" +
                      str(self.duration) + ") - " + report_name)
                return True, "Non-Channel JSON failed MD5."
        else:
            test_json = ru.load_json(os.path.join(sim_dir, test_path))

            if "Channels" not in test_json.keys():
                return True, "Reference has Channel data and Test file does not."

            ref_md5 = self.get_json_data_hash(ref_json["Channels"])
            test_md5 = self.get_json_data_hash(test_json["Channels"])

            ref_channels = set(ref_json["Channels"])
            test_channels = set(test_json["Channels"])

            if ref_md5 == test_md5:
                return False, ""

            missing_channels = ref_channels - test_channels
            new_channels = test_channels - ref_channels

            if len(missing_channels) > 0:
                fail_validation = True
                print("ERROR: Missing channels - " +
                      ', '.join(missing_channels))
                failure_txt += "Missing channels:\n" + '\n'.join(
                    missing_channels) + "\n"
                self.report.addFailingTest(
                    self.scenario_path, failure_txt,
                    os.path.join(sim_dir, ("output/" + report_name)),
                    self.scenario_type)

            if len(new_channels) > 0:
                print(
                    "WARNING: The test " + report_name + " has " +
                    str(len(new_channels)) +
                    " channels not found in the reference.  Please update the reference "
                    + report_name + ".")
                ru.final_warnings += self.scenario_path + " - New channels not found in reference:\n  " + '\n  '.join(
                    new_channels
                ) + "\nPlease update reference from " + os.path.join(
                    sim_dir, os.path.join("output", "InsetChart.json")) + "!\n"
                self.report.addFailingTest(
                    self.scenario_path, failure_txt,
                    os.path.join(sim_dir, ("output/" + report_name)),
                    self.scenario_type)

            if "Header" in ref_json.keys() and ref_json["Header"][
                    "Timesteps"] != test_json["Header"]["Timesteps"]:
                warning_msg = "WARNING: test " + report_name + " has timesteps " + str(
                    test_json["Header"]["Timesteps"]
                ) + " DIFFERRING from ref " + report_name + " timesteps " + str(
                    ref_json["Header"]["Timesteps"]) + "!\n"
                if self.params.hide_graphs:
                    # This is treated as automated running mode (or bamboo nightly build mode)
                    fail_validation = True
                    failure_txt += warning_msg
                else:
                    # This is treated as manual running mode
                    ru.final_warnings += warning_msg
                    print(warning_msg)

            if not fail_validation:
                #print( "Hasn't failed validation on second level review. Time to look channel by channel, timestep by timestep." )
                # BinnedReport and its derived classes have "Subchannel_Metadata" in the header
                if "Header" in ref_json.keys(
                ) and "Subchannel_Metadata" in ref_json["Header"].keys():
                    self.compareBinnedReportType(ref_json, test_json, failures)
                elif "Header" in ref_json.keys(
                ) and "Report_Type" in ref_json["Header"].keys(
                ) and ref_json["Header"]["Report_Type"] == "InsetChart":
                    # Assuming a BaseChannelReport
                    self.compareChannelReportType(ref_json, test_json,
                                                  failures)
                else:
                    fail_validation = True
                    failures.append(
                        report_name +
                        " - Files are different but cannot do deep dive.")

            if len(failures) > 0:
                fail_validation = True
                failure_txt += "Channel Timestep Reference_Value Test_Value\n" + ''.join(
                    failures)
                print(self.scenario_path +
                      " completed but did not match reference! (" +
                      str(self.duration) + ") - " + report_name)

        return fail_validation, failure_txt
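The channel-level comparison above reduces to hashing the "Channels" block of each report and, when the hashes differ, taking set differences of the channel names. get_json_data_hash is not shown here; a minimal standalone sketch of the same idea, hashing a canonical JSON rendering with hashlib and using stand-in data:

import hashlib
import json

def channels_hash(channels):
    # Hash a canonical (sorted-key) JSON rendering so key order does not matter
    return hashlib.md5(json.dumps(channels, sort_keys=True).encode()).hexdigest()

ref = {"Channels": {"Infected": {"Data": [0, 1, 2]}, "Births": {"Data": [5, 5, 5]}}}
test = {"Channels": {"Infected": {"Data": [0, 1, 3]}, "New Channel": {"Data": [1]}}}

if channels_hash(ref["Channels"]) != channels_hash(test["Channels"]):
    missing_channels = set(ref["Channels"]) - set(test["Channels"])
    new_channels = set(test["Channels"]) - set(ref["Channels"])
    print("Missing channels: " + ", ".join(sorted(missing_channels)))
    print("New channels: " + ", ".join(sorted(new_channels)))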
Example #11
def setup_pymod_directory(path):
    #print( "PyMod test type: copy up all .pyd, .json, and .py files." ) # maybe later
    if not os.path.exists(os.path.join(path, "nd_template.json")):
        print("Could not find nd_template.json file in " + path)
        return None
    return ru.load_json(os.path.join(path, "nd_template.json"))