Code Example #1
# Assumed dtk-tools imports (module paths may vary by dtk-tools version):
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.SetupParser import SetupParser
from simtools.Utilities.Experiments import retrieve_experiment


def run_single_analyzer(exp_id, analyzer, savefile_prefix=""):
    SetupParser.default_block = 'HPC'
    SetupParser.init()

    am = AnalyzeManager()
    am.add_analyzer(analyzer())
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()

    df_return = am.analyzers[0].results
    df_return.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return df_return
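
# Hypothetical usage sketch: the analyzer class below is a placeholder, passed
# uninstantiated because run_single_analyzer() instantiates it itself.
# df = run_single_analyzer("a2e981fe-d9f1-e811-a2bd-c4346bcb1555",
#                          MyEndpointAnalyzer, savefile_prefix="endpoints")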
Code Example #2
def plot_inset_diagnostics(experiment_list,
                           channels=default_channels,
                           working_dir=".",
                           filename="output/InsetChart.json",
                           **kwargs):
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    am = AnalyzeManager()
    am.add_analyzer(
        inset_channel_plotter(channels,
                              working_dir=working_dir,
                              filename=filename,
                              **kwargs))

    for expt in experiment_list:
        am.add_experiment(expt)
    am.analyze()
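
# Hypothetical usage sketch (experiment ids are placeholders reused from later examples;
# default_channels and inset_channel_plotter come from the surrounding project module):
# plot_inset_diagnostics(["7e3073b4-d9f1-e811-a2bd-c4346bcb1555",
#                         "a2e981fe-d9f1-e811-a2bd-c4346bcb1555"],
#                        channels=["True Prevalence"])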
Code Example #3
def run_experiment(configbuilder, experiment_name, experiment_builder,
                   analyzers):
    run_sim_args = {
        'config_builder': configbuilder,
        'exp_name': experiment_name,
        'exp_builder': experiment_builder
    }

    if not SetupParser.initialized:
        SetupParser.init('HPC')

    exp_manager = ExperimentManagerFactory.init()
    exp_manager.run_simulations(**run_sim_args)
    exp_manager.wait_for_finished(verbose=True)
    assert exp_manager.succeeded()
    am = AnalyzeManager(exp_manager.experiment)
    for a in analyzers:
        am.add_analyzer(a)
    am.analyze()
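
# Hypothetical usage sketch, assuming a config builder, sweep builder, and analyzer
# instances like the ones built in Code Example #7 below:
# run_experiment(cb, "testrunandanalyze", builder,
#                [TimeseriesAnalyzer(), VectorSpeciesAnalyzer()])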
Code Example #4
# Assumed imports (pandas plus dtk-tools; module paths may vary by version):
import pandas as pd

from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.SetupParser import SetupParser
from simtools.Utilities.Experiments import retrieve_experiment


def run_analyzers(exp_id, analyzers, savefile_prefix=""):
    def _remove_duplicate_columns(df):
        # Keep only the columns that were not flagged as duplicates during the merge
        return df[[c for c in df.columns if "_duplicated" not in c]]

    SetupParser.default_block = 'HPC'
    SetupParser.init()

    am = AnalyzeManager()
    for a in analyzers:
        am.add_analyzer(a())
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()

    if len(analyzers) == 1:
        df_return = am.analyzers[0].results

    elif len(analyzers) > 1:
        # Merge every analyzer's results on sim_id, then drop the duplicated columns
        df_list = [x.results for x in am.analyzers]
        df_return = df_list[0]
        for df_other in df_list[1:]:
            df_return = pd.merge(df_return,
                                 df_other,
                                 on="sim_id",
                                 suffixes=["", "_duplicated"])
        df_return = _remove_duplicate_columns(df_return)

    else:
        raise ValueError("run_analyzers() needs at least one analyzer")

    df_return.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return df_return
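
# Hypothetical usage sketch: the analyzer classes are placeholders, passed
# uninstantiated because run_analyzers() instantiates each one.
# df = run_analyzers("a2e981fe-d9f1-e811-a2bd-c4346bcb1555",
#                    [PrevalenceAnalyzer, IncidenceAnalyzer],
#                    savefile_prefix="endpoints")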
Code Example #5
                    plt.plot(data, label=label, c=c)
                else:
                    plt.plot(data, label=label)

                plt.xlabel("Simulation Time")

        plt.ylabel(self.channel)
        plt.legend()
        plt.show()


if __name__ == "__main__":
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    am = AnalyzeManager()
    # am.add_analyzer(basic_inset_channel_plotter("True Prevalence"))
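    # Overlay "True Prevalence" from both experiments on one figure, colored and labeled
    # per experiment; label_dict maps each experiment id to a human-readable legend entry.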

    am.add_analyzer(
        inset_channel_plotter("True Prevalence",
                              color_by_expt=True,
                              label_by_expt=True,
                              label_dict={
                                  "7e3073b4-d9f1-e811-a2bd-c4346bcb1555":
                                  "full campaign",
                                  "a2e981fe-d9f1-e811-a2bd-c4346bcb1555":
                                  "no 2011 bednets"
                              },
                              ref_date="2001-01-01"))
    am.add_experiment("7e3073b4-d9f1-e811-a2bd-c4346bcb1555")
    am.add_experiment("a2e981fe-d9f1-e811-a2bd-c4346bcb1555")
    am.analyze()
Code Example #6
    # Write special serialized_files_sim_map for runs that need it
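    # catch_list (defined earlier in the original script) is assumed to hold the 56 catchment names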
    for catch in catch_list[:56]:
        c_folder = "C:/Users/jsuresh/Dropbox (IDM)/Malaria Team Folder/projects/zambia_gridded_sims/kariba_gridded_sims/calibs/{}".format(
            catch)
        sim_map_filename = os.path.join(c_folder,
                                        "serialized_files_sim_map.csv")

        # See if you can find an experiment named CATCH_project_climatefix

        try:
            name_try = "{}_project_climatefix".format(catch)
            proj_exp = DataStore.get_most_recent_experiment(
                id_or_name=name_try)

            # If you can find the experiment, find the corresponding parent simulation and output its sim map
            if proj_exp:
                name_try = "{}_serialize".format(catch)
                orig_exp = DataStore.get_most_recent_experiment(
                    id_or_name=name_try)

                am = AnalyzeManager()
                am.add_experiment(orig_exp)
                am.add_analyzer(
                    SimulationDirectoryMapAnalyzer(save_file=sim_map_filename))
                am.analyze()
                print("Wrote serialized_files_sim_map for {}".format(catch))

        except Exception as e:
            print("Could not write sim map for {}: {}".format(catch, e))
Code Example #7
# Assumed dtk-tools imports (module paths may vary by dtk-tools version):
from dtk.utils.analyzers import TimeseriesAnalyzer, VectorSpeciesAnalyzer
from dtk.utils.builders.sweep import GenericSweepBuilder
from dtk.utils.core.DTKConfigBuilder import DTKConfigBuilder
from dtk.vector.study_sites import configure_site
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.ExperimentManager.ExperimentManagerFactory import ExperimentManagerFactory
from simtools.SetupParser import SetupParser

# This block will be used unless overridden on the command-line
SetupParser.default_block = 'HPC'

cb = DTKConfigBuilder.from_defaults('VECTOR_SIM')
configure_site(cb, 'Namawala')
cb.set_param('Simulation_Duration', 365)


analyzers = (TimeseriesAnalyzer(),
             VectorSpeciesAnalyzer())


builder = GenericSweepBuilder.from_dict({'Run_Number': range(5)})
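# (the sweep above expands Run_Number 0-4 into five otherwise identical simulations)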

run_sim_args = {
    'exp_name': 'testrunandanalyze',
    'exp_builder': builder,
    'config_builder': cb
}

if __name__ == "__main__":
    SetupParser.init(selected_block=SetupParser.default_block)
    exp_manager = ExperimentManagerFactory.from_cb(config_builder=cb)
    exp_manager.run_simulations(**run_sim_args)
    exp_manager.wait_for_finished(verbose=True)

    am = AnalyzeManager(exp_manager.experiment)
    for a in analyzers:
        am.add_analyzer(a)
    am.analyze()
Code Example #8
        finished = all([em.finished() for em in experiments])
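        # (this fragment runs inside a polling loop; the loop exits at the bottom once every experiment has finished)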

        # Create a new AnalyzeManager and add experiment and analyzer
        am = AnalyzeManager(verbose=False)
        for em in experiments:
            am.add_experiment(em.experiment)

        analyzer = DownloadAnalyzerTPI(
            filenames=['output\\ReportHIVByAgeAndGender.csv'],
            TPI_tag="TPI",
            ignore_TPI=False,
            REP_tag="TPI",
            ignore_REP=True,
            output_path=output_directory)

        am.add_analyzer(analyzer)

        # Make sure we refresh our set of experiments

        for e in experiments:
            e.refresh_experiment()
        COMPS_login(SetupParser.get("server_endpoint"))

        am.analyze()

        # If we are not done we wait for 30 sec, if we are done we leave
        if not finished:
            print("Waiting 30 seconds")
            time.sleep(30)
        else:
            break
Code Example #9
cb = CMSConfigBuilder.from_files(model_file='inputs/models/simplemodel.emodl',
                                 config_file='inputs/models/simplemodel.cfg')

# If the base collection containing CMS exists, use it
# If not, use the local
if SetupParser.default_block == "HPC":
    try:
        cb.set_collection_id('CMS 0.82 Pre-release')
    except SimulationAssets.InvalidCollection:
        cb.set_experiment_executable('inputs/compartments/compartments.exe')
        cb.set_dll_root('inputs/compartments')
else:
    cb.set_experiment_executable('inputs/compartments/compartments.exe')
    cb.set_dll_root('inputs/compartments')

run_sim_args = {"config_builder": cb, "exp_name": "First CMS run"}

if __name__ == "__main__":
    SetupParser.init()
    em = ExperimentManagerFactory.from_cb(run_sim_args["config_builder"])
    em.run_simulations(exp_name=run_sim_args["exp_name"])

    # Wait for the simulation to complete
    em.wait_for_finished(verbose=True)

    # Analyze
    am = AnalyzeManager(exp_list='latest')
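    # exp_list='latest' is expected to resolve to the most recent experiment in the local simtools database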
    am.add_analyzer(SimpleCMSAnalyzer())
    am.analyze()
Code Example #10
            data_list.append(all_data[sim])

        return pd.concat(data_list)

    def finalize(self, all_data):
        sim_data_full = self.combine(all_data)
        # print("all_data ",all_data)
        # print("sim_data_full ", sim_data_full)
        if self.save_file:
            sim_data_full.to_csv(self.save_file, index=False)
        return sim_data_full


if __name__ == "__main__":
    SetupParser.default_block = 'HPC'
    SetupParser.init()

    exp_id = sys.argv[1]
    # start_time_step = int(sys.argv[2])

    am = AnalyzeManager()
    am.add_analyzer(
        SaveEndpoint(
            save_file="endpoints_{}.csv".format(exp_id),
            # start_time_step=start_time_step,
            include_counter_report=True,
            output_filename="ReportMalariaFilteredFinal_Year.json"))
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()
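    # Hypothetical invocation: python <this_script>.py <experiment_id>  (the id is read from sys.argv[1] above)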
Code Example #11
            .format(rounds_so_far, len(sims_so_far["id"])))

        available_sims = sims_so_far.copy()

    print("Samples : {}".format(available_sims["sample"]))

    if len(available_sims) > 0:
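        # Compare simulated prevalence for every available simulation against the reference data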
        am = AnalyzeManager()
        for sim_id in available_sims["id"]:
            am.add_simulation(retrieve_simulation(sim_id))

        am.add_analyzer(
            comparison_channel_plotter(
                "Blood Smear Parasite Prevalence",
                filenames=['output/ReportMalariaFilteredCatchment.json'],
                reference_data=prev_ref_data,
                ref_date=ref_date,
                legend=False,
                working_dir=catch_folder,
                save_type="other",
                save_name="excl_prev_test"))

        am.analyze()

        plt.figure()

        # Set plot range to 2017:
        for s in available_sims["sample"]:
            df = inc_comp_df[inc_comp_df["sample"] == s]
            plt.plot(np.array(df["cases_sim"]))
        plt.plot(np.array(df["cases_ref"]),
                 color='black',
Code Example #12
            if len(sm_mini) > 0:

                if __name__ == "__main__":
                    am = AnalyzeManager()

                    for sim_id in sm_mini["id"]:
                        am.add_simulation(retrieve_simulation(sim_id))

                    am.add_analyzer(
                        comparison_channel_plotter(
                            "Blood Smear Parasite Prevalence",
                            filenames=[
                                'output/ReportMalariaFilteredCatchment.json'
                            ],
                            reference_data=prev_ref_data,
                            ref_date=ref_date,
                            legend=False,
                            working_dir='.',
                            save_type="other",
                            save_name=os.path.join(
                                c_folder,
                                "prev_dist_thresh_{}_inc_{}.png".format(i,
                                                                        j))))

                    am.analyze()

                    pretty_plot_incidence(
                        inc_comp_df,
                        list(sm_mini["__sample_index__"]),
                        savefile=os.path.join(
                            c_folder,
Code Example #13
# An example:
if __name__ == "__main__":
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    am = AnalyzeManager()
    # am.add_analyzer(basic_inset_channel_plotter("True Prevalence"))

    diagnostic_channels = [
        "Daily Bites per Human", "Daily EIR", "Adult Vectors",
        "Blood Smear Parasite Prevalence"
    ]
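    # Overlay each diagnostic channel from both experiments, colored and labeled per
    # experiment, with the x-axis restricted to 2010-2019.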
    am.add_analyzer(
        inset_channel_plotter(diagnostic_channels,
                              color_by_expt=True,
                              label_by_expt=True,
                              label_dict={
                                  "7e3073b4-d9f1-e811-a2bd-c4346bcb1555":
                                  "full campaign",
                                  "a2e981fe-d9f1-e811-a2bd-c4346bcb1555":
                                  "no 2011 bednets"
                              },
                              ref_date="2001-01-01",
                              figsize=(10, 4),
                              plot_xlim=convert_date_list_to_mdate(
                                  ["2010-01-01", "2019-01-01"]),
                              plot_show=False))
    am.add_experiment("7e3073b4-d9f1-e811-a2bd-c4346bcb1555")
    am.add_experiment("a2e981fe-d9f1-e811-a2bd-c4346bcb1555")
    am.analyze()
        'f8394f29-b6e2-e811-a2bd-c4346bcb1555',
        'cd70a86e-b5e2-e811-a2bd-c4346bcb1555',
        'c47d78b3-b4e2-e811-a2bd-c4346bcb1555',
        'c6cbf126-59dd-e811-a2bd-c4346bcb1555',
        'c9eef583-58dd-e811-a2bd-c4346bcb1555',
        '50a088ee-57dd-e811-a2bd-c4346bcb1555'
    ]

    ref_exp = load_experiments_from_file()
    for exp in exp_list:

        tmp = get_experiment_by_id(
            exp, query_criteria=QueryCriteria().select_children(["tags"]))
        if (exp in ref_exp) or (any(
            [sim.state.value != 6 for sim in tmp.get_simulations()])):
            continue

        with open(os.path.join('Experiments', 'experiment_metadata.json'),
                  'r',
                  encoding='utf8') as jsonfile:
            exp_metadata = json.load(jsonfile)
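        # Record this experiment's tags and write the shared metadata file back out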
        exp_metadata[exp] = tmp.tags
        with open(os.path.join('Experiments', 'experiment_metadata.json'),
                  'w',
                  encoding='utf8') as jsonfile:
            json.dump(exp_metadata, jsonfile, cls=NumpyEncoder, indent=3)

        am = AnalyzeManager(exp)
        am.add_analyzer(Output2MatlabAnalyzer())
        am.analyze()