Code example #1
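Runs a single analyzer against one experiment retrieved by id and writes the analyzer's `results` DataFrame to `<prefix>_<exp_id>.csv`.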
# Import paths assumed from the standard DTK-Tools (simtools) layout
from simtools.SetupParser import SetupParser
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.Utilities.Experiments import retrieve_experiment


def run_single_analyzer(exp_id, analyzer, savefile_prefix=""):
    SetupParser.default_block = 'HPC'
    SetupParser.init()

    am = AnalyzeManager()
    am.add_analyzer(analyzer())
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()

    df_return = am.analyzers[0].results
    df_return.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return df_return
Code example #2
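Plots InsetChart channels for a list of experiments by attaching an `inset_channel_plotter` analyzer to an AnalyzeManager; `inset_channel_plotter` and `default_channels` are presumably defined elsewhere in the same module.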
def plot_inset_diagnostics(experiment_list,
                           channels=default_channels,
                           working_dir=".",
                           filename="output/InsetChart.json",
                           **kwargs):
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    am = AnalyzeManager()
    am.add_analyzer(
        inset_channel_plotter(channels,
                              working_dir=working_dir,
                              filename=filename,
                              **kwargs))

    for expt in experiment_list:
        am.add_experiment(expt)
    am.analyze()
Code example #3
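Runs several analyzers against one experiment, merges their per-simulation results on `sim_id`, and saves the combined DataFrame to `<prefix>_<exp_id>.csv`. It assumes the same simtools imports as example #1, plus `pandas` imported as `pd`.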
def run_analyzers(exp_id, analyzers, savefile_prefix=""):
    def _remove_duplicate_columns(df):
        columns_to_keep = []
        for c in df.columns:
            if "_duplicated" not in c:
                columns_to_keep.append(c)
        return df[columns_to_keep]

    SetupParser.default_block = 'HPC'
    SetupParser.init()

    am = AnalyzeManager()
    for a in analyzers:
        am.add_analyzer(a())
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()

    if len(analyzers) == 1:
        df_return = am.analyzers[0].results

    elif len(analyzers) > 1:
        # Merge every analyzer's results on sim_id, then drop the
        # duplicated columns introduced by the merge suffix
        df_list = [x.results for x in am.analyzers]
        df_return = df_list[0]
        for df_next in df_list[1:]:
            df_return = pd.merge(df_return,
                                 df_next,
                                 on="sim_id",
                                 suffixes=["", "_duplicated"])
        df_return = _remove_duplicate_columns(df_return)

    else:
        raise ValueError("No analyzers were given.")

    df_return.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return df_return
Code example #4
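The tail of an inset-chart plotting routine (the enclosing analyzer class is not shown), followed by a driver that plots the 'True Prevalence' channel for two experiments, coloring by experiment and labeling them 'full campaign' and 'no 2011 bednets'.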
                    plt.plot(data, label=label, c=c)
                else:
                    plt.plot(data, label=label)

                plt.xlabel("Simulation Time")

        plt.ylabel(self.channel)
        plt.legend()
        plt.show()


if __name__ == "__main__":
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    am = AnalyzeManager()
    # am.add_analyzer(basic_inset_channel_plotter("True Prevalence"))

    am.add_analyzer(
        inset_channel_plotter("True Prevalence",
                              color_by_expt=True,
                              label_by_expt=True,
                              label_dict={
                                  "7e3073b4-d9f1-e811-a2bd-c4346bcb1555":
                                  "full campaign",
                                  "a2e981fe-d9f1-e811-a2bd-c4346bcb1555":
                                  "no 2011 bednets"
                              },
                              ref_date="2001-01-01"))
    am.add_experiment("7e3073b4-d9f1-e811-a2bd-c4346bcb1555")
    am.add_experiment("a2e981fe-d9f1-e811-a2bd-c4346bcb1555")
    am.analyze()
Code example #5
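Loops over 56 catchments and, for each one whose `*_project_climatefix` experiment exists, runs a `SimulationDirectoryMapAnalyzer` on the corresponding `*_serialize` experiment to write its `serialized_files_sim_map.csv`.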
    # Write special serialized_files_sim_map for runs that need it
    for c in range(56):
        catch = catch_list[c]
        c_folder = "C:/Users/jsuresh/Dropbox (IDM)/Malaria Team Folder/projects/zambia_gridded_sims/kariba_gridded_sims/calibs/{}".format(
            catch)
        sim_map_filename = os.path.join(c_folder,
                                        "serialized_files_sim_map.csv")

        # See if you can find an experiment named CATCH_project_climatefix

        try:
            name_try = "{}_project_climatefix".format(catch)
            proj_exp = DataStore.get_most_recent_experiment(
                id_or_name=name_try)

            # If you can find the experiment, find the corresponding parent simulation and output its sim map
            if proj_exp:
                name_try = "{}_serialize".format(catch)
                orig_exp = DataStore.get_most_recent_experiment(
                    id_or_name=name_try)

                am = AnalyzeManager()
                am.add_experiment(orig_exp)
                am.add_analyzer(
                    SimulationDirectoryMapAnalyzer(save_file=sim_map_filename))
                am.analyze()
                print("Wrote serialized_files_sim_map for {}".format(catch))

        except Exception:
            # Experiment not found (or lookup failed): skip this catchment
            pass
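Code example #6
Dumps the experiment ids to `ids.json` so the run can be resumed, then polls the running experiments in a loop, attaching a `DownloadAnalyzerTPI` for `ReportHIVByAgeAndGender.csv` to a fresh AnalyzeManager on each pass.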
    # Dump the experiment ids for resume
    with open('ids.json', 'w') as out:
        json.dump(experiments_ids, out)

    # While the experiments are running, we are analyzing every 30 seconds
    while True:
        print("Analyzing !")

        # Determine if we are done at the beginning of the loop
        # We will still analyze everything even if we are done
        finished = all([em.finished() for em in experiments])

        # Create a new AnalyzeManager and add experiment and analyzer
        am = AnalyzeManager(verbose=False)
        for em in experiments:
            am.add_experiment(em.experiment)

        analyzer = DownloadAnalyzerTPI(
            filenames=['output\\ReportHIVByAgeAndGender.csv'],
            TPI_tag="TPI",
            ignore_TPI=False,
            REP_tag="TPI",
            ignore_REP=True,
            output_path=output_directory)

        am.add_analyzer(analyzer)

        # Make sure we refresh our set of experiments

        for e in experiments:
            e.refresh_experiment()
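Code example #7
The tail of an elimination-curve plotting routine, followed by a driver that runs `ExtractInfectionResult` and `SimulationDirectoryMapAnalyzer` on an experiment and then converts the resulting infection CSV into elimination results.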
        plt.ylabel("Fraction of sims eliminating")
        plt.title(vc_packs[i])
        plt.legend()

    plt.show()



if __name__ == "__main__":
    if True:
        # analyzer_list = [ExtractInfectionResult()]
        analyzer_list = [ExtractInfectionResult(),
                         SimulationDirectoryMapAnalyzer(save_file="sim_map.csv")]
        exp_list = ["520818ca-ae3b-e911-a2c5-c4346bcb7273"]

        am = AnalyzeManager(force_analyze=True)

        for exp_name in exp_list:
            am.add_experiment(retrieve_experiment(exp_name))
        for a in analyzer_list:
            am.add_analyzer(a)

        am.analyze()

    if True:
        convert_infection_csv_to_elim()


    if False:
        plot_elim_curves(y=2)
Code example #8
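For each catchment, runs a `SaveEndpoint` analyzer together with a `SimulationDirectoryMapAnalyzer` on the original experiment, writing per-catchment endpoint and sim-map CSVs, and then sets up the same pair of analyzers for what appears to be a counterfactual ('cf') run.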
    # Run endpoint analyzer on original run, and output as endpoint_original.csv
    for c in catch_nums:
        catch = catch_list[c]
        catch_folder = os.path.join(cf_folder, catch)

        if run_endpoint_original:
            sim_map_filename = os.path.join(catch_folder, "sim_map_original.csv")
            endpoint_filename = os.path.join(catch_folder, "endpoint_original.csv")

            analyzer_list = [SaveEndpoint(save_file=endpoint_filename, year_to_use=6),
                             SimulationDirectoryMapAnalyzer(save_file=sim_map_filename)]

            am = AnalyzeManager()
            exp = retrieve_experiment(orig_exp["exp_id"])
            am.add_experiment(exp)

            for a in analyzer_list:
                am.add_analyzer(a)

            am.analyze()

        if run_endpoint_cf:
            sim_map_filename = os.path.join(catch_folder, "sim_map_cf.csv")
            endpoint_filename = os.path.join(catch_folder, "endpoint_cf.csv")

            analyzer_list = [SaveEndpoint(save_file=endpoint_filename, year_to_use=6),
                             SimulationDirectoryMapAnalyzer(save_file=sim_map_filename)]

            am = AnalyzeManager()