import pandas as pd

from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.SetupParser import SetupParser
from simtools.Utilities.Experiments import retrieve_experiment

# Project-specific imports (e.g. SimulationDirectoryMapAnalyzer, inset_channel_plotter,
# default_channels) are assumed to be provided by the surrounding package.


def create_sim_map(burnin_id):

    am = AnalyzeManager(burnin_id, analyzers=SimulationDirectoryMapAnalyzer())
    am.analyze()

    sim_map_dict = am.analyzers[0].results
    df = pd.concat([pd.DataFrame(exp) for exp_id, exp in sim_map_dict.items()])
    return df


def run_single_analyzer(exp_id, analyzer, savefile_prefix=""):
    SetupParser.default_block = 'HPC'
    if not SetupParser.initialized:
        SetupParser.init()

    am = AnalyzeManager()
    am.add_analyzer(analyzer())
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()

    df_return = am.analyzers[0].results
    df_return.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return df_return


def plot_inset_diagnostics(experiment_list,
                           channels=default_channels,
                           working_dir=".",
                           filename="output/InsetChart.json",
                           **kwargs):
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    am = AnalyzeManager()
    am.add_analyzer(
        inset_channel_plotter(channels,
                              working_dir=working_dir,
                              filename=filename,
                              **kwargs))

    for expt in experiment_list:
        am.add_experiment(expt)
    am.analyze()


def run_analyzers(exp_id, analyzers, savefile_prefix=""):
    def _remove_duplicate_columns(df):
        columns_to_keep = []
        for c in df.columns:
            if "_duplicated" not in c:
                columns_to_keep.append(c)
        return df[columns_to_keep]

    SetupParser.default_block = 'HPC'
    if not SetupParser.initialized:
        SetupParser.init()

    am = AnalyzeManager()
    for a in analyzers:
        am.add_analyzer(a())
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()

    if len(analyzers) == 1:
        df_return = am.analyzers[0].results

    elif len(analyzers) > 1:
        # Merge every analyzer's results on sim_id, then drop the duplicated
        # columns introduced by the merge suffixes.
        df_list = [x.results for x in am.analyzers]
        df_return = df_list[0]
        for df_next in df_list[1:]:
            df_return = pd.merge(df_return,
                                 df_next,
                                 on="sim_id",
                                 suffixes=["", "_duplicated"])
        df_return = _remove_duplicate_columns(df_return)

    else:
        raise ValueError("At least one analyzer must be provided")

    df_return.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return df_return
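A minimal usage sketch for the helpers above. The experiment ID is a placeholder, and MyEndpointAnalyzer is a hypothetical stand-in for any project analyzer class whose results attribute is a pandas DataFrame containing a "sim_id" column.

# Sketch only (hypothetical usage of the helpers above).
# "MyEndpointAnalyzer" and the experiment ID are placeholders.
burnin_map = create_sim_map("00000000-0000-0000-0000-000000000000")
endpoint_df = run_analyzers("00000000-0000-0000-0000-000000000000",
                            [MyEndpointAnalyzer],
                            savefile_prefix="endpoint")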
Example #5
def run_experiment(configbuilder, experiment_name, experiment_builder,
                   analyzers):
    run_sim_args = {
        'config_builder': configbuilder,
        'exp_name': experiment_name,
        'exp_builder': experiment_builder
    }

    if not SetupParser.initialized:
        SetupParser.init('HPC')

    exp_manager = ExperimentManagerFactory.init()
    exp_manager.run_simulations(**run_sim_args)
    exp_manager.wait_for_finished(verbose=True)
    assert exp_manager.succeeded()
    am = AnalyzeManager(exp_manager.experiment)
    for a in analyzers:
        am.add_analyzer(a)
    am.analyze()
    # Write special serialized_files_sim_map for runs that need it
    for c in range(56):
        catch = catch_list[c]
        c_folder = "C:/Users/jsuresh/Dropbox (IDM)/Malaria Team Folder/projects/zambia_gridded_sims/kariba_gridded_sims/calibs/{}".format(
            catch)
        sim_map_filename = os.path.join(c_folder,
                                        "serialized_files_sim_map.csv")

        # See if you can find an experiment named CATCH_project_climatefix

        try:
            name_try = "{}_project_climatefix".format(catch)
            proj_exp = DataStore.get_most_recent_experiment(
                id_or_name=name_try)

            # If you can find the experiment, find the corresponding parent simulation and output its sim map
            if proj_exp:
                name_try = "{}_serialize".format(catch)
                orig_exp = DataStore.get_most_recent_experiment(
                    id_or_name=name_try)

                am = AnalyzeManager()
                am.add_experiment(orig_exp)
                am.add_analyzer(
                    SimulationDirectoryMapAnalyzer(save_file=sim_map_filename))
                am.analyze()
                print("Wrote serialized_files_sim_map for {}".format(catch))

        except Exception as e:
            print("Could not write sim map for {}: {}".format(catch, e))
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.Analysis.BaseAnalyzers import DownloadAnalyzer

if __name__ == "__main__":
    analyzer = DownloadAnalyzer(filenames=[
        'campaign_Swaziland_v05Sep2018_FINAL_reftracktopup_2011_v2.json',
        'config.json', 'Swaziland_Demographics_With_Properties.json',
        'Risk_Assortivity_Overlay.json', 'PFA_Overlay.json',
        'Accessibility_and_Risk_IP_Overlay.json'
    ])
    am = AnalyzeManager(["6f6ff06d-fed6-e811-a2bd-c4346bcb1555"],
                        analyzers=analyzer)
    am.analyze()
Example #8
# This block will be used unless overridden on the command-line
SetupParser.default_block = 'HPC'

cb = DTKConfigBuilder.from_defaults('VECTOR_SIM')
configure_site(cb, 'Namawala')
cb.set_param('Simulation_Duration', 365)


analyzers = (TimeseriesAnalyzer(),
             VectorSpeciesAnalyzer())


builder = GenericSweepBuilder.from_dict({'Run_Number': range(5)})

run_sim_args = {
    'exp_name': 'testrunandanalyze',
    'exp_builder': builder,
    'config_builder': cb
}

if __name__ == "__main__":
    SetupParser.init(selected_block=SetupParser.default_block)
    exp_manager = ExperimentManagerFactory.from_cb(config_builder=cb)
    exp_manager.run_simulations(**run_sim_args)
    exp_manager.wait_for_finished(verbose=True)

    am = AnalyzeManager(exp_manager.experiment)
    for a in analyzers:
        am.add_analyzer(a)
    am.analyze()
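As the comment above notes, the HPC block is only a default; a different block can be selected explicitly when initializing. The sketch below assumes a [LOCAL] block is defined in simtools.ini.

# Sketch only: run the same workflow against the LOCAL block instead of HPC
# (assumes a [LOCAL] block exists in simtools.ini).
if not SetupParser.initialized:
    SetupParser.init(selected_block='LOCAL')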
Example #9
        if len(selected) == 0:
            print("No data have been returned... Exiting...")
            return

        d = pd.concat(selected).reset_index(drop=True)
        d['year'] = d['time'].apply(lambda x: int(x / 365))

        d = d[d['time'] >= 100]
        summed = d.groupby(self.sweep_variables + ['Run_Number'])[self.channel].agg(np.sum).reset_index()
        df = summed.groupby(self.sweep_variables)[self.channel].agg(np.mean).reset_index()

        print(df)

        sns.set_style('white', {'axes.linewidth': 0.5})
        fig = plt.figure()
        ax = fig.gca()
        sns.heatmap(df.pivot(index=self.sweep_variables[0], columns=self.sweep_variables[1], values=self.channel),
                    ax=ax)
        ax.set_ylabel(self.sweep_variables[0])
        ax.set_xlabel(self.sweep_variables[1])
        plt.show()
        plt.close('all')


if __name__ == '__main__':

    expid = '63903283-1f7b-e811-a2c0-c4346bcb7275'
    am = AnalyzeManager(expid,
                        analyzers=Sweep2DAnalyzer(['coverage', 'initial_killing']))
    am.analyze()
Example #10
cb = CMSConfigBuilder.from_files(model_file='inputs/models/simplemodel.emodl',
                                 config_file='inputs/models/simplemodel.cfg')

# If the base collection containing CMS exists, use it
# If not, use the local
if SetupParser.default_block == "HPC":
    try:
        cb.set_collection_id('CMS 0.82 Pre-release')
    except SimulationAssets.InvalidCollection:
        cb.set_experiment_executable('inputs/compartments/compartments.exe')
        cb.set_dll_root('inputs/compartments')
else:
    cb.set_experiment_executable('inputs/compartments/compartments.exe')
    cb.set_dll_root('inputs/compartments')

run_sim_args = {"config_builder": cb, "exp_name": "First CMS run"}

if __name__ == "__main__":
    SetupParser.init()
    em = ExperimentManagerFactory.from_cb(run_sim_args["config_builder"])
    em.run_simulations(exp_name=run_sim_args["exp_name"])

    # Wait for the simulation to complete
    em.wait_for_finished(verbose=True)

    # Analyze
    am = AnalyzeManager(exp_list='latest')
    am.add_analyzer(SimpleCMSAnalyzer())
    am.analyze()
import os
import sys

import pandas as pd

from COMPS.Data import QueryCriteria
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.SetupParser import SetupParser
# Import path assumed for get_experiment_by_id (dtk-tools COMPS utilities).
from simtools.Utilities.COMPSUtilities import get_experiment_by_id

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from PythonHelperFunctions.utils import load_output_path
from SCOutputAnalyzer import SCOutputAnalyzer

SetupParser.default_block = "HPC"

if __name__ == "__main__":
    SetupParser.init()
    exp_list = pd.read_csv(os.path.join(load_output_path(),
                                        'Experiment_tracking.csv'),
                           index_col="Index")

    for index, row in exp_list.iterrows():

        exp = row['Experiment ID']
        print('Checking experiment ' + exp)
        tmp = get_experiment_by_id(
            exp, query_criteria=QueryCriteria().select_children(["tags"]))
        foldername = row['Description']
        outDir = os.path.join(load_output_path(), foldername, 'simOutputs')
        if not os.path.exists(outDir):
            os.mkdir(outDir)

        am = AnalyzeManager(exp,
                            analyzers=SCOutputAnalyzer(
                                filenames=['output/InsetChart.json',
                                           'output/PropertyReport.json'],
                                output_path=outDir))
        am.analyze()
Example #12
            ]
           for smc_coverage in coverages
           for seed in range(num_seeds)
        ]

builder = ModBuilder.from_list(SMC)

run_sim_args = {'config_builder': cb,
                'exp_name': expname,
                'exp_builder': builder}


if __name__ == "__main__":

    SetupParser.default_block = 'HPC'

    SetupParser.init()
    exp_manager = ExperimentManagerFactory.init()
    exp_manager.run_simulations(**run_sim_args)
    # Wait for the simulations to be done
    exp_manager.wait_for_finished(verbose=True)
    assert exp_manager.succeeded()

    analyzer = PrevalenceAnalyzer(expt_name=expname,
                                  sweep_variables=["Run_Number", "Coverage"])

    am = AnalyzeManager(exp_manager.experiment, analyzers=analyzer)
    am.analyze()
Example #13
        adf = pd.concat(selected).reset_index(drop=True)

        adf.sort_values(by=self.sweep_variables + ['year'], inplace=True)
        adf.to_csv(os.path.join(self.working_dir, '%s.csv' % self.expt_name),
                   index=False)


if __name__ == '__main__':

    out_dir = os.path.join(wdir, 'data')

    experiments = {"End60": "229a4440-8d1f-e911-a2be-c4346bcb1554"}

    for expt_name, exp_id in experiments.items():
        am = AnalyzeManager(exp_list=exp_id,
                            analyzers=[
                                IvermectinAnalyzer(
                                    working_dir=out_dir,
                                    expt_name=expt_name,
                                    sweep_variables=[
                                        "Run_Number",
                                        "x_Temporary_Larval_Habitat",
                                        'dirus_Anthropophily', 'target',
                                        'coverage', "intervention"
                                    ])
                            ],
                            force_analyze=True)

        print(am.experiments)
        am.analyze()
        plt.ylabel("Fraction of sims eliminating")
        plt.title(vc_packs[i])
        plt.legend()

    plt.show()



if __name__ == "__main__":
    if True:
        # analyzer_list = [ExtractInfectionResult()]
        analyzer_list = [ExtractInfectionResult(),
                         SimulationDirectoryMapAnalyzer(save_file="sim_map.csv")]
        exp_list = ["520818ca-ae3b-e911-a2c5-c4346bcb7273"]

        am = AnalyzeManager(force_analyze=True)

        for exp_name in exp_list:
            am.add_experiment(retrieve_experiment(exp_name))
        for a in analyzer_list:
            am.add_analyzer(a)

        am.analyze()

    if True:
        convert_infection_csv_to_elim()


    if False:
        plot_elim_curves(y=2)
        df.to_csv(os.path.join(self.working_dir, '%s.csv' % self.dir_name), index=False)


if __name__ == "__main__":
    SetupParser.init("HPC")
    out_dir = os.path.join('E:/', 'Dropbox (IDM)', 'Malaria Team Folder', 'projects',
                           'map_intervention_impact', 'lookup_tables')

    sites = pd.read_csv("site_details.csv")

    experiments = {
        "corr_itn_irs": "e7fd04a2-a8d8-e811-a2bd-c4346bcb1555"
    }

    for dirname, exp_id in experiments.items():

        am = AnalyzeManager(exp_list=exp_id,
                            analyzers=[BurdenAnalyzer(working_dir=out_dir,
                                                      dir_name=dirname,
                                                      report_names=sites["name"].tolist(),
                                                      sweep_variables=["Run_Number",
                                                                       "x_Temporary_Larval_Habitat",
                                                                       "IRS_Coverage",
                                                                       "ITN_Coverage",
                                                                       "ITN_IRS_Coverage"])],
                            force_analyze=True)

        print(am.experiments)
        am.analyze()

                    if sdf[self.channel][j] == sdf[self.channel][j+365]:
                        eliminated[str(c) + ' / ' + str(r)] += 1
                    #    infections[str(c) + ' / ' + str(r)] = sdf[self.channel]
                    for num in range(1460):
                        infect[num] += sdf[self.channel][j - 1000 + num]
                    j += 4 * 365
                ax.plot(range(1460), infect, linewidth=1, label='cov %.2f' % c)
        ax.legend()
        ax.set_xlabel('Time')
        ax.set_ylabel('Infected')
        #plt.savefig(os.path.join(plotdir, '123myplot123.pdf'), format='PDF')
        plt.title('ATSB Infection Reduction (CDC)')
        plt.show()
        plt.close('all')








if __name__ == '__main__':

    expid = '905a91f4-2e9b-e811-a2c0-c4346bcb7275'
    am = AnalyzeManager(expid,
                        analyzers=VectorCountAnalyzer())
    am.analyze()
Example #17
        value1 = tags[param1]
        value2 = tags[param2]
        sweep_set = '%s-%s--%s-%s' % (param1, value1, param2, value2)
        return sweep_name, sweep_set

    def get_sim_folder(self, simulation):
        sweep_name, sweep_set = self.determine_sweep_set(simulation.tags)
        return os.path.join(self.output_path, sweep_name, sweep_set,
                            simulation.id)

    def select_simulation_data(self, data, simulation):
        # Create a folder for the current simulation
        sim_folder = self.get_sim_folder(simulation)
        os.makedirs(sim_folder, exist_ok=True)

        # Create the requested files
        for filename in self.filenames:
            file_path = os.path.join(sim_folder, os.path.basename(filename))
            with open(file_path, 'wb') as outfile:
                outfile.write(data[filename])


if __name__ == "__main__":
    analyzer = DownloadAnalyzerWithTags(
        filenames=['output\\ReportHIVByAgeAndGender.csv'])
    experiments = retrieve_item(
        itemid='5a088005-19f8-e911-a2c3-c4346bcb1551')  # suite_id passed here
    experiments = [e.id for e in experiments]
    am = AnalyzeManager(exp_list=experiments, analyzers=analyzer)
    am.analyze()
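An alternative way to collect the experiments of a suite, used later in these examples, is exps_for_suite_id. This is a sketch; the import path is assumed, and the suite ID is the one from the example above.

# Sketch only: gather the suite's experiments with exps_for_suite_id instead of
# retrieve_item. The import path is assumed.
from simtools.Utilities.COMPSUtilities import exps_for_suite_id

experiments = exps_for_suite_id('5a088005-19f8-e911-a2c3-c4346bcb1551')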
if __name__ == "__main__":
    # Run endpoint analyzer on original run, and output as endpoint_original.csv
    for c in catch_nums:
        catch = catch_list[c]
    catch_folder = os.path.join(cf_folder, catch)

    if run_endpoint_original:
        sim_map_filename = os.path.join(catch_folder, "sim_map_original.csv")
        endpoint_filename = os.path.join(catch_folder, "endpoint_original.csv")

        analyzer_list = []
        analyzer_list += [SaveEndpoint(save_file=endpoint_filename, year_to_use=6),
                          SimulationDirectoryMapAnalyzer(save_file=sim_map_filename)]

        am = AnalyzeManager()
        exp = retrieve_experiment(orig_exp["exp_id"])
        am.add_experiment(exp)

        for a in analyzer_list:
            am.add_analyzer(a)

        am.analyze()

    if run_endpoint_cf:
        sim_map_filename = os.path.join(catch_folder, "sim_map_cf.csv")
        endpoint_filename = os.path.join(catch_folder, "endpoint_cf.csv")

        analyzer_list = []
        analyzer_list += [SaveEndpoint(save_file=endpoint_filename, year_to_use=6),
                          SimulationDirectoryMapAnalyzer(save_file=sim_map_filename)]
Example #19
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.Analysis.BaseAnalyzers import BaseAnalyzer


class PopulationAnalyzer(BaseAnalyzer):
    def __init__(self):
        super().__init__(filenames=['output\\InsetChart.json'])

    def select_simulation_data(self, data, simulation):
        # select_simulation_data is called for every simulation in the experiment.
        # Here we simply return the "Statistical Population" channel data.
        return data[self.filenames[0]]["Channels"]["Statistical Population"]["Data"]

    def finalize(self, all_data):
        import matplotlib.pyplot as plt
        for pop in list(all_data.values()):
            plt.plot(pop)
        plt.legend([s.id for s in all_data.keys()])
        plt.show()


# Analyze the latest experiment run with the PopulationAnalyzer
if __name__ == "__main__":
    am = AnalyzeManager('latest', analyzers=PopulationAnalyzer())
    am.analyze()
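The same analyzer can also be pointed at a specific experiment instead of the most recent one. The experiment ID below is a placeholder.

# Sketch only: analyze a specific experiment by ID rather than 'latest'.
# The ID is a placeholder.
am = AnalyzeManager("00000000-0000-0000-0000-000000000000",
                    analyzers=PopulationAnalyzer())
am.analyze()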
        x = np.linspace(0, 0.25, 100)
        y = np.linspace(
            min(sdf2['pre-prevalence']), max(sdf2['pre-prevalence']), 100
        )  #np.arange(min(sdf['Log_x_Temp']), max(sdf['Log_x_Temp'])+(sdf['Log_x_Temp'][3]-sdf['Log_x_Temp'][2]), (sdf['Log_x_Temp'][3]-sdf['Log_x_Temp'][2]))
        xx, yy = np.meshgrid(x, y)
        inter = interpolate.Rbf(sdf['killing'], sdf2['pre-prevalence'],
                                sdf[self.channel])
        zz = inter(xx, yy)
        cmap = 'rainbow_r'
        #plt.imshow(zz, cmap = plt.get_cmap(cmap, 10))
        c = plt.contour(xx, yy, zz, cmap=plt.get_cmap(cmap), vmin=0, vmax=1)
        plt.clabel(c, inline=1, fontsize=8, fmt='%.2f')
        palette = sns.color_palette(cmap, 100)
        ax.scatter(
            sdf['killing'],
            sdf2['pre-prevalence'],
            20,
            color=[palette[int(x * 100)] for x in sdf[self.channel].values],
            alpha=0.5)
        #plt.savefig(os.path.join(plotdir, '123myplot123.pdf'), format='PDF')
        plt.title('ATSB Infection Reduction Heatmap')
        plt.show()
        plt.close('all')


if __name__ == '__main__':

    expid = '4ee73ebf-d59c-e811-a2c0-c4346bcb7275'
    am = AnalyzeManager(expid, analyzers=HeatmapAnalyzer())
    am.analyze()
Example #21
class SCOutputAnalyzer(BaseAnalyzer):
    def __init__(self, filenames=None, output_path=None):
        super().__init__(filenames=filenames)
        self.output_path = output_path or "output"

    def select_simulation_data(self, data, simulation):
        # select_simulation_data is called for every simulation in the experiment.
        # Store the InsetChart and PropertyReport data along with the simulation tags.
        selected_data = {}
        selected_data['insetChart'] = data[self.filenames[0]]
        selected_data['sim_id'] = simulation.id
        if simulation.tags.get('__sample_index__') is not None:
            selected_data['sample'] = simulation.tags.get('__sample_index__')
        else:
            selected_data['sample'] = simulation.id
        selected_data['tags'] = simulation.tags
        selected_data['propertyReport'] = data[self.filenames[1]]
        return selected_data

    def finalize(self, all_data):
        with open(os.path.join(self.output_path, 'results.pkl'),
                  'wb') as pklfile:
            pickle.dump(all_data, pklfile)


# Analyze the latest experiment run with the SCOutputAnalyzer
if __name__ == "__main__":
    am = AnalyzeManager('latest',
                        analyzers=SCOutputAnalyzer(filenames=['output/InsetChart.json',
                                                              'output/PropertyReport.json']))
    am.analyze()
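The pickle written by finalize() can be read back with the standard library. This sketch assumes the analyzer's default "output" directory was used as output_path.

# Sketch only: load the results dictionary written by SCOutputAnalyzer.finalize.
# Assumes the default "output" directory was used as output_path.
import os
import pickle

with open(os.path.join("output", "results.pkl"), "rb") as pklfile:
    all_data = pickle.load(pklfile)
print("Loaded results for {} simulations".format(len(all_data)))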
Example #22
        # "IVM_final": "dd4ce507-c72f-e911-a2c5-c4346bcb7273",
        # "No_interventions": "e9e19499-c52f-e911-a2c5-c4346bcb7273"
        # "eSMC_IVM_everyone": "9609582b-7b3f-e911-a2c5-c4346bcb7273",
        # "eSMC10_IVM": "a47b2382-7b3f-e911-a2c5-c4346bcb7273",
        # "Ivermectin_excludingchildbearing": "1afccb61-8846-e911-a2c0-c4346bcb1554",
        'SMC_10': '154263e1-c031-e911-a2c5-c4346bcb7273'
    }

    for expt_name, exp_id in experiments.items():
        if 'Reference' in expt_name:
            am = AnalyzeManager(
                exp_list=exp_id,
                analyzers=[
                    SummaryAnalyzer(expt_name=expt_name,
                                    report_names=['Daily_Report'],
                                    sweep_variables=["Run_Number"]),
                    InsetAnalyzer(expt_name=expt_name,
                                  report_names=['InsetChart'],
                                  sweep_variables=["Run_Number"])
                ],
                force_analyze=False)

        else:
            am = AnalyzeManager(exp_list=exp_id,
                                analyzers=[
                                    SummaryAnalyzer(
                                        expt_name=expt_name,
                                        report_names=['Daily_Report'],
                                        sweep_variables=[
                                            "Run_Number", "Coverage",
                                            "Intervention_type"
Example #23
                           'Malaria Team Folder', 'projects',
                           'map_intervention_impact', 'lookup_tables')

    run_type = "exp"

    if run_type == "exp":
        experiments = ["6e91248b-dbbd-e811-a2bd-c4346bcb1555"]

        for exp_id in experiments:

            am = AnalyzeManager(exp_list=exp_id,
                                analyzers=[
                                    PfPRAnalyzer(
                                        working_dir=out_dir,
                                        sweep_variables=[
                                            "Site_Name", "Run_Number",
                                            "x_Temporary_Larval_Habitat",
                                            "ACT_Coverage", "IRS_Coverage",
                                            "ITN_Coverage"
                                        ])
                                ],
                                force_analyze=True)

            print(am.experiments)
            am.analyze()

    elif run_type == "suite":

        exps = exps_for_suite_id("537b9041-0fa3-e811-a2c0-c4346bcb7275")
        print(exps)
        am = AnalyzeManager(exp_list=exps,
                            analyzers=[
    add_SerializationTimesteps(config_builder=cb_serializing,
                               timesteps=[timestep_to_reload],
                               end_at_final=True)

    s_pop_filename = "state-{0:05d}.dtk".format(timestep_to_reload)
    # Run the simulation
    exp_manager.run_simulations(config_builder=cb_serializing,
                                exp_name="Sample serialization test")

    exp_manager.wait_for_finished(verbose=True)

    # Download the state file
    simulation = exp_manager.experiment.simulations[0]
    da = DownloadAnalyzer(filenames=["output\\{}".format(s_pop_filename)],
                          output_path="temp")
    am = AnalyzeManager(sim_list=[simulation], analyzers=[da])
    am.analyze()

    # Add the state file
    cb_reload = DTKConfigBuilder.from_defaults('VECTOR_SIM')
    configure_site(cb_reload, 'Namawala')
    cb_reload.set_param("Simulation_Duration", 365)
    cb_reload.experiment_files.add_file(
        os.path.join("temp", simulation.id, s_pop_filename))

    load_Serialized_Population(cb_reload, 'Assets', [s_pop_filename])

    # Run !
    exp_manager.run_simulations(config_builder=cb_reload)

    # Cleanup temp directory
                            color=palette[c],
                            label=coverage)
                if i == num_interventions - 1:
                    ax.set_xlabel('%s no intervention' % self.data_channel)
                if s == 0:
                    ax.set_ylabel('%s with intervention' % self.data_channel)
                ax.set_title('%s %s' % (site, intervention))

        axes[-1].legend(title='coverage')
        plt.show()


if __name__ == "__main__":

    from simtools.Analysis.AnalyzeManager import AnalyzeManager
    from simtools.SetupParser import SetupParser

    SetupParser.default_block = 'HPC'
    SetupParser.init()
    sites = pd.read_csv("site_details.csv")

    analyzer = PfPRAnalyzer(expt_name='global_int_ex_test',
                            report_names=sites["name"].tolist(),
                            sweep_variables=[
                                "Run_Number", "x_Temporary_Larval_Habitat",
                                "intervention"
                            ])

    am = AnalyzeManager('d9bf3918-d8fc-e811-a2bd-c4346bcb1555',
                        analyzers=analyzer)
    am.analyze()
                  index=False)


if __name__ == "__main__":

    SetupParser.default_block = 'LOCAL'
    SetupParser.init()

    out_dir = os.path.join(projectdir, 'sim_data')

    experiments = {
        "SMC_ivermection_scenarios_uncorrelated":
        "01768676-af24-e911-a2be-c4346bcb1554"
    }

    for expt_name, exp_id in experiments.items():
        am = AnalyzeManager(exp_list=exp_id,
                            analyzers=[
                                InsetAnalyzer(working_dir=out_dir,
                                              expt_name=expt_name,
                                              report_names=['InsetChart'],
                                              sweep_variables=[
                                                  "Run_Number", "Coverage",
                                                  "Intervention_type"
                                              ])
                            ],
                            force_analyze=False)

        print(am.experiments)
        am.analyze()
Example #27
        df.to_csv('%s.csv' % self.output_fname)

        fig = plt.figure()
        ax = fig.gca()
        max_EIR = np.max(df['aEIR'])
        ax.scatter(np.log10(df['aEIR']), df['PfPR_2to10'])

        ax.set_xlabel('aEIR')
        ax.set_ylabel('PfPR [2 to 10]')
        ax.set_ylim(0, 1)

        plt.title('PfPR [2 to 10] v. aEIR')

        plt.savefig('%s.png' % self.output_fname)
        plt.savefig('%s.pdf' % self.output_fname, format='PDF')

        plt.close('all')


if __name__ == '__main__':

    expids = ['4f157763-d898-ea11-a2c5-c4346bcb1550']
    expnames = ['EIR_sweep']

    for expname, expid in zip(expnames, expids):
        os.makedirs(os.path.join(wdir, expid), exist_ok=True)
        output_fname = os.path.join(wdir, expid, expname)
        am = AnalyzeManager(expid, analyzers=EIR_PfPR_Analyzer(output_fname))
        am.analyze()
            experiments_ids.append(experiment_manager.experiment.exp_id)

    # Dump the experiment ids for resume
    with open('ids.json', 'w') as out:
        json.dump(experiments_ids, out)

    # While the experiments are running, we are analyzing every 30 seconds
    while True:
        print("Analyzing !")

        # Determine if we are done at the beginning of the loop
        # We will still analyze everything even if we are done
        finished = all([em.finished() for em in experiments])

        # Create a new AnalyzeManager and add experiment and analyzer
        am = AnalyzeManager(verbose=False)
        for em in experiments:
            am.add_experiment(em.experiment)

        analyzer = DownloadAnalyzerTPI(
            filenames=['output\\ReportHIVByAgeAndGender.csv'],
            TPI_tag="TPI",
            ignore_TPI=False,
            REP_tag="TPI",
            ignore_REP=True,
            output_path=output_directory)

        am.add_analyzer(analyzer)

        # Make sure we refresh our set of experiments
                if "c" in locals():
                    plt.plot(data, label=label, c=c)
                else:
                    plt.plot(data, label=label)

                plt.xlabel("Simulation Time")

        plt.ylabel(self.channel)
        plt.legend()
        plt.show()


if __name__ == "__main__":
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    am = AnalyzeManager()
    # am.add_analyzer(basic_inset_channel_plotter("True Prevalence"))

    am.add_analyzer(
        inset_channel_plotter("True Prevalence",
                              color_by_expt=True,
                              label_by_expt=True,
                              label_dict={
                                  "7e3073b4-d9f1-e811-a2bd-c4346bcb1555":
                                  "full campaign",
                                  "a2e981fe-d9f1-e811-a2bd-c4346bcb1555":
                                  "no 2011 bednets"
                              },
                              ref_date="2001-01-01"))
    am.add_experiment("7e3073b4-d9f1-e811-a2bd-c4346bcb1555")
    am.add_experiment("a2e981fe-d9f1-e811-a2bd-c4346bcb1555")
    am.analyze()
from simtools.Analysis.AnalyzeManager import AnalyzeManager
from simtools.Analysis.BaseAnalyzers import DownloadAnalyzer

if __name__ == "__main__":
    analyzer = DownloadAnalyzer(
        filenames=['output\\InsetChart.json', 'config.json'])
    am = AnalyzeManager('latest', analyzers=analyzer)
    am.analyze()
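A destination folder can also be given explicitly via output_path, the same keyword used with DownloadAnalyzer in the serialization example above; "downloads" is a placeholder directory name.

# Sketch only: the same download into an explicit folder.
analyzer = DownloadAnalyzer(filenames=['output\\InsetChart.json', 'config.json'],
                            output_path='downloads')
am = AnalyzeManager('latest', analyzers=analyzer)
am.analyze()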