Esempio n. 1
0
def incidence_grabby(expname, hfca, rank, output_fname=None) :
    """Plot case-incidence output for the rank-th best calibration sample of an experiment.

    :param expname: calibration experiment name (subfolder of ``calib_base``)
    :param hfca: health-facility catchment area passed to the IncidencePlotter
    :param rank: 0-based rank into the likelihood-sorted sample list (0 = best)
    :param output_fname: optional output path; defaults to ``rank{rank}_cases`` in the calib folder
    """
    folder = calib_base + expname + "/"
    if not output_fname:
        output_fname = folder + "rank{}_cases".format(rank)

    # Rank all calibration samples by total log-likelihood, best first.
    ll = pd.read_csv(folder + "_plots/LL_all.csv")
    ll = ll.sort_values(by='total', ascending=False).reset_index()

    sample = ll.loc[rank, 'sample']
    iteration = ll.loc[rank, 'iteration']

    manager = AnalyzeManager()
    # am.add_analyzer(IncidencePlotter(GriddedCalibSite(hfca),save_file=output_fname))
    manager.add_analyzer(IncidencePlotter(hfca, save_file=output_fname, save_csv=True))

    # Attach every simulation from that iteration which ran the chosen sample.
    with open(folder + "iter{}/IterationState.json".format(iteration)) as fin:
        state = json.load(fin)
    for sim_id, info in OrderedDict(state['simulations']).items():
        if info['__sample_index__'] == sample:
            manager.add_simulation(sim_id)
    manager.analyze()
Esempio n. 2
0
def RDT_grabby(expname, rank, output_fname=None, plot_bairros=True) :
    """Plot RDT prevalence for the rank-th best calibration sample of an experiment.

    :param expname: calibration experiment name (subfolder of ``calib_base``)
    :param rank: 0-based rank into the likelihood-sorted sample list (0 = best)
    :param output_fname: optional output path; defaults to ``rank{rank}_rdt`` in the calib folder
    :param plot_bairros: forwarded to PrevAnalyzer
    """
    folder = calib_base + expname + "/"
    if not output_fname:
        output_fname = folder + "rank{}_rdt".format(rank)

    # Rank all calibration samples by total log-likelihood, best first.
    ll = pd.read_csv(folder + "_plots/LL_all.csv")
    ll = ll.sort_values(by='total', ascending=False).reset_index()

    sample = ll.loc[rank, 'sample']
    iteration = ll.loc[rank, 'iteration']

    start_date = "2009-01-01"

    manager = AnalyzeManager()
    manager.add_analyzer(PrevAnalyzer(start_date=start_date,
                                      save_file=output_fname,
                                      cait_output_mode=True,
                                      plot_bairros=plot_bairros))

    # Attach every simulation from that iteration which ran the chosen sample.
    with open(folder + "iter{}/IterationState.json".format(iteration)) as fin:
        state = json.load(fin)
    for sim_id, info in OrderedDict(state['simulations']).items():
        if info['__sample_index__'] == sample:
            # print("Sim ID: ",simid)
            manager.add_simulation(sim_id)
    manager.analyze()
Esempio n. 3
0
def plot_vectors(exp_id, sample, save_file=None):
    """Plot the vector-species 'Daily HBR' report for one sample of an experiment.

    :param exp_id: COMPS experiment id to retrieve
    :param sample: sample selector forwarded to VectorSpeciesReportAnalyzer
    :param save_file: optional file stem; analyzer shows the plot when None
    """
    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment(exp_id))
    analyzer = VectorSpeciesReportAnalyzer(sample,
                                           save_file=save_file,
                                           channel='Daily HBR')
    manager.add_analyzer(analyzer)
    manager.analyze()
Esempio n. 4
0
def vector(expname, calib_stage, rank) :
    """Plot the vector-species 'Daily HBR' report for the rank-th best calibration sample.

    :param expname: calibration experiment name (subfolder of ``calib_base``)
    :param calib_stage: unused; kept for call-site compatibility
    :param rank: 0-based rank into the likelihood-sorted sample list (0 = best)
    """
    calib_folder = calib_base + expname +"/"
    output_fname = calib_folder + "rank{}_vectors".format(rank)

    # Rank all calibration samples by total log-likelihood, best first.
    LL_fname = calib_folder + "_plots/LL_all.csv"
    LL_df = pd.read_csv(LL_fname)
    LL_df.sort_values(by='total', ascending=False, inplace=True)
    LL_df.reset_index(inplace=True)

    sample = LL_df.loc[rank, 'sample']
    iteration = LL_df.loc[rank, 'iteration']

    am = AnalyzeManager()
    am.add_analyzer(VectorSpeciesReportAnalyzer(save_file=output_fname, channel='Daily HBR'))

    with open(calib_folder+"iter{}/IterationState.json".format(iteration)) as fin:
        iteration_state = json.loads(fin.read())
    siminfo = OrderedDict(iteration_state['simulations'])
    for item in list(siminfo.items()) :
        if item[1]['__sample_index__'] == sample :
            simid = item[0]
            # BUG FIX: add each matching simulation inside the loop. Previously this
            # call sat outside the loop, so only the last match was analyzed and a
            # NameError was raised when no simulation matched the sample. This now
            # matches the behavior of incidence_grabby/RDT_grabby.
            am.add_simulation(simid)
    am.analyze()
########################
# other configurations
########################

# If the base collection containing CMS exists, use it
# If not, use the local
# NOTE(review): on a non-HPC block we always fall back to the bundled local
# executable; on HPC we first try the pre-built COMPS asset collection.
if SetupParser.default_block == "HPC":
    try:
        cb.set_collection_id('CMS 0.82 Pre-release')
    except SimulationAssets.InvalidCollection:
        # Collection not available on this endpoint -> use local binaries.
        cb.set_experiment_executable('inputs/compartments/compartments.exe')
        cb.set_dll_root('inputs/compartments')
else:
    cb.set_experiment_executable('inputs/compartments/compartments.exe')
    cb.set_dll_root('inputs/compartments')

# Arguments consumed by `dtk run` / the experiment manager below.
run_sim_args = {"config_builder": cb, "exp_name": "First Default CMS run"}

if __name__ == "__main__":
    SetupParser.init()
    em = ExperimentManagerFactory.from_cb(run_sim_args["config_builder"])
    em.run_simulations(exp_name=run_sim_args["exp_name"])

    # Wait for the simulation to complete
    em.wait_for_finished(verbose=True)

    # Analyze the experiment that was just run ('latest' resolves to it).
    am = AnalyzeManager(exp_list='latest')
    am.add_analyzer(SimpleCMSAnalyzer())
    am.analyze()
Esempio n. 6
0
    # am.add_experiment(retrieve_experiment("15a20ddd-2a36-e811-a2bf-c4346bcb7274"))  # facazissa iter5. best 0.  4/1 10:30pm
    # am.add_experiment(retrieve_experiment("86413a54-0d36-e811-a2bf-c4346bcb7274"))  # magude iter3. best 10.  4/1 10:30pm  X
    # am.add_experiment(retrieve_experiment("15a1d9fe-2f36-e811-a2bf-c4346bcb7274"))  # mahel iter9.  best 0. 4/1
    #  am.add_experiment(retrieve_experiment("0fc16f8f-2636-e811-a2bf-c4346bcb7274")) # mapulanguene iter9. best 10.  4/1 10:30pm
    # am.add_experiment(retrieve_experiment("f5873afe-1336-e811-a2bf-c4346bcb7274"))  # moine iter6. best 0 4/1
    # am.add_experiment(retrieve_experiment("19794550-c135-e811-a2bf-c4346bcb7274"))  # motaze iter1. best 15 4/1
    # am.add_experiment(retrieve_experiment("e6f8c635-2d36-e811-a2bf-c4346bcb7274"))  # panjane iter6. best 0 4/1

    # am.add_experiment(retrieve_experiment("6fe0132a-c135-e811-a2bf-c4346bcb7274")) # faca stage1, iter1, best 9
    # am.add_experiment(retrieve_experiment("86413a54-0d36-e811-a2bf-c4346bcb7274")) # m-s stage 1. iter3, best 12
    # am.add_experiment(retrieve_experiment("eb30545d-e536-e811-a2bf-c4346bcb7274")) # m-s stage 2.  ite3, best 6

    # am.add_experiment(retrieve_experiment("d4b08d09-1835-e811-a2bf-c4346bcb7274")) #caputine iter12. best 8.
    # am.add_experiment(retrieve_experiment("0fc97f4a-4634-e811-a2bf-c4346bcb7274"))  # chichuco iter0.  best 3
    # am.add_experiment(retrieve_experiment("f67437d5-4e34-e811-a2bf-c4346bcb7274"))  # chicutso iter2. best 3
    # am.add_experiment(retrieve_experiment("d7d2a0be-a234-e811-a2bf-c4346bcb7274")) # facazissa iter3.  best 12
    # am.add_experiment(retrieve_experiment("3240a906-9e33-e811-a2bf-c4346bcb7274"))  # magude iter0. best 21.
    # am.add_experiment(retrieve_experiment("6cd7957f-cb34-e811-a2bf-c4346bcb7274"))  # mahel iter6. best 11.
    # am.add_experiment(retrieve_experiment("0dbd4e00-cc34-e811-a2bf-c4346bcb7274")) # mapulanguene iter8. best 3
    # am.add_experiment(retrieve_experiment("777c34a8-dc34-e811-a2bf-c4346bcb7274"))  # moine iter6. best 8
    # am.add_experiment(retrieve_experiment("5171d868-4634-e811-a2bf-c4346bcb7274"))  # motaze iter0. best 11
    # am.add_experiment(retrieve_experiment("7a5ab67b-dc34-e811-a2bf-c4346bcb7274"))  # panjane iter8. best 17

    # am.add_experiment(retrieve_experiment("2ecf9cd7-9c35-e811-a2bf-c4346bcb7274")) #aggregate 2014.  iter2, best 20
    # am.add_experiment(retrieve_experiment("d8cb3061-ae35-e811-a2bf-c4346bcb7274")) #aggregate 2014,2015.  iter2, best 5

    am.add_experiment(
        retrieve_experiment("2f76368f-bc57-e811-a2bf-c4346bcb7274"))

    am.add_analyzer(PrevAnalyzer(cait_output_mode=True, gatesreview=True))
    am.analyze()
Esempio n. 7
0
from dtk.utils.analyzers import TimeseriesAnalyzer, VectorSpeciesAnalyzer
from simtools.AnalyzeManager.AnalyzeManager import AnalyzeManager
from simtools.Utilities.Experiments import retrieve_experiment

if __name__ == "__main__":
    # Pull down the two previously-run test experiments we want to analyze.
    experiments = [
        retrieve_experiment('158cc530-780e-e711-9400-f0921c16849c'),
        retrieve_experiment('c62aa746-780e-e711-9400-f0921c16849c'),
    ]

    # One manager runs every analyzer across both experiments.
    am = AnalyzeManager(exp_list=experiments)

    # Register the timeseries and vector-species analyzers, in that order.
    for analyzer in (TimeseriesAnalyzer(), VectorSpeciesAnalyzer()):
        am.add_analyzer(analyzer)

    # Analyze
    am.analyze()

if __name__=="__main__":
    SetupParser.init('HPC')

    # The commented lines below form a menu of per-catchment experiment ids;
    # uncomment the one(s) to analyze.
    manager = AnalyzeManager()

    # Corrected stepd
    # am.add_experiment(retrieve_experiment("43cac760-cbd6-e711-9414-f0921c16b9e5")) # bbondo
    # am.add_experiment(retrieve_experiment("a31b516a-cbd6-e711-9414-f0921c16b9e5"))  # chabbobboma
    # am.add_experiment(retrieve_experiment("1ecdf372-cbd6-e711-9414-f0921c16b9e5")) # chisanga
    # am.add_experiment(retrieve_experiment("957e6159-32d6-e711-9414-f0921c16b9e5")) # chiyabi
    # am.add_experiment(retrieve_experiment("9669907b-cbd6-e711-9414-f0921c16b9e5"))  # luumbo
    # am.add_experiment(retrieve_experiment("fbe40809-ccd6-e711-9414-f0921c16b9e5"))  # munyumbwe
    # am.add_experiment(retrieve_experiment("8aadd6a0-cbd6-e711-9414-f0921c16b9e5"))  # nyanga chaamwe
    # am.add_experiment(retrieve_experiment("d18a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinafala
    sinamalima = retrieve_experiment("d28a9aa8-cbd6-e711-9414-f0921c16b9e5")  # sinamalima
    manager.add_experiment(sinamalima)

    # Old MBGSR
    # am.add_experiment(retrieve_experiment("7f188957-2fe1-e711-9414-f0921c16b9e5")) # bbondo
    # am.add_experiment(retrieve_experiment("f60d69eb-2fe1-e711-9414-f0921c16b9e5"))  # chabbobboma
    # am.add_experiment(retrieve_experiment("7aa30068-2fe1-e711-9414-f0921c16b9e5")) # chisanga
    # am.add_experiment(retrieve_experiment("d57bccae-25e1-e711-9414-f0921c16b9e5")) # chiyabi
    # am.add_experiment(retrieve_experiment("5d5cff6d-2fe1-e711-9414-f0921c16b9e5"))  # luumbo
    # am.add_experiment(retrieve_experiment("cf37cd7b-2fe1-e711-9414-f0921c16b9e5"))  # munyumbwe
    # am.add_experiment(retrieve_experiment("94aa85fb-2fe1-e711-9414-f0921c16b9e5"))  # nyanga chaamwe
    # am.add_experiment(retrieve_experiment("f5c0fb13-30e1-e711-9414-f0921c16b9e5"))  # sinafala
    # am.add_experiment(retrieve_experiment("33b92b39-30e1-e711-9414-f0921c16b9e5"))  # sinamalima

    manager.add_analyzer(RDTPrevAnalyzer())
    manager.analyze()
Esempio n. 9
0

    def finalize(self):
        """Finalize hook; the raw-data dump is disabled, so just emit a blank line."""
        # print self.my_data
        print("")

    def plot(self):
        """Plot an overlaid, log-scale histogram of trips per person for each simulation.

        Assumes self.n_trips maps sim_id -> per-traveller trip counts, and that
        self.pop_init / self.n_travellers hold per-sim population and traveller
        totals (populated elsewhere in this analyzer) -- TODO confirm.
        """
        import matplotlib.pyplot as plt

        # Plot histogram of trips
        for sim_id, data in self.n_trips.items():
            # data only contains data for travellers.  Need to add in "zero trips" for everyone who didn't travel.
            n_couch = self.pop_init[sim_id] - self.n_travellers[sim_id]
            full_data = np.append(data, np.zeros(int(n_couch)))
            # presumably self.metadata[sim_id] is a human-readable sim label; verify against collector
            plt.hist(full_data,histtype='stepfilled',alpha=0.4,log=True, label=self.metadata[sim_id])

        plt.legend()

        plt.show()



if __name__=="__main__":
    SetupParser.init('HPC')

    # Analyze migration output for a single pre-run experiment.
    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment("151f8b4b-867c-e711-9401-f0921c16849d"))
    manager.add_analyzer(MigrationAnalyzer())
    manager.analyze()
Esempio n. 10
0
def plot_RDT(exp_id, sample, save_file=None, **kwargs):
    """Plot RDT prevalence for one sample of an experiment.

    NOTE(review): `catch` is read from module scope rather than passed in --
    confirm it is defined wherever this function is called.
    """
    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment(exp_id))
    analyzer = prevalence_plot_analyzer(catch, sample, save_file=save_file, **kwargs)
    manager.add_analyzer(analyzer)
    manager.analyze()
        plt.tight_layout()
        # if self.save_file:
        #     # if self.cait_output_mode:
        #     #     MozambiqueExperiment.save_figs_for_caitlin(fig,self.save_file)
        #     # else:
        if not self.save_file:
            self.save_file = save_file = "figs/{}".format(self.catch)
        # plt.savefig(self.save_file + ".pdf")
        # plt.savefig(self.save_file + ".png")
        # else:
        plt.show()
        print("Done!")


if __name__ == "__main__":
    SetupParser.init('HPC')

    # The commented lines below are a menu of per-catchment experiment ids;
    # uncomment to switch which catchment is analyzed.
    manager = AnalyzeManager()
    # am.add_experiment(retrieve_experiment("0a373d77-1f93-e811-a2c0-c4346bcb7275")) # chichuco
    # am.add_experiment(retrieve_experiment("0d801fc0-3c92-e811-a2c0-c4346bcb7275")) # chicutso
    experiment = retrieve_experiment("c5c3c5bb-a79c-e811-a2c0-c4346bcb7275")  # magude-sede-facazissa
    manager.add_experiment(experiment)
    # am.add_experiment(retrieve_experiment("210bcb89-e696-e811-a2c0-c4346bcb7275")) # mahel
    # am.add_experiment(retrieve_experiment("10238aac-7593-e811-a2c0-c4346bcb7275")) # mapulanguene
    # am.add_experiment(retrieve_experiment("85bef741-2d97-e811-a2c0-c4346bcb7275")) # moine
    # am.add_experiment(retrieve_experiment("140fe8a7-1194-e811-a2c0-c4346bcb7275")) # motaze
    # am.add_experiment(retrieve_experiment("b1c79146-6194-e811-a2c0-c4346bcb7275")) # panjane-caputine

    manager.add_analyzer(PrevAnalyzer())
    manager.analyze()
Esempio n. 12
0
            experiment_manager.run_simulations(exp_name=COMPS_experiment_name,
                                               exp_builder=experiment_builder,
                                               suite_id=suite_id)
            experiments.append(experiment_manager)
            experiments_ids.append(experiment_manager.experiment.exp_id)

        # Dump the experiment ids for resume
        with open('ids.json', 'w') as out:
            json.dump(experiments_ids, out)

    # All experiments are created at this point -> analyze
    am = AnalyzeManager(verbose=False, create_dir_map=False)
    for em in experiments:
        am.add_experiment(em.experiment)
#    am.add_analyzer(DownloadAnalyzerTPI(['output\\DemographicsSummary.json', 'config.json', 'output\\ReportHIVART.csv', 'output\\ReportHIVByAgeAndGender.csv'],
#                                        output_dir='Test HIV 1'))
    am.add_analyzer(
        DownloadAnalyzerTPI(['output\\ReportHIVByAgeAndGender.csv'],
                            output_dir='Nyanza Base Case'))

    # While the experiments are running, re-analyze every 15 seconds.
    while not all([em.finished() for em in experiments]):
        # BUG(review): map() is lazy in Python 3, so refresh_experiment() is never
        # actually invoked here and finished() may read stale state. Replace with
        # a plain for-loop over `experiments`.
        map(lambda e: e.refresh_experiment(), experiments)
        print("Analyzing !")
        am.analyze()
        print("Waiting 15 seconds")
        time.sleep(15)

    # Analyze one last time when everything is complete
    am.analyze()
Esempio n. 13
0
def catalyst(args, unknownArgs):
    """
    Catalyst run-and-analyze process as ported from the test team.
    Runs (or reuses) a sweep experiment, then produces a fidelity report and
    opens the generated HTML summary in the default browser.
    Programmatic-only arguments:
        args.mode : used by FidelityReportExperimentDefinition, default: 'prod'
        args.report_label : attached to the experiment name
        args.debug : True/False, passed into FidelityReportAnalyzer, default: False
    :param args:
    :param unknownArgs:
    :return:
    """
    from dtk.utils.builders.sweep import GenericSweepBuilder
    from catalyst_report.fidelity_report_analyzer import FidelityReportAnalyzer
    from catalyst_report.fidelity_report_experiment_definition import FidelityReportExperimentDefinition
    import catalyst_report.utils as catalyst_utils
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    # we're going to do a dtk run, then a set-piece analysis. But first we need to do some overrides
    # to get the run part to do the desired parameter sweep.

    mod = args.loaded_module

    # when run with 'dtk catalyst', run_sim_args['exp_name'] will have additional information appended.
    mod.run_sim_args[
        'exp_name'] = mod.run_sim_args['exp_name'] + '-development'

    # lining up the arguments expected by FidelityReportExperimentDefinition
    args.sweep = args.sweep_method

    # hidden, programmatic arguments
    args.mode = args.mode if hasattr(args, 'mode') else 'prod'
    args.report_label = args.report_label if hasattr(args,
                                                     'report_label') else None
    args.debug = args.debug if hasattr(args, 'debug') else False

    # determine which report is being asked for. If not specified, default to what the config.json file says
    # ck4, this should go somewhere else, on a Config object of some sort? (prob not the builder, though)
    report_type_mapping = {
        'DENGUE_SIM': 'dengue',
        'GENERIC_SIM': 'generic',
        'HIV_SIM': 'hiv',
        'MALARIA_SIM': 'malaria',
        'POLIO_SIM': 'polio',
        'STI_SIM': 'sti',
        'TB_SIM': 'tb',
        'TYPHOID_SIM': 'typhoid',
        'VECTOR_SIM': 'generic'
    }
    if args.report_type:
        report_type = args.report_type
    else:
        sim_type = mod.run_sim_args['config_builder'].config['parameters'][
            'Simulation_Type']
        report_type = report_type_mapping.get(sim_type, None)
        if not report_type:
            raise KeyError(
                'Default report type could not be determined for sim_type: %s. Report type must be specified'
                ' via -r flag.' % sim_type)

    # Create and set a builder to sweep over population scaling or model timestep
    reports = catalyst_utils.load_report_definitions(
        definitions_filename=args.report_definitions)
    if report_type in reports:
        args.report_channel_list = reports[report_type]['inset_channel_names']
    else:
        raise Exception('Invalid report: %s. Available reports: %s' %
                        (report_type, sorted(reports.keys())))
    catalyst_config = catalyst_utils.load_sweep_configs(
        sweep_type=args.sweep_type, config_filename=args.sweep_definitions)
    defn = FidelityReportExperimentDefinition(catalyst_config, args)

    # redefine the experiment name so it doesn't conflict with the likely follow-up non-catalyst experiment
    mod.run_sim_args['exp_name'] = 'Catalyst-' + mod.run_sim_args['exp_name']

    # define the sweep to perform: nruns run-numbers crossed with the sweep parameter values
    sweep_dict = {
        'Run_Number': range(1,
                            int(defn['nruns']) + 1),
        defn['sweep_param']: defn['sweep_values']
    }
    mod.run_sim_args['exp_builder'] = GenericSweepBuilder.from_dict(sweep_dict)

    # overwrite spatial output channels to those used in the catalyst report
    # (an empty channel list from the definition disables spatial output entirely)
    spatial_channel_names = defn['spatial_channel_names']
    if len(spatial_channel_names) > 0:
        mod.run_sim_args['config_builder'].enable('Spatial_Output')
        mod.run_sim_args['config_builder'].params[
            'Spatial_Output_Channels'] = spatial_channel_names
    else:
        mod.run_sim_args['config_builder'].disable('Spatial_Output')
        mod.run_sim_args['config_builder'].params[
            'Spatial_Output_Channels'] = []

    # now run if no preexisting experiment id was provided
    if not args.experiment_id:
        # we must always block so that we can run the analysis at the end; run and analyze!
        args.blocking = True
        experiment_manager = run(args, unknownArgs)
        experiment = experiment_manager.experiment
        print('Done running experiment: %s' % experiment.exp_id)
    else:
        experiment = retrieve_experiment(args.experiment_id)

    # Create an analyze manager
    am = AnalyzeManager(exp_list=[experiment], verbose=False)

    # Add the TimeSeriesAnalyzer to the manager and do analysis
    # ck4, is there a better way to specify the first 4 arguments? The DTKCase from Test-land might be nicer.
    # After all, the names COULD be different
    analyzer = FidelityReportAnalyzer(
        'catalyst_report',
        'config.json',
        mod.run_sim_args['config_builder'].get_param(
            'Demographics_Filenames')[0],
        experiment_definition=defn,
        experiment_id=experiment.exp_id,
        experiment_name=experiment.exp_name,
        label=args.report_label,
        time_series_step_from=defn['step_from'],
        time_series_step_to=defn['step_to'],
        time_series_equal_step_count=True,
        raw_data=True,
        debug=args.debug)
    am.add_analyzer(analyzer)
    am.analyze()

    # Open the generated HTML summary report in the default browser.
    import webbrowser
    webbrowser.open_new("file:///{}".format(
        os.path.join(os.getcwd(), "catalyst_report", "summary_report.html")))
Esempio n. 14
0
            int(x["year"]) + 2010,
            str(int(x["month"])).zfill(2)),
                              axis=1)
        # print("mdate")
        df["mdate"] = df.apply(lambda x: date_to_mdate(x["date"]), axis=1)
        # print("plot")
        # ax.plot(df["year"] * 12 + df["month"], df["cases"], *args, **kwargs)
        ax.plot_date(df["mdate"], df["cases"], *args, **kwargs)

        ax.set_xlabel("Date")
        ax.set_ylabel("Cases")
        ax.set_xlim([date_to_mdate("2010-01-01"), date_to_mdate("2017-01-01")])
        # ax.tick_params(direction="inout")

    def uid(self):
        """Return a unique identifier combining the site name and analyzer name."""
        return self.site.name + '_' + self.name


if __name__ == "__main__":
    SetupParser.init('HPC')

    manager = AnalyzeManager()

    # Calibration experiment to score:
    experiment = retrieve_experiment("a0bee2bd-f8b5-e811-a2c0-c4346bcb7275")
    manager.add_experiment(experiment)

    manager.add_analyzer(incidence_likelihood(zambia_calib_site("bbondo")))
    manager.analyze()
Esempio n. 15
0
            for exp_id in self.pop_data.keys():
                plt.plot(
                    np.array(self.raw_pop_data[exp_id][-2]) /
                    self.tot_pop[exp_id][-2])
            ax.set_title("Late")
            ax.set_xticks(range(24))
            ax.set_xticklabels(self.age_bins)
            plt.show()

        #
        # for exp_id in self.pop_data.keys():
        #     plt.plot_date(self.report_times, self.pop_data[exp_id],fmt='-',c=c,linewidth=lw,label=label,alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        # plt.legend([s['environment'] for s in self.metadata.values()])


if __name__ == "__main__":
    SetupParser.init('HPC')

    manager = AnalyzeManager()

    # L1 and L6 experiments; L5 kept below for reference.
    for exp_id in ("f4ecdcc6-768c-e711-9401-f0921c16849d",   # L1
                   "4188b9de-e28c-e711-9401-f0921c16849d"):  # L6
        manager.add_experiment(retrieve_experiment(exp_id))
    # am.add_experiment(retrieve_experiment("001a9f44-758c-e711-9401-f0921c16849d")) # L5

    manager.add_analyzer(AgeStratificationAnalyzer())
    manager.analyze()
Esempio n. 16
0
            experiment_manager.run_simulations(exp_name=COMPS_experiment_name,
                                               exp_builder=experiment_builder,
                                               suite_id=suite_id)
            experiments.append(experiment_manager)
            experiments_ids.append(experiment_manager.experiment.exp_id)

        # Dump the experiment ids for resume
        with open('ids.json', 'w') as out:
            json.dump(experiments_ids, out)

    # All experiments are created at this point -> analyze
    am = AnalyzeManager(verbose=False, create_dir_map=False)
    for em in experiments:
        am.add_experiment(em.experiment)
#    am.add_analyzer(DownloadAnalyzerTPI(['output\\DemographicsSummary.json', 'config.json', 'output\\ReportHIVART.csv', 'output\\ReportHIVByAgeAndGender.csv'],
#                                        output_dir='Test HIV 1'))
    am.add_analyzer(
        DownloadAnalyzerTPI(['output\\ReportHIVByAgeAndGender.csv'],
                            output_dir='..\\Output\\ReportHIVByAgeAndGender'))

    # While the experiments are running, re-analyze every 15 seconds.
    while not all([em.finished() for em in experiments]):
        # BUG(review): map() is lazy in Python 3, so refresh_experiment() is never
        # actually invoked here and finished() may read stale state. Replace with
        # a plain for-loop over `experiments`.
        map(lambda e: e.refresh_experiment(), experiments)
        print("Analyzing !")
        am.analyze()
        print("Waiting 15 seconds")
        time.sleep(15)

    # Analyze one last time when everything is complete
    am.analyze()
Esempio n. 17
0
        selected = [
            p.selected_data[id(self)] for p in parsers.values()
            if id(self) in p.selected_data
        ]
        self.data = pd.concat(selected)

    def finalize(self):
        """Draw one curve per (simulation, species) for the selected channel,
        then save to <save_file>.png/.pdf or show interactively."""
        import seaborn as sns
        sns.set_style("darkgrid")

        fig = plt.figure(figsize=(10, 6))
        axes = fig.gca()
        for sim_id, sim_df in self.data.groupby('sim_id'):
            for species, species_df in sim_df.groupby('species'):
                axes.plot(species_df['date'], species_df[self.channel], label=species)
        axes.legend()
        plt.ylabel(self.channel)
        if self.save_file:
            for ext in (".png", ".pdf"):
                plt.savefig(self.save_file + ext)
        else:
            plt.show()


if __name__ == '__main__':

    # Analyze a single known simulation with the vector-species report analyzer.
    manager = AnalyzeManager()
    manager.add_analyzer(VectorSpeciesReportAnalyzer())
    manager.add_simulation('4047a20f-b33d-e811-a2bf-c4346bcb7274')
    manager.analyze()
Esempio n. 18
0
            date_to_mdate("2013-01-01"),
            date_to_mdate("2014-01-01"),
            date_to_mdate("2015-01-01"),
            date_to_mdate("2016-01-01"),
            date_to_mdate("2017-01-01"),
            date_to_mdate("2018-01-01"),
            date_to_mdate("2019-01-01")
        ])

        # plt.ylim([-0.01,0.25])
        plt.ylabel("RDT Prevalence")
        plt.legend(frameon=True)
        plt.tight_layout()
        if self.save_file:
            plt.savefig(self.save_file + ".pdf")
            plt.savefig(self.save_file + ".png")
        else:
            plt.show()


if __name__ == "__main__":
    manager = AnalyzeManager()
    # am.add_experiment(retrieve_experiment("cdb12c2d-61c3-e811-a2bd-c4346bcb1555"))
    experiment = retrieve_experiment("9df3a55a-63c3-e811-a2bd-c4346bcb1555")
    manager.add_experiment(experiment)
    # Alternative color/round configurations kept for quick switching:
    # am.add_analyzer(custom_prev_plot_analyzer("chiyabi","C0", 3))
    # am.add_analyzer(custom_prev_plot_analyzer("chiyabi","C1", 4))
    manager.add_analyzer(custom_prev_plot_analyzer("chiyabi", "C2", 5))
    # am.add_analyzer(custom_prev_plot_analyzer("chiyabi","C3", 6))
    manager.analyze()