def _analyze(self, experiment, analyzers, points_ran):
        """
        This method is the in-common route for Resamplers to analyze simulations for likelihood.

        :param experiment: the experiment to analyze, should be from self._run()
        :param analyzers: analyzers to run; the FIRST one must expose .result as a list-like of
                          likelihood values (one per point, same order as points_ran) after finalize()
        :param points_ran: Points objects that were just _run()
        :return: The supplied points_ran with their .likelihood attribute set, AND the direct results of the analyzer
                 as a list.
        """
        am = AnalyzeManager(analyzers=analyzers, exp_list=experiment)
        am.analyze()

        # The provided likelihood analyzer MUST set self.result to be a list of Point objects
        # with the .likelihood attribute set to the likelihood value in its .finalize() method.
        results = am.analyzers[0].result.tolist()

        # Attach each likelihood to its corresponding point (same ordering as the run).
        # zip pairs points and results positionally, replacing the index-based loop.
        for point, likelihood in zip(points_ran, results):
            point.likelihood = likelihood

        # verify that the returned points all have a likelihood attribute set
        if any(point.likelihood is None for point in points_ran):
            raise Exception(
                'At least one Point object returned by the provided analyzer does not have '
                'its .likelihood attribute set.')

        return points_ran, results
def analyze(args, unknownArgs, builtinAnalyzers):
    """Collect the requested experiments/simulations and run the selected analyzers on them."""
    # An analyzer must have been specified on the command line.
    if args.config_name is None:
        logger.error('Please provide Analyzer (-a or --config_name).')
        exit()

    item_ids = args.itemids
    batch_name = args.batch_name

    # Gather every experiment and simulation referenced by the given ids.
    exp_dict, sim_dict = collect_experiments_simulations(item_ids)

    # Fold in any pre-existing batch carrying the same name.
    exp_dict, sim_dict = consolidate_experiments_with_options(exp_dict, sim_dict, batch_name)

    # Unless forced, make sure every experiment has finished before analyzing.
    if not args.force:
        check_status(exp_dict.values())

    # Resolve the analyzer classes (built-in or user supplied).
    analyzers = collect_analyzers(args, builtinAnalyzers)

    # Nothing specified at all -> fall back to the most recent experiment.
    if not (exp_dict or sim_dict):
        latest = DataStore.get_most_recent_experiment()
        exp_dict[latest.exp_id] = latest

    # create instance of AnalyzeManager
    analyzeManager = AnalyzeManager(exp_list=exp_dict.values(), sim_list=sim_dict.values(), analyzers=analyzers)

    # Experiments not already tracked by the manager are candidates for batch persistence.
    ids_to_save = set(exp_dict) - set(analyzeManager.experiments_simulations)
    exp_to_be_saved = [exp_dict[exp_id] for exp_id in ids_to_save]

    if batch_name:
        # An explicit batch name always gets saved/created.
        save_batch(batch_name, exp_to_be_saved, sim_dict.values())
    elif len(exp_dict) + len(sim_dict) > 1:
        # Only create a batch when more than one item is analyzed in total;
        # reuse an equivalent existing batch when one is found.
        batch_existing = check_existing_batch(exp_dict, sim_dict)
        if batch_existing is not None:
            logger.info('\nBatch: %s (id=%s)' % (batch_existing.name, batch_existing.id))
        else:
            save_batch(batch_name, exp_to_be_saved, sim_dict.values())

    # Run the analysis itself.
    analyzeManager.analyze()

    # Drop any batches that ended up empty.
    clean_batch()

    return analyzeManager
Beispiel #3
0
    def analyze_iteration(self):
        """
        Analyze the output of completed simulations by using the relevant analyzers by site.
        Cache the results that are returned by those analyzers.

        Returns the cached 'total' entry when results already exist (resume path);
        otherwise performs the analysis, stores caches on the iteration state, and
        returns None.
        """
        # Resume support: if this iteration was already analyzed, reuse the cache.
        if self.results:
            logger.info('Reloading results from cached iteration state.')
            return self.results['total']

        # Lazily rebuild the experiment manager when resuming from a saved state.
        if not self.exp_manager:
            self.exp_manager = ExperimentManagerFactory.from_experiment(
                self.experiment_id)

        from simtools.Analysis.BaseAnalyzers.BaseAnalyzer import BaseAnalyzer
        from simtools.Analysis.AnalyzeManager import AnalyzeManager as am
        # Two analysis stacks coexist: the new-style manager is used only when
        # EVERY analyzer is a BaseAnalyzer subclass; otherwise fall back to the
        # legacy AnalyzeManager.
        if all(isinstance(a, BaseAnalyzer) for a in self.analyzer_list):
            analyzerManager = am(exp_list=self.exp_manager.experiment,
                                 analyzers=self.analyzer_list,
                                 working_dir=self.iteration_directory,
                                 verbose=True)
        else:
            analyzerManager = AnalyzeManager(
                exp_list=self.exp_manager.experiment,
                analyzers=self.analyzer_list,
                working_dir=self.iteration_directory)
        analyzerManager.analyze()

        # Ask the analyzers to cache themselves.
        # NOTE: uid may be a plain attribute or a method depending on analyzer style,
        # hence the callable() check.
        cached_analyses = {
            a.uid if not callable(a.uid) else a.uid(): a.cache()
            for a in analyzerManager.analyzers
        }
        logger.debug(cached_analyses)

        # Get the results from the analyzers and ask the next point how it wants to cache them
        results = pd.DataFrame({
            a.uid if not callable(a.uid) else a.uid(): a.result
            for a in analyzerManager.analyzers
        })
        cached_results = self.next_point_algo.get_results_to_cache(results)

        # Store the analyzers and results in the iteration state
        self.analyzers = cached_analyses
        self.results = cached_results

        # Set those results in the next point algorithm
        self.next_point_algo.set_results_for_iteration(self.iteration, results)

        # Update the summary table and all the results
        self.all_results, self.summary_table = self.next_point_algo.update_summary_table(
            self, self.all_results)
        logger.info(self.summary_table)
Beispiel #4
0
def incidence_grabby(expname, hfca, rank, output_fname=None):
    """Plot case incidence for the rank-th best calibration sample of an experiment."""
    calib_folder = "{}{}/".format(calib_base, expname)
    if not output_fname:
        output_fname = "{}rank{}_cases".format(calib_folder, rank)

    # Rank all calibration samples by total log-likelihood, best first.
    ll_table = pd.read_csv(calib_folder + "_plots/LL_all.csv")
    ll_table.sort_values(by='total', ascending=False, inplace=True)
    ll_table.reset_index(inplace=True)

    sample = ll_table.loc[rank, 'sample']
    iteration = ll_table.loc[rank, 'iteration']

    manager = AnalyzeManager()
    manager.add_analyzer(IncidencePlotter(hfca, save_file=output_fname, save_csv=True))

    # Look up the simulation ids that belong to the selected sample in that
    # iteration's saved state, and add each of them to the manager.
    state_path = "{}iter{}/IterationState.json".format(calib_folder, iteration)
    with open(state_path) as fin:
        iteration_state = json.load(fin)
    for sim_id, info in OrderedDict(iteration_state['simulations']).items():
        if info['__sample_index__'] == sample:
            manager.add_simulation(sim_id)
    manager.analyze()
Beispiel #5
0
def RDT_grabby(expname, rank, output_fname=None, plot_bairros=True):
    """Plot RDT prevalence for the rank-th best calibration sample of an experiment."""
    calib_folder = "{}{}/".format(calib_base, expname)
    if not output_fname:
        output_fname = "{}rank{}_rdt".format(calib_folder, rank)

    # Rank all calibration samples by total log-likelihood, best first.
    ll_table = pd.read_csv(calib_folder + "_plots/LL_all.csv")
    ll_table.sort_values(by='total', ascending=False, inplace=True)
    ll_table.reset_index(inplace=True)

    sample = ll_table.loc[rank, 'sample']
    iteration = ll_table.loc[rank, 'iteration']

    start_date = "2009-01-01"

    manager = AnalyzeManager()
    manager.add_analyzer(PrevAnalyzer(start_date=start_date,
                                      save_file=output_fname,
                                      cait_output_mode=True,
                                      plot_bairros=plot_bairros))

    # Add every simulation belonging to the selected sample in that iteration.
    state_path = "{}iter{}/IterationState.json".format(calib_folder, iteration)
    with open(state_path) as fin:
        iteration_state = json.load(fin)
    for sim_id, info in OrderedDict(iteration_state['simulations']).items():
        if info['__sample_index__'] == sample:
            manager.add_simulation(sim_id)
    manager.analyze()
Beispiel #6
0
def plot_vectors(exp_id, sample, save_file=None):
    """Run the vector-species report analyzer (Daily HBR channel) on one experiment."""
    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment(exp_id))
    analyzer = VectorSpeciesReportAnalyzer(sample,
                                           save_file=save_file,
                                           channel='Daily HBR')
    manager.add_analyzer(analyzer)
    manager.analyze()
Beispiel #7
0
def stdout(args, unknownArgs):
    """Display the stdout (or stderr) of one matching simulation of the experiment."""
    exp_manager = reload_experiment(args)
    states, msgs = exp_manager.get_simulation_status()

    if not exp_manager.status_succeeded(states):
        logger.warning(
            'WARNING: not all jobs have finished successfully yet...')

    def _matches(state):
        # A simulation matches when its state agrees with the requested filter,
        # or when neither --succeeded nor --failed was asked for.
        if args.succeeded and state is SimulationState.Succeeded:
            return True
        if args.failed and state is SimulationState.Failed:
            return True
        return not args.succeeded and not args.failed

    # Pick the first simulation satisfying the filter, if any.
    selected = next((s_id for s_id, state in states.items() if _matches(state)), None)

    if selected is None:
        print("No simulations found...")
    else:
        am = AnalyzeManager(exp_list=[exp_manager.experiment],
                            analyzers=StdoutAnalyzer([selected], args.error),
                            force_analyze=True,
                            verbose=False)
        am.analyze()
Beispiel #8
0
def vector(expname, calib_stage, rank) :
    """
    Plot the vector species report (Daily HBR channel) for the rank-th best
    calibration sample of the given experiment.

    :param expname: calibration experiment folder name under calib_base
    :param calib_stage: unused here; kept for call-site compatibility
    :param rank: 0-based rank into the likelihood-sorted sample table
    """
    calib_folder = calib_base + expname +"/"
    output_fname = calib_folder + "rank{}_vectors".format(rank)

    # Rank samples by total log-likelihood, best first.
    LL_fname = calib_folder + "_plots/LL_all.csv"
    LL_df = pd.read_csv(LL_fname)
    LL_df.sort_values(by='total', ascending=False, inplace=True)
    LL_df.reset_index(inplace=True)

    sample = LL_df.loc[rank, 'sample']
    iteration = LL_df.loc[rank, 'iteration']

    am = AnalyzeManager()
    am.add_analyzer(VectorSpeciesReportAnalyzer(save_file=output_fname, channel='Daily HBR'))

    with open(calib_folder+"iter{}/IterationState.json".format(iteration)) as fin:
        iteration_state = json.loads(fin.read())
    siminfo = OrderedDict(iteration_state['simulations'])
    # BUG FIX: add_simulation() used to sit OUTSIDE this loop, so only the LAST
    # matching simulation was added and a NameError was raised when no sample
    # matched. Add every matching simulation inside the loop, consistent with
    # incidence_grabby() and RDT_grabby().
    for item in list(siminfo.items()) :
        if item[1]['__sample_index__'] == sample :
            simid = item[0]
            am.add_simulation(simid)
    am.analyze()
Beispiel #9
0
    ####### Plots with vectors eir with latest eradication.exe Cov = 1., Seek = 1., Sweep Rate
    #Conclusion : No variation in Incidence rate which increased and remained consistent at a rate of ~2.3
    # 0Round Rate = .3
    #exps.append(retrieve_experiment('88d2db14-0461-e711-9401-f0921c16849d'))
    # 0Round Rate = .5
    #exps.append(retrieve_experiment('a8e33f25-0461-e711-9401-f0921c16849d'))
    # 0Round Rate = .75
    #exps.append(retrieve_experiment('987a4438-0461-e711-9401-f0921c16849d'))
    # 0Round Rate = 1
    #exps.append(retrieve_experiment('fc09984f-0461-e711-9401-f0921c16849d'))

    ####### Plots with vectors eir with latest eradication.exe Cov = .3, Rate = 0., Sweep Seek
    # 0Round Seek = .3
    exps.append(retrieve_experiment('dc6ef1f4-0861-e711-9401-f0921c16849d'))
    # 0Round Seek = .5
    exps.append(retrieve_experiment('2971800a-0961-e711-9401-f0921c16849d'))
    # 0Round Seek = .75
    exps.append(retrieve_experiment('a51c1d1c-0961-e711-9401-f0921c16849d'))
    # 0Round Seek = 1
    exps.append(retrieve_experiment('3df09b31-0961-e711-9401-f0921c16849d'))

    #Old plots for Gates review
    #exps.append(retrieve_experiment('3fb8ebc1-29cd-e611-93fe-f0921c168499'))
    #exps.append(retrieve_experiment('e489a1a3-29cd-e611-93fe-f0921c168499'))
    #exps.append(retrieve_experiment('118a2a45-29cd-e611-93fe-f0921c168499'))
    #exps.append(retrieve_experiment('65612e1a-29cd-e611-93fe-f0921c168499'))
    SetupParser.init('HPC')
    am = AnalyzeManager(exp_list=exps, analyzers=IncidenceAnalyzer())
    am.analyze()
Beispiel #10
0
                bairro_index += 1

            # plt.legend()

            plt.tight_layout()
            if self.save_file:
                plt.savefig(self.save_file + "_BAIRROS.pdf")
                plt.savefig(self.save_file + "_BAIRROS.png")
            else:
                plt.show()


if __name__ == "__main__":
    # Use the HPC (COMPS) block from the simtools configuration.
    SetupParser.init('HPC')

    # Analyze manager with nothing attached yet; the experiment lines below are
    # kept commented for quick switching between calibration catchments.
    # NOTE(review): this snippet never calls am.analyze() — presumably truncated.
    am = AnalyzeManager()

    # am.add_experiment(retrieve_experiment("d4b08d09-1835-e811-a2bf-c4346bcb7274")) #caputine iter12. best 8.
    # am.add_experiment(retrieve_experiment("f5e78cbb-1436-e811-a2bf-c4346bcb7274"))  # chichuco iter5.  best 0.  4/1 10:30pm
    # am.add_experiment(retrieve_experiment("f9df132a-c135-e811-a2bf-c4346bcb7274"))  # chicutso iter1. best 6 4/11 10:30pm
    # am.add_experiment(retrieve_experiment("15a20ddd-2a36-e811-a2bf-c4346bcb7274"))  # facazissa iter5. best 0.  4/1 10:30pm
    # am.add_experiment(retrieve_experiment("86413a54-0d36-e811-a2bf-c4346bcb7274"))  # magude iter3. best 10.  4/1 10:30pm  X
    # am.add_experiment(retrieve_experiment("15a1d9fe-2f36-e811-a2bf-c4346bcb7274"))  # mahel iter9.  best 0. 4/1
    #  am.add_experiment(retrieve_experiment("0fc16f8f-2636-e811-a2bf-c4346bcb7274")) # mapulanguene iter9. best 10.  4/1 10:30pm
    # am.add_experiment(retrieve_experiment("f5873afe-1336-e811-a2bf-c4346bcb7274"))  # moine iter6. best 0 4/1
    # am.add_experiment(retrieve_experiment("19794550-c135-e811-a2bf-c4346bcb7274"))  # motaze iter1. best 15 4/1
    # am.add_experiment(retrieve_experiment("e6f8c635-2d36-e811-a2bf-c4346bcb7274"))  # panjane iter6. best 0 4/1

    # am.add_experiment(retrieve_experiment("6fe0132a-c135-e811-a2bf-c4346bcb7274")) # faca stage1, iter1, best 9
    # am.add_experiment(retrieve_experiment("86413a54-0d36-e811-a2bf-c4346bcb7274")) # m-s stage 1. iter3, best 12
    # am.add_experiment(retrieve_experiment("eb30545d-e536-e811-a2bf-c4346bcb7274")) # m-s stage 2.  ite3, best 6
Beispiel #11
0
from dtk.utils.analyzers import TimeseriesAnalyzer, VectorSpeciesAnalyzer
from simtools.AnalyzeManager.AnalyzeManager import AnalyzeManager
from simtools.Utilities.Experiments import retrieve_experiment

if __name__ == "__main__":
    # Fetch the two test experiments we want to analyze together.
    test_experiments = [
        retrieve_experiment('158cc530-780e-e711-9400-f0921c16849c'),
        retrieve_experiment('c62aa746-780e-e711-9400-f0921c16849c'),
    ]

    # Build the analyze manager over both experiments.
    am = AnalyzeManager(exp_list=test_experiments)

    # Register the timeseries analyzer first, then the vector-species analyzer.
    for analyzer in (TimeseriesAnalyzer(), VectorSpeciesAnalyzer()):
        am.add_analyzer(analyzer)

    # Run the analysis.
    am.analyze()
                #              c='C{}'.format(8 % (ni + 1)),s=np.sqrt(pop))

            ax.set_xlabel("Date")
            ax.set_ylabel("RDT Prevalence")
            # ax.legend(fontsize=6)
            ax.set_xlim([foo("2012-01-01"), foo("2017-01-01")])

        plt.tight_layout()
        # plt.show()
        plt.savefig(self.base + "data/figs/{}_prev_by_mcluster.png".format(self.catch))


if __name__=="__main__":
    SetupParser.init('HPC')

    am = AnalyzeManager()

    # Corrected stepd
    # am.add_experiment(retrieve_experiment("43cac760-cbd6-e711-9414-f0921c16b9e5")) # bbondo
    # am.add_experiment(retrieve_experiment("a31b516a-cbd6-e711-9414-f0921c16b9e5"))  # chabbobboma
    # am.add_experiment(retrieve_experiment("1ecdf372-cbd6-e711-9414-f0921c16b9e5")) # chisanga
    # am.add_experiment(retrieve_experiment("957e6159-32d6-e711-9414-f0921c16b9e5")) # chiyabi
    # am.add_experiment(retrieve_experiment("9669907b-cbd6-e711-9414-f0921c16b9e5"))  # luumbo
    # am.add_experiment(retrieve_experiment("fbe40809-ccd6-e711-9414-f0921c16b9e5"))  # munyumbwe
    # am.add_experiment(retrieve_experiment("8aadd6a0-cbd6-e711-9414-f0921c16b9e5"))  # nyanga chaamwe
    # am.add_experiment(retrieve_experiment("d18a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinafala
    am.add_experiment(retrieve_experiment("d28a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinamalima

    # Old MBGSR
    # am.add_experiment(retrieve_experiment("7f188957-2fe1-e711-9414-f0921c16b9e5")) # bbondo
    # am.add_experiment(retrieve_experiment("f60d69eb-2fe1-e711-9414-f0921c16b9e5"))  # chabbobboma
        #     # c1 = green = IRS
        #     # c2 = red = MSAT
        #     # c3 = purple = MDA

        plt.legend()
        # plt.xlim([3000,7000])
        plt.xlim([foo("2010-01-01"), foo("2019-01-01")])
        # plt.show()
        plt.tight_layout()
        plt.savefig(base + "data/figs/{}_prev_node.png".format(catch))


if __name__ == "__main__":
    # Use the HPC (COMPS) block from the simtools configuration.
    SetupParser.init('HPC')

    am = AnalyzeManager()

    # Catchment experiments, kept commented for quick switching; only the
    # "nyanga chaamwe" run is active here.
    # am.add_experiment(retrieve_experiment("43cac760-cbd6-e711-9414-f0921c16b9e5")) # bbondo
    # am.add_experiment(retrieve_experiment("a31b516a-cbd6-e711-9414-f0921c16b9e5"))  # chabbobboma
    # am.add_experiment(retrieve_experiment("1ecdf372-cbd6-e711-9414-f0921c16b9e5")) # chisanga
    # am.add_experiment(retrieve_experiment("957e6159-32d6-e711-9414-f0921c16b9e5")) # chiyabi
    # am.add_experiment(retrieve_experiment("9669907b-cbd6-e711-9414-f0921c16b9e5"))  # luumbo
    # am.add_experiment(retrieve_experiment("fbe40809-ccd6-e711-9414-f0921c16b9e5"))  # munyumbwe
    am.add_experiment(
        retrieve_experiment(
            "8aadd6a0-cbd6-e711-9414-f0921c16b9e5"))  # nyanga chaamwe
    # am.add_experiment(retrieve_experiment("d18a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinafala
    # am.add_experiment(retrieve_experiment("d28a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinamalima

    # Plot RDT prevalence for the selected experiment.
    am.add_analyzer(RDTPrevAnalyzer())
    am.analyze()
Beispiel #14
0
                config_builder)
            COMPS_experiment_name = scenario_name
            # COMPS_experiment_name = suite_name # I want hover-over in COMPS to be the suite name

            experiment_manager.run_simulations(exp_name=COMPS_experiment_name,
                                               exp_builder=experiment_builder,
                                               suite_id=suite_id)
            experiments.append(experiment_manager)
            experiments_ids.append(experiment_manager.experiment.exp_id)

        # Dump the experiment ids for resume
        with open('ids.json', 'w') as out:
            json.dump(experiments_ids, out)

    # Every experiments are created at this point -> Analyze
    am = AnalyzeManager(verbose=False, create_dir_map=False)
    for em in experiments:
        am.add_experiment(em.experiment)
#    am.add_analyzer(DownloadAnalyzerTPI(['output\\DemographicsSummary.json', 'config.json', 'output\\ReportHIVART.csv', 'output\\ReportHIVByAgeAndGender.csv'],
#                                        output_dir='Test HIV 1'))
    am.add_analyzer(
        DownloadAnalyzerTPI(['output\\ReportHIVByAgeAndGender.csv'],
                            output_dir='Nyanza Base Case'))

    # While the experiments are running, we are analyzing every 15 seconds
    while not all([em.finished() for em in experiments]):
        map(lambda e: e.refresh_experiment(), experiments)
        print("Analyzing !")
        am.analyze()
        print("Waiting 15 seconds")
        time.sleep(15)
from simtools.AnalyzeManager.AnalyzeManager import AnalyzeManager
from simtools.ExperimentManager.ExperimentManagerFactory import ExperimentManagerFactory
from simtools.SetupParser import SetupParser

SetupParser.default_block = 'HPC'

# Serialized-population resume demo: download a saved .dtk state file from a
# source simulation, attach it to a new config builder, and run from it.
cb = DTKConfigBuilder.from_defaults('VECTOR_SIM')
state_file = 'cleared-state-25550.dtk'   # serialized population snapshot to reuse
temp_path = 'tempdl'                     # scratch directory for the download
source_simulation = '6ce475d8-15aa-e711-9414-f0921c16b9e5'

if __name__ == "__main__":
    # Download the state file from the source simulation's output folder.
    da = DownloadAnalyzer(filenames=["output\\{}".format(state_file)],
                          output_path=temp_path)
    am = AnalyzeManager(sim_list=[source_simulation], analyzers=[da])
    am.analyze()

    # Attach the downloaded state file as an experiment asset and point the
    # config at it so the run starts from the serialized population.
    cb.experiment_files.add_file(
        os.path.join(temp_path, source_simulation, state_file))
    load_Serialized_Population(cb, 'Assets', [state_file])

    # Run !
    SetupParser.init()
    exp_manager = ExperimentManagerFactory.from_cb(cb)
    exp_manager.run_simulations(exp_name='test serialization')

    # Cleanup temp directory
    shutil.rmtree(temp_path)
Beispiel #16
0
        add_cell_intervention_timing_rugs_to_plot(ax, catch_cell_ids)

        plt.legend()
        # plt.xlim([3000,7000])
        plt.xlim([foo("2010-01-01"), foo("2019-01-01")])

        plt.tight_layout()
        plt.show()
        # plt.savefig(self.base + "data/figs/{}_prev.png".format(catch))


if __name__ == "__main__":
    # Use the HPC (COMPS) block from the simtools configuration.
    SetupParser.init('HPC')

    # NOTE(review): this snippet never calls am.analyze() — presumably truncated.
    am = AnalyzeManager()

    # Calibration experiments:
    am.add_experiment(
        retrieve_experiment("66f05adf-c10b-e811-9415-f0921c16b9e5"))

    # hand-fudged Milen habitat params
    # am.add_experiment(retrieve_experiment("4766b178-f5f4-e711-9414-f0921c16b9e5")) #bbondo
    # am.add_experiment(retrieve_experiment("34213b5c-f8f4-e711-9414-f0921c16b9e5"))  # chabbobboma
    # am.add_experiment(retrieve_experiment("84d95a7a-faf4-e711-9414-f0921c16b9e5"))  # chisanga
    # am.add_experiment(retrieve_experiment("c6313998-faf4-e711-9414-f0921c16b9e5")) # chiyabi
    # am.add_experiment(retrieve_experiment("69c0e4de-faf4-e711-9414-f0921c16b9e5"))  # luumbo
    # am.add_experiment(retrieve_experiment("4f045b1b-fbf4-e711-9414-f0921c16b9e5"))  # munyumbwe
    # am.add_experiment(retrieve_experiment("542b05fe-fbf4-e711-9414-f0921c16b9e5"))  # nyanga chaamwe (x0.5)
    # am.add_experiment(retrieve_experiment("b546a866-04f5-e711-9414-f0921c16b9e5"))  # nyanga chaamwe (x0.25)
    # am.add_experiment(retrieve_experiment("a938d951-06f5-e711-9414-f0921c16b9e5"))  # nyanga chaamwe (x0.15)
Beispiel #17
0
def catalyst(args, unknownArgs):
    """
    Catalyst run-and-analyze process as ported from the test team.
    Programmatic-only arguments:
        args.mode : used by FidelityReportExperimentDefinition, default: 'prod'
        args.report_label : attached to the experiment name
        args.debug : True/False, passed into FidelityReportAnalyzer, default: False
    :param args: parsed command-line arguments (plus the programmatic ones above)
    :param unknownArgs: passthrough extra arguments forwarded to run()
    :return: None (opens the generated summary report in a browser)
    """
    from dtk.utils.builders.sweep import GenericSweepBuilder
    from catalyst_report.fidelity_report_analyzer import FidelityReportAnalyzer
    from catalyst_report.fidelity_report_experiment_definition import FidelityReportExperimentDefinition
    import catalyst_report.utils as catalyst_utils
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    # we're going to do a dtk run, then a set-piece analysis. But first we need to do some overrides
    # to get the run part to do the desired parameter sweep.

    mod = args.loaded_module

    # when run with 'dtk catalyst', run_sim_args['exp_name'] will have additional information appended.
    mod.run_sim_args['exp_name'] += '-development'

    # lining up the arguments expected by FidelityReportExperimentDefinition
    args.sweep = args.sweep_method

    # hidden, programmatic arguments; getattr with a default replaces the
    # hasattr/else dance and behaves identically.
    args.mode = getattr(args, 'mode', 'prod')
    args.report_label = getattr(args, 'report_label', None)
    args.debug = getattr(args, 'debug', False)

    # determine which report is being asked for. If not specified, default to what the config.json file says
    # ck4, this should go somewhere else, on a Config object of some sort? (prob not the builder, though)
    report_type_mapping = {
        'DENGUE_SIM': 'dengue',
        'GENERIC_SIM': 'generic',
        'HIV_SIM': 'hiv',
        'MALARIA_SIM': 'malaria',
        'POLIO_SIM': 'polio',
        'STI_SIM': 'sti',
        'TB_SIM': 'tb',
        'TYPHOID_SIM': 'typhoid',
        'VECTOR_SIM': 'generic'
    }
    if args.report_type:
        report_type = args.report_type
    else:
        sim_type = mod.run_sim_args['config_builder'].config['parameters'][
            'Simulation_Type']
        report_type = report_type_mapping.get(sim_type, None)
        if not report_type:
            raise KeyError(
                'Default report type could not be determined for sim_type: %s. Report type must be specified'
                ' via -r flag.' % sim_type)

    # Create and set a builder to sweep over population scaling or model timestep
    reports = catalyst_utils.load_report_definitions(
        definitions_filename=args.report_definitions)
    if report_type in reports:
        args.report_channel_list = reports[report_type]['inset_channel_names']
    else:
        raise Exception('Invalid report: %s. Available reports: %s' %
                        (report_type, sorted(reports.keys())))
    catalyst_config = catalyst_utils.load_sweep_configs(
        sweep_type=args.sweep_type, config_filename=args.sweep_definitions)
    defn = FidelityReportExperimentDefinition(catalyst_config, args)

    # redefine the experiment name so it doesn't conflict with the likely follow-up non-catalyst experiment
    mod.run_sim_args['exp_name'] = 'Catalyst-' + mod.run_sim_args['exp_name']

    # define the sweep to perform
    sweep_dict = {
        'Run_Number': range(1,
                            int(defn['nruns']) + 1),
        defn['sweep_param']: defn['sweep_values']
    }
    mod.run_sim_args['exp_builder'] = GenericSweepBuilder.from_dict(sweep_dict)

    # overwrite spatial output channels to those used in the catalyst report
    spatial_channel_names = defn['spatial_channel_names']
    if len(spatial_channel_names) > 0:
        mod.run_sim_args['config_builder'].enable('Spatial_Output')
        mod.run_sim_args['config_builder'].params[
            'Spatial_Output_Channels'] = spatial_channel_names
    else:
        mod.run_sim_args['config_builder'].disable('Spatial_Output')
        mod.run_sim_args['config_builder'].params[
            'Spatial_Output_Channels'] = []

    # now run if no preexisting experiment id was provided
    if not args.experiment_id:
        # we must always block so that we can run the analysis at the end; run and analyze!
        args.blocking = True
        experiment_manager = run(args, unknownArgs)
        experiment = experiment_manager.experiment
        print('Done running experiment: %s' % experiment.exp_id)
    else:
        experiment = retrieve_experiment(args.experiment_id)

    # Create an analyze manager
    am = AnalyzeManager(exp_list=[experiment], verbose=False)

    # Add the TimeSeriesAnalyzer to the manager and do analysis
    # ck4, is there a better way to specify the first 4 arguments? The DTKCase from Test-land might be nicer.
    # After all, the names COULD be different
    analyzer = FidelityReportAnalyzer(
        'catalyst_report',
        'config.json',
        mod.run_sim_args['config_builder'].get_param(
            'Demographics_Filenames')[0],
        experiment_definition=defn,
        experiment_id=experiment.exp_id,
        experiment_name=experiment.exp_name,
        label=args.report_label,
        time_series_step_from=defn['step_from'],
        time_series_step_to=defn['step_to'],
        time_series_equal_step_count=True,
        raw_data=True,
        debug=args.debug)
    am.add_analyzer(analyzer)
    am.analyze()

    # Open the generated HTML summary in the default browser.
    import webbrowser
    webbrowser.open_new("file:///{}".format(
        os.path.join(os.getcwd(), "catalyst_report", "summary_report.html")))
Beispiel #18
0
            int(x["year"]) + 2010,
            str(int(x["month"])).zfill(2)),
                              axis=1)
        # print("mdate")
        df["mdate"] = df.apply(lambda x: date_to_mdate(x["date"]), axis=1)
        # print("plot")
        # ax.plot(df["year"] * 12 + df["month"], df["cases"], *args, **kwargs)
        ax.plot_date(df["mdate"], df["cases"], *args, **kwargs)

        ax.set_xlabel("Date")
        ax.set_ylabel("Cases")
        ax.set_xlim([date_to_mdate("2010-01-01"), date_to_mdate("2017-01-01")])
        # ax.tick_params(direction="inout")

    def uid(self):
        """Unique identifier built from the site name and the analyzer name."""
        return "{}_{}".format(self.site.name, self.name)


if __name__ == "__main__":
    # Use the HPC (COMPS) block from the simtools configuration.
    SetupParser.init('HPC')

    am = AnalyzeManager()

    # Calibration experiments:
    am.add_experiment(
        retrieve_experiment("a0bee2bd-f8b5-e811-a2c0-c4346bcb7275"))

    # Score the bbondo site's incidence likelihood for the experiment above.
    am.add_analyzer(incidence_likelihood(zambia_calib_site("bbondo")))
    am.analyze()
########################
# other configurations
########################

# If the base collection containing CMS exists, use it
# If not, use the local
# If the base collection containing CMS exists, use it
# If not, use the local
if SetupParser.default_block == "HPC":
    try:
        cb.set_collection_id('CMS 0.82 Pre-release')
    except SimulationAssets.InvalidCollection:
        # Collection not available on the server: fall back to local binaries.
        cb.set_experiment_executable('inputs/compartments/compartments.exe')
        cb.set_dll_root('inputs/compartments')
else:
    # Non-HPC runs always use the local CMS executable and DLLs.
    cb.set_experiment_executable('inputs/compartments/compartments.exe')
    cb.set_dll_root('inputs/compartments')

run_sim_args = {"config_builder": cb, "exp_name": "First Default CMS run"}

if __name__ == "__main__":
    SetupParser.init()
    em = ExperimentManagerFactory.from_cb(run_sim_args["config_builder"])
    em.run_simulations(exp_name=run_sim_args["exp_name"])

    # Wait for the simulation to complete
    em.wait_for_finished(verbose=True)

    # Analyze the most recently run experiment with the simple CMS analyzer.
    am = AnalyzeManager(exp_list='latest')
    am.add_analyzer(SimpleCMSAnalyzer())
    am.analyze()
Beispiel #20
0
            date_to_mdate("2013-01-01"),
            date_to_mdate("2014-01-01"),
            date_to_mdate("2015-01-01"),
            date_to_mdate("2016-01-01"),
            date_to_mdate("2017-01-01"),
            date_to_mdate("2018-01-01"),
            date_to_mdate("2019-01-01")
        ])

        # plt.ylim([-0.01,0.25])
        plt.ylabel("RDT Prevalence")
        plt.legend(frameon=True)
        plt.tight_layout()
        if self.save_file:
            plt.savefig(self.save_file + ".pdf")
            plt.savefig(self.save_file + ".png")
        else:
            plt.show()


if __name__ == "__main__":
    am = AnalyzeManager()
    # am.add_experiment(retrieve_experiment("cdb12c2d-61c3-e811-a2bd-c4346bcb1555"))
    am.add_experiment(
        retrieve_experiment("9df3a55a-63c3-e811-a2bd-c4346bcb1555"))
    # Only the "C2" color / sample-5 variant is active; the others are kept for
    # quick comparison runs.
    # am.add_analyzer(custom_prev_plot_analyzer("chiyabi","C0", 3))
    # am.add_analyzer(custom_prev_plot_analyzer("chiyabi","C1", 4))
    am.add_analyzer(custom_prev_plot_analyzer("chiyabi", "C2", 5))
    # am.add_analyzer(custom_prev_plot_analyzer("chiyabi","C3", 6))
    am.analyze()
Beispiel #21
0

    def finalize(self):
        # Effectively a no-op: just emits a blank line. The debug dump of
        # self.my_data is intentionally left commented out.
        # print self.my_data
        print("")

    def plot(self):
        """Histogram the number of trips per person for every simulation."""
        import matplotlib.pyplot as plt

        for sim_id in self.n_trips:
            trips = self.n_trips[sim_id]
            # The recorded data covers travellers only, so pad with zeros for
            # everyone in the initial population who never travelled.
            stay_at_home = self.pop_init[sim_id] - self.n_travellers[sim_id]
            padded = np.append(trips, np.zeros(int(stay_at_home)))
            plt.hist(padded,
                     histtype='stepfilled',
                     alpha=0.4,
                     log=True,
                     label=self.metadata[sim_id])

        plt.legend()

        plt.show()



if __name__=="__main__":
    # Use the HPC (COMPS) block from the simtools configuration.
    SetupParser.init('HPC')

    am = AnalyzeManager()

    am.add_experiment(retrieve_experiment("151f8b4b-867c-e711-9401-f0921c16849d"))

    # Run the migration analyzer over the experiment above.
    am.add_analyzer(MigrationAnalyzer())
    am.analyze()
Beispiel #22
0
def plot_RDT(exp_id, sample, save_file=None, **kwargs):
    """Run the RDT-prevalence plot analyzer over a single experiment.

    :param exp_id: COMPS experiment id to retrieve and analyze.
    :param sample: sample identifier forwarded to the prevalence analyzer.
    :param save_file: optional basename for saving figures; forwarded to the
        analyzer (None presumably means "show interactively").
    :param kwargs: extra keyword arguments passed through to the analyzer.

    NOTE(review): ``catch`` is not a parameter or local here — it must be a
    module-level global (likely the catchment name); confirm it is defined
    before this is called, otherwise this raises NameError.
    """
    am = AnalyzeManager()
    am.add_experiment(retrieve_experiment(exp_id))
    am.add_analyzer(
        prevalence_plot_analyzer(catch, sample, save_file=save_file, **kwargs))
    am.analyze()
        plt.tight_layout()
        # if self.save_file:
        #     # if self.cait_output_mode:
        #     #     MozambiqueExperiment.save_figs_for_caitlin(fig,self.save_file)
        #     # else:
        if not self.save_file:
            self.save_file = save_file = "figs/{}".format(self.catch)
        # plt.savefig(self.save_file + ".pdf")
        # plt.savefig(self.save_file + ".png")
        # else:
        plt.show()
        print("Done!")


if __name__ == "__main__":
    # Run the prevalence analyzer on one catchment experiment via HPC.
    # Alternate catchment experiment ids (swap in as needed):
    #   chichuco:          0a373d77-1f93-e811-a2c0-c4346bcb7275
    #   chicutso:          0d801fc0-3c92-e811-a2c0-c4346bcb7275
    #   mahel:             210bcb89-e696-e811-a2c0-c4346bcb7275
    #   mapulanguene:      10238aac-7593-e811-a2c0-c4346bcb7275
    #   moine:             85bef741-2d97-e811-a2c0-c4346bcb7275
    #   motaze:            140fe8a7-1194-e811-a2c0-c4346bcb7275
    #   panjane-caputine:  b1c79146-6194-e811-a2c0-c4346bcb7275
    SetupParser.init('HPC')

    manager = AnalyzeManager()
    # Currently analyzing magude-sede-facazissa:
    manager.add_experiment(
        retrieve_experiment("c5c3c5bb-a79c-e811-a2c0-c4346bcb7275"))
    manager.add_analyzer(PrevAnalyzer())
    manager.analyze()
Beispiel #24
0
            for exp_id in self.pop_data.keys():
                plt.plot(
                    np.array(self.raw_pop_data[exp_id][-2]) /
                    self.tot_pop[exp_id][-2])
            ax.set_title("Late")
            ax.set_xticks(range(24))
            ax.set_xticklabels(self.age_bins)
            plt.show()

        #
        # for exp_id in self.pop_data.keys():
        #     plt.plot_date(self.report_times, self.pop_data[exp_id],fmt='-',c=c,linewidth=lw,label=label,alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        # plt.legend([s['environment'] for s in self.metadata.values()])


if __name__ == "__main__":
    # Age-stratification analysis over two experiments (L1 and L6).
    # Alternate id — L5: 001a9f44-758c-e711-9401-f0921c16849d
    SetupParser.init('HPC')

    manager = AnalyzeManager()
    for exp_id in (
            "f4ecdcc6-768c-e711-9401-f0921c16849d",  # L1
            "4188b9de-e28c-e711-9401-f0921c16849d",  # L6
    ):
        manager.add_experiment(retrieve_experiment(exp_id))

    manager.add_analyzer(AgeStratificationAnalyzer())
    manager.analyze()
Beispiel #25
0
        selected = [
            p.selected_data[id(self)] for p in parsers.values()
            if id(self) in p.selected_data
        ]
        self.data = pd.concat(selected)

    def finalize(self):
        """Plot the selected channel over time, one line per species per
        simulation.

        Saves ``<save_file>.png`` and ``<save_file>.pdf`` when
        ``self.save_file`` is set; otherwise opens an interactive window.
        """
        import seaborn as sns

        sns.set_style("darkgrid")
        figure = plt.figure(figsize=(10, 6))
        axes = figure.gca()

        # One curve per (simulation, species) pair, labelled by species.
        for _sim_id, sim_frame in self.data.groupby('sim_id'):
            for species, species_frame in sim_frame.groupby('species'):
                axes.plot(species_frame['date'], species_frame[self.channel],
                          label=species)

        axes.legend()
        plt.ylabel(self.channel)

        if self.save_file:
            # Persist raster and vector versions of the figure.
            plt.savefig(self.save_file + ".png")
            plt.savefig(self.save_file + ".pdf")
        else:
            plt.show()


if __name__ == '__main__':
    # Analyze the vector-species report for a single simulation.
    manager = AnalyzeManager()
    manager.add_analyzer(VectorSpeciesReportAnalyzer())
    manager.add_simulation('4047a20f-b33d-e811-a2bf-c4346bcb7274')
    manager.analyze()
                                                  ymax=1.0)

        plt.legend()
        # plt.xlim([3000,7000])
        plt.xlim([foo("2014-01-01"), foo("2018-01-01")])
        plt.ylim([-0.01,0.15])

        plt.tight_layout()
        plt.show()
        # plt.savefig(self.base + "data/figs/{}_prev.png".format(catch))


if __name__=="__main__":
    SetupParser.init('HPC')

    am = AnalyzeManager()

    # Calibration experiments:
    # am.add_experiment(retrieve_experiment("09829129-b00b-e811-9415-f0921c16b9e5")) #Mahel
    # am.add_experiment(retrieve_experiment("11cb8543-e20b-e811-9415-f0921c16b9e5")) #Motaze
    # am.add_experiment(retrieve_experiment("8853ca79-1c0c-e811-9415-f0921c16b9e5"))

    # am.add_experiment(retrieve_experiment("171711d2-a010-e811-9415-f0921c16b9e5")) #Caputine
    # am.add_experiment(retrieve_experiment("632dd6f5-a610-e811-9415-f0921c16b9e5")) # Chicutso
    # am.add_experiment(retrieve_experiment("ef6564ad-a110-e811-9415-f0921c16b9e5"))  # Mahel
    # am.add_experiment(retrieve_experiment("fd4866f4-a310-e811-9415-f0921c16b9e5"))  # Mapulanguene
    # am.add_experiment(retrieve_experiment("da1bccd2-a910-e811-9415-f0921c16b9e5"))  # Moine
    # am.add_experiment(retrieve_experiment("7e10e1d1-a710-e811-9415-f0921c16b9e5"))  # Panjane

    # am.add_experiment(retrieve_experiment("7e10e1d1-a710-e811-9415-f0921c16b9e5"))  # Panjane multi-dose