def check_location(self, iteration_state):
        """
        Validate that the experiment referenced by *iteration_state* can still
        be restored, and whether its location matches the current one.

        - Handle the case: process got interrupted but it still runs on remote
        - Handle location change case: may resume from commission instead
        """
        exp_id = None
        exp = None
        # Step 1: Checking possible location changes
        try:
            exp_id = iteration_state.experiment_id
            exp = retrieve_experiment(exp_id)
        except Exception:
            import traceback
            traceback.print_exc()

        if not exp:
            # BUG FIX: the conditional used to bind looser than the % operator
            # ("..." % exp_id if exp_id else 'None'), so the whole prompt
            # collapsed to the string 'None' whenever exp_id was falsy.
            var = input("Cannot restore Experiment 'exp_id: %s'. Force to resume from commission... Continue ? [Y/N]"
                        % (exp_id if exp_id else 'None'))
            # force to resume from commission
            if var.upper() == 'Y':
                iteration_state.resume_point = StatusPoint.commission
                # BUG FIX: exp is None here; falling through to the location
                # comparison below would raise AttributeError.
                return
            else:
                logger.info("Answer is '%s'. Exiting...", var.upper())
                exit()

        # If location has been changed, will double check user for a special case before proceed...
        if self.location != exp.location:
            location = SetupParser.get('type')
            var = input("Location has been changed from '%s' to '%s'. Resume will start from commission instead, do you want to continue? [Y/N]:  " % (
                exp.location, location))
            if var.upper() == 'Y':
                self.current_iteration.resume_point = StatusPoint.commission
            else:
                logger.info("Answer is '%s'. Exiting...", var.upper())
                exit()
Exemple #2
0
    def add_experiment(self, experiment):
        """Register *experiment*, resolving ids/strings to Experiment objects first."""
        from simtools.DataAccess.Schema import Experiment

        # Accept either an Experiment instance or anything retrieve_experiment understands.
        resolved = experiment if isinstance(experiment, Experiment) else retrieve_experiment(experiment)

        if resolved not in self.experiments:
            self.experiments.append(resolved)
Exemple #3
0
def plot_vectors(exp_id, sample, save_file=None):
    """Run the vector-species 'Daily HBR' analyzer over a single experiment."""
    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment(exp_id))
    analyzer = VectorSpeciesReportAnalyzer(sample,
                                           save_file=save_file,
                                           channel='Daily HBR')
    manager.add_analyzer(analyzer)
    manager.analyze()
Exemple #4
0
def retrieve_item(itemid):
    """
    Return the object identified by *itemid*.

    Can be an experiment, a batch (looked up by id, then by name), a suite
    (local, then COMPS) or a simulation. If it is a suite, all experiments
    with this suite_id will be returned as a list.

    :param itemid: id (or batch name) to look up
    :raises Exception: when nothing matches *itemid*
    """
    # First try to get an experiment
    from simtools.Utilities.Experiments import retrieve_experiment
    from simtools.DataAccess.DataStore import DataStore
    from simtools.Utilities.COMPSUtilities import exps_for_suite_id
    from simtools.Utilities.Experiments import retrieve_simulation

    # Try experiments first
    try:
        return retrieve_experiment(itemid)
    except Exception:
        # Not an experiment id -> fall through to the other item kinds.
        pass

    # This was not an experiment, maybe a batch?
    batch = DataStore.get_batch_by_id(itemid)
    if batch:
        return batch

    batch = DataStore.get_batch_by_name(itemid)
    if batch:
        return batch

    # Still no item found -> test the suites
    exps = DataStore.get_experiments_by_suite(itemid)
    if exps:
        return exps

    # Still no item found -> test the simulations
    sim = DataStore.get_simulation(itemid)
    if sim:
        return sim

    # Still not -> last chance is a COMPS suite
    exps = exps_for_suite_id(itemid)
    if exps:
        return [retrieve_experiment(str(exp.id)) for exp in exps]

    # Nothing left to try but a COMPS simulation
    try:
        return retrieve_simulation(itemid)
    except Exception:
        pass

    # Didn't find anything, sorry
    raise Exception('Could not find any item corresponding to %s' % itemid)
def run_single_analyzer(exp_id, analyzer, savefile_prefix=""):
    """Run one analyzer class over *exp_id* on HPC and dump its results to CSV."""
    SetupParser.default_block = 'HPC'
    SetupParser.init()

    manager = AnalyzeManager()
    manager.add_analyzer(analyzer())
    manager.add_experiment(retrieve_experiment(exp_id))
    manager.analyze()

    results = manager.analyzers[0].results
    results.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return results
Exemple #6
0
    def add_experiment(self, experiment):
        """Register an experiment (object or retrievable id) and prime the COMPS caches."""
        from simtools.DataAccess.Schema import Experiment
        from simtools.Utilities.COMPSUtilities import COMPS_login

        if not isinstance(experiment, Experiment):
            experiment = retrieve_experiment(experiment)

        # Already known -> nothing to do.
        if experiment in self.experiments:
            return

        self.experiments.add(experiment)
        # HPC experiments need an authenticated COMPS session and a warmed cache.
        if experiment.location == "HPC":
            COMPS_login(experiment.endpoint)
            COMPSCache.load_experiment(experiment.exp_id)

        self.filter_simulations(experiment.simulations)
Exemple #7
0
    def from_experiment(cls, experiment, config_builder=None):
        """Build the location-appropriate ExperimentManager for *experiment*."""
        if isinstance(experiment, str):
            # A string id was passed -> load the experiment from the DB
            from simtools.Utilities.Experiments import retrieve_experiment
            experiment = retrieve_experiment(experiment)

        logger.debug(
            "Factory - Creating ExperimentManager for experiment %s pid: %d location: %s"
            % (experiment.id, os.getpid(), experiment.location))
        manager_cls = cls._factory(type=experiment.location)
        return manager_cls(experiment=experiment, config_builder=config_builder)
Exemple #8
0
    def resume(self, iter_step):
        # Restore an interrupted calibration so it can continue from
        # *iter_step* (a StatusPoint member reached before the interruption).
        # step 1: If we know we are running -> recreate the exp_manager
        if iter_step.value >= StatusPoint.running.value:
            self.exp_manager = ExperimentManagerFactory.from_experiment(
                retrieve_experiment(self.experiment_id))

        # step 2: restore next_point
        # For commission/iteration_start we reload the PREVIOUS iteration's
        # next-point state; for analyze we reload the current iteration's.
        if iter_step not in (StatusPoint.plot, StatusPoint.next_point,
                             StatusPoint.running) and self.iteration != 0:
            if iter_step == StatusPoint.commission or iter_step == StatusPoint.iteration_start:
                iteration_state = IterationState.restore_state(
                    self.calibration_name, self.iteration - 1)
                self.next_point_algo.set_state(iteration_state.next_point,
                                               self.iteration - 1)
            elif iter_step == StatusPoint.analyze:
                iteration_state = IterationState.restore_state(
                    self.calibration_name, self.iteration)
                self.next_point_algo.set_state(iteration_state.next_point,
                                               self.iteration)

                # For IMIS ONLY!
                self.next_point_algo.restore(
                    IterationState.restore_state(self.calibration_name,
                                                 self.iteration - 1))
        else:
            self.next_point_algo.set_state(self.next_point, self.iteration)

        # step 3: restore Calibration results
        if self.iteration > 0 and iter_step.value < StatusPoint.plot.value:
            # it will combine current results with previous results
            self.restore_results(self.iteration - 1)
        else:
            # it will use the current results and resume from next iteration
            self.restore_results(self.iteration)

        # step 4: prepare resume states
        if iter_step.value <= StatusPoint.commission.value:
            # need to run simulations
            self.simulations = {}

        if iter_step.value <= StatusPoint.analyze.value:
            # just need to calculate the results
            self.results = {}

        # Rewind _status one step — presumably so the run loop re-executes
        # iter_step itself; confirm against the calibration driver.
        self._status = StatusPoint(iter_step.value -
                                   1) if iter_step.value > 0 else None
def set_up_input_paths(cb,
                       exe_collection_id,
                       dll_collection_id,
                       input_collection_id,
                       burnin_id=''):
    """
    Point *cb* at the given exe/dll/input asset collections and, when a
    *burnin_id* is supplied, at a serialized burn-in population.

    :param cb: config builder to mutate in place
    :param burnin_id: optional experiment id of a serialized burn-in run
    """
    cb.set_exe_collection(exe_collection_id)
    cb.set_dll_collection(dll_collection_id)
    cb.set_input_collection(input_collection_id)

    if burnin_id:
        expt = retrieve_experiment(burnin_id)
        # Take the first simulation's path directly instead of materializing
        # the path of every simulation just to index [0].
        serialized_file_path = expt.simulations[0].get_path()

        cb.update_params({
            'Serialized_Population_Path':
            os.path.join(serialized_file_path, 'output'),
            'Serialized_Population_Filenames': ['state-36500.dtk']
        })
def createSimDirectoryMap_suite(sname):
    """
    Build a DataFrame mapping every simulation in the suite *sname* to its
    tags, simulation id and output path (one row per simulation).
    """
    from COMPS.Data import Suite, QueryCriteria
    from simtools.Utilities.Experiments import retrieve_experiment
    from COMPS import Client
    Client.login('https://comps.idmod.org')

    ste = Suite.get(sname)
    expts = ste.get_experiments()
    tagstemp = []
    simidstemp = []
    outpathstemp = []

    for exp in expts:
        expt = retrieve_experiment(str(exp.id))
        tagstemp.append([x.tags for x in expt.simulations])
        simidstemp.append([x.id for x in expt.simulations])
        # BUG FIX: collect output paths per experiment while iterating —
        # the original computed them after the loop from the last `expt`
        # only, so the 'outpath' column was misaligned for multi-experiment
        # suites (tags/simids covered all experiments, outpath only the last).
        outpathstemp.append([x.get_path() for x in expt.simulations])
    tags = [item for sublist in tagstemp for item in sublist]
    simids = [item for sublist in simidstemp for item in sublist]
    outpaths = [item for sublist in outpathstemp for item in sublist]
    df = pd.DataFrame(tags)
    df['simid'] = simids
    df['outpath'] = pd.Series(outpaths)

    return df
def createSimDirectoryMap(expname):
    """
    Build a DataFrame mapping every simulation of experiment *expname* to its
    tags, simulation id and output path (one row per simulation).
    """
    # BUG FIX: Client.login was called but the `Client` import had been
    # commented out, which raises NameError unless some module-level import
    # happens to provide it.
    from COMPS import Client
    Client.login('https://comps.idmod.org')

    expt = retrieve_experiment(expname)
    df = pd.DataFrame([x.tags for x in expt.simulations])
    df['simid'] = pd.Series([x.id for x in expt.simulations])
    df['outpath'] = pd.Series([x.get_path() for x in expt.simulations])

    return df
Exemple #12
0
def reload_experiment(args=None, try_sync=True):
    """
    Return an ExperimentManager for the given expId, or for the most recent
    experiment when no id is provided.

    :param args: parsed CLI args; only ``args.expId`` is read (may be None)
    :param try_sync: when True, try to retrieve the experiment (COMPS sync)
    :return: ExperimentManager wrapping the experiment; exits when not found
    """
    exp_id = args.expId if args else None
    # BUG FIX: `exp` was unbound when exp_id was set but try_sync was False,
    # raising UnboundLocalError instead of the intended error message.
    exp = None
    if not exp_id:
        exp = DataStore.get_most_recent_experiment(exp_id)
    elif try_sync:
        try:
            exp = retrieve_experiment(exp_id, verbose=False)
        except Exception:
            exp = None

    if not exp:
        logger.error(
            "No experiment found with the ID '%s' Locally or in COMPS. Exiting..."
            % exp_id)
        exit()

    return ExperimentManagerFactory.from_experiment(exp)
def run_analyzers(exp_id, analyzers, savefile_prefix=""):
    """
    Run several analyzer classes over *exp_id* on HPC, merge their result
    DataFrames on ``sim_id`` and dump the combined frame to CSV.

    :param exp_id: experiment id to analyze
    :param analyzers: non-empty list of analyzer classes (instantiated here)
    :param savefile_prefix: prefix for the output CSV file name
    :raises ValueError: when *analyzers* is empty
    :return: merged results DataFrame
    """
    def _remove_duplicate_columns(df):
        # Drop the merge-suffixed duplicates while preserving column order.
        return df[[c for c in df.columns if "_duplicated" not in c]]

    SetupParser.default_block = 'HPC'
    SetupParser.init()

    if not analyzers:
        raise ValueError("run_analyzers requires at least one analyzer")

    am = AnalyzeManager()
    for a in analyzers:
        am.add_analyzer(a())
    exp = retrieve_experiment(exp_id)
    am.add_experiment(exp)
    am.analyze()

    # BUG FIX: the original only merged the FIRST TWO analyzers' results,
    # silently dropping any further analyzers; merge them all pairwise.
    df_return = am.analyzers[0].results
    for other in am.analyzers[1:]:
        df_return = pd.merge(df_return,
                             other.results,
                             on="sim_id",
                             suffixes=["", "_duplicated"])
    df_return = _remove_duplicate_columns(df_return)

    df_return.to_csv("{}_{}.csv".format(savefile_prefix, exp_id), index=False)
    return df_return
def collect_experiments_simulations(ids):
    """Resolve a mixed list of ids into ({exp_id: Experiment}, {sim_id: Simulation})."""
    experiments = {}
    simulations = {}

    if not ids:
        return experiments, simulations

    # Each id may resolve to a suite (list), experiment, simulation or batch.
    for itemid in ids:
        item = retrieve_item(itemid)
        if isinstance(item, list):
            # A suite id came back as a list of experiments
            for e in item:
                experiments[e.exp_id] = e
        elif isinstance(item, Experiment):
            experiments[item.exp_id] = item
        elif isinstance(item, Simulation):
            simulations[item.id] = item
        elif isinstance(item, Batch):
            # We have to retrieve_experiment even if we already have the experiment object
            # to make sure we are loading the simulations associated with it
            for e in item.experiments:
                experiments[e.exp_id] = retrieve_experiment(e.exp_id)
            for s in item.simulations:
                simulations[s.id] = retrieve_simulation(s.id)

    return experiments, simulations
Exemple #15
0
            for exp_id in self.pop_data.keys():
                plt.plot(
                    np.array(self.raw_pop_data[exp_id][-2]) /
                    self.tot_pop[exp_id][-2])
            ax.set_title("Late")
            ax.set_xticks(range(24))
            ax.set_xticklabels(self.age_bins)
            plt.show()

        #
        # for exp_id in self.pop_data.keys():
        #     plt.plot_date(self.report_times, self.pop_data[exp_id],fmt='-',c=c,linewidth=lw,label=label,alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        #     plt.plot_date(self.report_times, self.pop_data[exp_id], fmt='-', c=c, linewidth=lw, label=label, alpha=0.4)
        # plt.legend([s['environment'] for s in self.metadata.values()])


if __name__ == "__main__":
    SetupParser.init('HPC')

    am = AnalyzeManager()

    # Experiments to analyze (L5 kept commented out for reference below)
    for exp_id in ("f4ecdcc6-768c-e711-9401-f0921c16849d",   # L1
                   "4188b9de-e28c-e711-9401-f0921c16849d"):  # L6
        am.add_experiment(retrieve_experiment(exp_id))
    # am.add_experiment(retrieve_experiment("001a9f44-758c-e711-9401-f0921c16849d")) # L5

    am.add_analyzer(AgeStratificationAnalyzer())
    am.analyze()
Exemple #16
0
    #exps.append(retrieve_experiment('bfa4def0-ef60-e711-9401-f0921c16849d'))

    ####### Plots with vectors eir with latest eradication.exe Cov = 1., Seek = 1., Sweep Rate
    #Conclusion : No variation in Incidence rate which increased and remained consistent at a rate of ~2.3
    # 0Round Rate = .3
    #exps.append(retrieve_experiment('88d2db14-0461-e711-9401-f0921c16849d'))
    # 0Round Rate = .5
    #exps.append(retrieve_experiment('a8e33f25-0461-e711-9401-f0921c16849d'))
    # 0Round Rate = .75
    #exps.append(retrieve_experiment('987a4438-0461-e711-9401-f0921c16849d'))
    # 0Round Rate = 1
    #exps.append(retrieve_experiment('fc09984f-0461-e711-9401-f0921c16849d'))

    ####### Plots with vectors eir with latest eradication.exe Cov = .3, Rate = 0., Sweep Seek
    # 0Round Seek = .3
    exps.append(retrieve_experiment('dc6ef1f4-0861-e711-9401-f0921c16849d'))
    # 0Round Seek = .5
    exps.append(retrieve_experiment('2971800a-0961-e711-9401-f0921c16849d'))
    # 0Round Seek = .75
    exps.append(retrieve_experiment('a51c1d1c-0961-e711-9401-f0921c16849d'))
    # 0Round Seek = 1
    exps.append(retrieve_experiment('3df09b31-0961-e711-9401-f0921c16849d'))

    #Old plots for Gates review
    #exps.append(retrieve_experiment('3fb8ebc1-29cd-e611-93fe-f0921c168499'))
    #exps.append(retrieve_experiment('e489a1a3-29cd-e611-93fe-f0921c168499'))
    #exps.append(retrieve_experiment('118a2a45-29cd-e611-93fe-f0921c168499'))
    #exps.append(retrieve_experiment('65612e1a-29cd-e611-93fe-f0921c168499'))
    SetupParser.init('HPC')
    am = AnalyzeManager(exp_list=exps, analyzers=IncidenceAnalyzer())
    am.analyze()
Exemple #17
0
    # am.add_experiment(retrieve_experiment("15a20ddd-2a36-e811-a2bf-c4346bcb7274"))  # facazissa iter5. best 0.  4/1 10:30pm
    # am.add_experiment(retrieve_experiment("86413a54-0d36-e811-a2bf-c4346bcb7274"))  # magude iter3. best 10.  4/1 10:30pm  X
    # am.add_experiment(retrieve_experiment("15a1d9fe-2f36-e811-a2bf-c4346bcb7274"))  # mahel iter9.  best 0. 4/1
    #  am.add_experiment(retrieve_experiment("0fc16f8f-2636-e811-a2bf-c4346bcb7274")) # mapulanguene iter9. best 10.  4/1 10:30pm
    # am.add_experiment(retrieve_experiment("f5873afe-1336-e811-a2bf-c4346bcb7274"))  # moine iter6. best 0 4/1
    # am.add_experiment(retrieve_experiment("19794550-c135-e811-a2bf-c4346bcb7274"))  # motaze iter1. best 15 4/1
    # am.add_experiment(retrieve_experiment("e6f8c635-2d36-e811-a2bf-c4346bcb7274"))  # panjane iter6. best 0 4/1

    # am.add_experiment(retrieve_experiment("6fe0132a-c135-e811-a2bf-c4346bcb7274")) # faca stage1, iter1, best 9
    # am.add_experiment(retrieve_experiment("86413a54-0d36-e811-a2bf-c4346bcb7274")) # m-s stage 1. iter3, best 12
    # am.add_experiment(retrieve_experiment("eb30545d-e536-e811-a2bf-c4346bcb7274")) # m-s stage 2.  ite3, best 6

    # am.add_experiment(retrieve_experiment("d4b08d09-1835-e811-a2bf-c4346bcb7274")) #caputine iter12. best 8.
    # am.add_experiment(retrieve_experiment("0fc97f4a-4634-e811-a2bf-c4346bcb7274"))  # chichuco iter0.  best 3
    # am.add_experiment(retrieve_experiment("f67437d5-4e34-e811-a2bf-c4346bcb7274"))  # chicutso iter2. best 3
    # am.add_experiment(retrieve_experiment("d7d2a0be-a234-e811-a2bf-c4346bcb7274")) # facazissa iter3.  best 12
    # am.add_experiment(retrieve_experiment("3240a906-9e33-e811-a2bf-c4346bcb7274"))  # magude iter0. best 21.
    # am.add_experiment(retrieve_experiment("6cd7957f-cb34-e811-a2bf-c4346bcb7274"))  # mahel iter6. best 11.
    # am.add_experiment(retrieve_experiment("0dbd4e00-cc34-e811-a2bf-c4346bcb7274")) # mapulanguene iter8. best 3
    # am.add_experiment(retrieve_experiment("777c34a8-dc34-e811-a2bf-c4346bcb7274"))  # moine iter6. best 8
    # am.add_experiment(retrieve_experiment("5171d868-4634-e811-a2bf-c4346bcb7274"))  # motaze iter0. best 11
    # am.add_experiment(retrieve_experiment("7a5ab67b-dc34-e811-a2bf-c4346bcb7274"))  # panjane iter8. best 17

    # am.add_experiment(retrieve_experiment("2ecf9cd7-9c35-e811-a2bf-c4346bcb7274")) #aggregate 2014.  iter2, best 20
    # am.add_experiment(retrieve_experiment("d8cb3061-ae35-e811-a2bf-c4346bcb7274")) #aggregate 2014,2015.  iter2, best 5

    am.add_experiment(
        retrieve_experiment("2f76368f-bc57-e811-a2bf-c4346bcb7274"))

    am.add_analyzer(PrevAnalyzer(cait_output_mode=True, gatesreview=True))
    am.analyze()
Exemple #18
0
from dtk.utils.analyzers import TimeseriesAnalyzer, VectorSpeciesAnalyzer
from simtools.AnalyzeManager.AnalyzeManager import AnalyzeManager
from simtools.Utilities.Experiments import retrieve_experiment

if __name__ == "__main__":
    # Retrieve a couple of test experiments
    exp_ids = ('158cc530-780e-e711-9400-f0921c16849c',
               'c62aa746-780e-e711-9400-f0921c16849c')
    test_experiments = [retrieve_experiment(eid) for eid in exp_ids]

    # Create an analyze manager preloaded with the experiments we want to analyze
    am = AnalyzeManager(exp_list=test_experiments)

    # Add the analyzers to the manager
    am.add_analyzer(TimeseriesAnalyzer())
    am.add_analyzer(VectorSpeciesAnalyzer())

    # Analyze
    am.analyze()
        #     # c1 = green = IRS
        #     # c2 = red = MSAT
        #     # c3 = purple = MDA

        plt.legend()
        # plt.xlim([3000,7000])
        plt.xlim([foo("2010-01-01"), foo("2019-01-01")])
        # plt.show()
        plt.tight_layout()
        plt.savefig(base + "data/figs/{}_prev_node.png".format(catch))


if __name__ == "__main__":
    SetupParser.init('HPC')

    am = AnalyzeManager()

    # Gwembe/Sinazongwe-style sites — only nyanga chaamwe is active here.
    # am.add_experiment(retrieve_experiment("43cac760-cbd6-e711-9414-f0921c16b9e5")) # bbondo
    # am.add_experiment(retrieve_experiment("a31b516a-cbd6-e711-9414-f0921c16b9e5"))  # chabbobboma
    # am.add_experiment(retrieve_experiment("1ecdf372-cbd6-e711-9414-f0921c16b9e5")) # chisanga
    # am.add_experiment(retrieve_experiment("957e6159-32d6-e711-9414-f0921c16b9e5")) # chiyabi
    # am.add_experiment(retrieve_experiment("9669907b-cbd6-e711-9414-f0921c16b9e5"))  # luumbo
    # am.add_experiment(retrieve_experiment("fbe40809-ccd6-e711-9414-f0921c16b9e5"))  # munyumbwe
    am.add_experiment(
        retrieve_experiment(
            "8aadd6a0-cbd6-e711-9414-f0921c16b9e5"))  # nyanga chaamwe
    # am.add_experiment(retrieve_experiment("d18a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinafala
    # am.add_experiment(retrieve_experiment("d28a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinamalima

    am.add_analyzer(RDTPrevAnalyzer())
    am.analyze()
    # NOTE(review): the same manager is reused below, a second RDTPrevAnalyzer
    # is added and analyze() runs again — this looks like two scripts pasted
    # together; confirm whether the second pass is intentional.
    # Calibration experiments:
    # am.add_experiment(retrieve_experiment("09829129-b00b-e811-9415-f0921c16b9e5")) #Mahel
    # am.add_experiment(retrieve_experiment("11cb8543-e20b-e811-9415-f0921c16b9e5")) #Motaze
    # am.add_experiment(retrieve_experiment("8853ca79-1c0c-e811-9415-f0921c16b9e5"))

    # am.add_experiment(retrieve_experiment("171711d2-a010-e811-9415-f0921c16b9e5")) #Caputine
    # am.add_experiment(retrieve_experiment("632dd6f5-a610-e811-9415-f0921c16b9e5")) # Chicutso
    # am.add_experiment(retrieve_experiment("ef6564ad-a110-e811-9415-f0921c16b9e5"))  # Mahel
    # am.add_experiment(retrieve_experiment("fd4866f4-a310-e811-9415-f0921c16b9e5"))  # Mapulanguene
    # am.add_experiment(retrieve_experiment("da1bccd2-a910-e811-9415-f0921c16b9e5"))  # Moine
    # am.add_experiment(retrieve_experiment("7e10e1d1-a710-e811-9415-f0921c16b9e5"))  # Panjane

    # am.add_experiment(retrieve_experiment("7e10e1d1-a710-e811-9415-f0921c16b9e5"))  # Panjane multi-dose



    # am.add_experiment(retrieve_experiment("f335b9ab-1f12-e811-9415-f0921c16b9e5")) # Moine DONE
    # am.add_experiment(retrieve_experiment("8731f656-2a12-e811-9415-f0921c16b9e5")) # Caputine iter6
    # am.add_experiment(retrieve_experiment("f3ed1863-2b12-e811-9415-f0921c16b9e5"))  # Mahel iter2

    # am.add_experiment(retrieve_experiment("62454c29-1212-e811-9415-f0921c16b9e5"))  # Panjane iter2

    am.add_experiment(retrieve_experiment("354912fd-3612-e811-9415-f0921c16b9e5"))  # Motaze iter4
    # am.add_experiment(retrieve_experiment("169df5ae-2b12-e811-9415-f0921c16b9e5"))  # Mapulanguene

    # pbnb
    # am.add_experiment(retrieve_experiment("002e8d2d-4e12-e811-9415-f0921c16b9e5"))  # Caputine

    am.add_analyzer(RDTPrevAnalyzer())
    am.analyze()
Exemple #21
0
def catalyst(args, unknownArgs):
    """
    Catalyst run-and-analyze process as ported from the test team.
    Runs (or reuses) an experiment sweeping population scaling or timestep,
    then produces a fidelity report comparing the sweep points.
    Programmatic-only arguments:
        args.mode : used by FidelityReportExperimentDefinition, default: 'prod'
        args.report_label : attached to the experiment name
        args.debug : True/False, passed into FidelityReportAnalyzer, default: False
    :param args:
    :param unknownArgs:
    :return:
    """
    from dtk.utils.builders.sweep import GenericSweepBuilder
    from catalyst_report.fidelity_report_analyzer import FidelityReportAnalyzer
    from catalyst_report.fidelity_report_experiment_definition import FidelityReportExperimentDefinition
    import catalyst_report.utils as catalyst_utils
    from simtools.Analysis.AnalyzeManager import AnalyzeManager

    # we're going to do a dtk run, then a set-piece analysis. But first we need to do some overrides
    # to get the run part to do the desired parameter sweep.

    mod = args.loaded_module

    # when run with 'dtk catalyst', run_sim_args['exp_name'] will have additional information appended.
    mod.run_sim_args[
        'exp_name'] = mod.run_sim_args['exp_name'] + '-development'

    # lining up the arguments expected by FidelityReportExperimentDefinition
    args.sweep = args.sweep_method

    # hidden, programmatic arguments
    args.mode = args.mode if hasattr(args, 'mode') else 'prod'
    args.report_label = args.report_label if hasattr(args,
                                                     'report_label') else None
    args.debug = args.debug if hasattr(args, 'debug') else False

    # determine which report is being asked for. If not specified, default to what the config.json file says
    # ck4, this should go somewhere else, on a Config object of some sort? (prob not the builder, though)
    report_type_mapping = {
        'DENGUE_SIM': 'dengue',
        'GENERIC_SIM': 'generic',
        'HIV_SIM': 'hiv',
        'MALARIA_SIM': 'malaria',
        'POLIO_SIM': 'polio',
        'STI_SIM': 'sti',
        'TB_SIM': 'tb',
        'TYPHOID_SIM': 'typhoid',
        'VECTOR_SIM': 'generic'
    }
    if args.report_type:
        report_type = args.report_type
    else:
        # No explicit -r flag: derive the report type from the sim type.
        sim_type = mod.run_sim_args['config_builder'].config['parameters'][
            'Simulation_Type']
        report_type = report_type_mapping.get(sim_type, None)
        if not report_type:
            raise KeyError(
                'Default report type could not be determined for sim_type: %s. Report type must be specified'
                ' via -r flag.' % sim_type)

    # Create and set a builder to sweep over population scaling or model timestep
    reports = catalyst_utils.load_report_definitions(
        definitions_filename=args.report_definitions)
    if report_type in reports:
        args.report_channel_list = reports[report_type]['inset_channel_names']
    else:
        raise Exception('Invalid report: %s. Available reports: %s' %
                        (report_type, sorted(reports.keys())))
    catalyst_config = catalyst_utils.load_sweep_configs(
        sweep_type=args.sweep_type, config_filename=args.sweep_definitions)
    defn = FidelityReportExperimentDefinition(catalyst_config, args)

    # redefine the experiment name so it doesn't conflict with the likely follow-up non-catalyst experiment
    mod.run_sim_args['exp_name'] = 'Catalyst-' + mod.run_sim_args['exp_name']

    # define the sweep to perform: Run_Number x the configured sweep parameter
    sweep_dict = {
        'Run_Number': range(1,
                            int(defn['nruns']) + 1),
        defn['sweep_param']: defn['sweep_values']
    }
    mod.run_sim_args['exp_builder'] = GenericSweepBuilder.from_dict(sweep_dict)

    # overwrite spatial output channels to those used in the catalyst report
    spatial_channel_names = defn['spatial_channel_names']
    if len(spatial_channel_names) > 0:
        mod.run_sim_args['config_builder'].enable('Spatial_Output')
        mod.run_sim_args['config_builder'].params[
            'Spatial_Output_Channels'] = spatial_channel_names
    else:
        mod.run_sim_args['config_builder'].disable('Spatial_Output')
        mod.run_sim_args['config_builder'].params[
            'Spatial_Output_Channels'] = []

    # now run if no preexisting experiment id was provided
    if not args.experiment_id:
        # we must always block so that we can run the analysis at the end; run and analyze!
        args.blocking = True
        experiment_manager = run(args, unknownArgs)
        experiment = experiment_manager.experiment
        print('Done running experiment: %s' % experiment.exp_id)
    else:
        experiment = retrieve_experiment(args.experiment_id)

    # Create an analyze manager
    am = AnalyzeManager(exp_list=[experiment], verbose=False)

    # Add the TimeSeriesAnalyzer to the manager and do analysis
    # ck4, is there a better way to specify the first 4 arguments? The DTKCase from Test-land might be nicer.
    # After all, the names COULD be different
    analyzer = FidelityReportAnalyzer(
        'catalyst_report',
        'config.json',
        mod.run_sim_args['config_builder'].get_param(
            'Demographics_Filenames')[0],
        experiment_definition=defn,
        experiment_id=experiment.exp_id,
        experiment_name=experiment.exp_name,
        label=args.report_label,
        time_series_step_from=defn['step_from'],
        time_series_step_to=defn['step_to'],
        time_series_equal_step_count=True,
        raw_data=True,
        debug=args.debug)
    am.add_analyzer(analyzer)
    am.analyze()

    # Pop the generated HTML report open in the user's browser.
    import webbrowser
    webbrowser.open_new("file:///{}".format(
        os.path.join(os.getcwd(), "catalyst_report", "summary_report.html")))
    SetupParser.init()

    # collect site-specific data to pass to builder functions
    COMPS_login("https://comps.idmod.org")
    sites = pd.read_csv("site_details.csv")

    print("finding collection ids and vector details")
    site_input_dir = os.path.join("input", "sites", "all")

    with open("species_details.json") as f:
        species_details = json.loads(f.read())

    if asset_exp_id:
        print("retrieving asset experiment")
        asset_expt = retrieve_experiment(asset_exp_id)
        template_asset = asset_expt.simulations[0].tags
        cb.set_exe_collection(template_asset["exe_collection_id"])
        cb.set_dll_collection(template_asset["dll_collection_id"])
        cb.set_input_collection(template_asset["input_collection_id"])

    # Find vector proportions for each vector in our site
    site_vectors = pd.read_csv(
        os.path.join(site_input_dir, "vector_proportions.csv"))
    simulation_setup(cb, species_details, site_vectors)

    # reporting
    for idx, row in site_vectors.iterrows():
        add_summary_report(cb,
                           age_bins=list(range(10, 130, 10)),
                           nodes={
        plt.ylabel("Fraction of sims eliminating")
        plt.title(vc_packs[i])
        plt.legend()

    plt.show()



if __name__=="__main__":
    if True:
        # analyzer_list = [ExtractInfectionResult()]
        analyzers = [ExtractInfectionResult(),
                     SimulationDirectoryMapAnalyzer(save_file="sim_map.csv")]
        experiment_ids = ["520818ca-ae3b-e911-a2c5-c4346bcb7273"]

        am = AnalyzeManager(force_analyze=True)

        for exp_id in experiment_ids:
            am.add_experiment(retrieve_experiment(exp_id))
        for analyzer in analyzers:
            am.add_analyzer(analyzer)

        am.analyze()

    if True:
        convert_infection_csv_to_elim()

    if False:
        # disabled: elimination-curve plotting
        plot_elim_curves(y=2)
Exemple #24
0
def plot_RDT(exp_id, sample, save_file=None, **kwargs):
    """Run the RDT prevalence-plot analyzer over a single experiment."""
    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment(exp_id))
    # NOTE(review): `catch` is not defined in this function — presumably a
    # module-level global; confirm before relying on this helper.
    analyzer = prevalence_plot_analyzer(catch, sample, save_file=save_file, **kwargs)
    manager.add_analyzer(analyzer)
    manager.analyze()
from dtk.utils.core.DTKConfigBuilder import DTKConfigBuilder
from examples.example_iterative.MyanmarSite import MyanmarCalibSite
from simtools.OutputParser import CompsDTKOutputParser
from simtools.SetupParser import SetupParser

from calibtool.CalibManager import CalibManager
from simtools.Utilities.Experiments import retrieve_experiment
import pandas as pd
import os

SetupParser.init('HPC')

# Find experiment from whose config/campaigns we want to use (also get sweep params)
comparison_exp_id =  "9945ae69-3106-e711-9400-f0921c16849c"
sim_name = 'Rerun_Rampup_MDA_Better_Diagnostic'
expt = retrieve_experiment(comparison_exp_id)


# One row per simulation: its sweep tags, plus the on-disk output path of
# that simulation's results.
df = pd.DataFrame([x.tags for x in expt.simulations])
df['outpath'] = pd.Series([sim.get_path() for sim in expt.simulations])

# generate cb object from the first of these files (the only difference will be in the sweep params)
cb_dir = df['outpath'][0]

cb = DTKConfigBuilder.from_files(config_name=os.path.join(cb_dir, 'config.json'),
                                 campaign_name=os.path.join(cb_dir, 'campaign.json'))

# Clear the cached simulation-directory map.
# NOTE(review): presumably forces the map to be rebuilt for the new run —
# confirm against CompsDTKOutputParser semantics.
CompsDTKOutputParser.sim_dir_map = None
#cb.update_params({'Num_Cores': 1})
sites = [
Exemple #26
0
            int(x["year"]) + 2010,
            str(int(x["month"])).zfill(2)),
                              axis=1)
        # print("mdate")
        df["mdate"] = df.apply(lambda x: date_to_mdate(x["date"]), axis=1)
        # print("plot")
        # ax.plot(df["year"] * 12 + df["month"], df["cases"], *args, **kwargs)
        ax.plot_date(df["mdate"], df["cases"], *args, **kwargs)

        ax.set_xlabel("Date")
        ax.set_ylabel("Cases")
        ax.set_xlim([date_to_mdate("2010-01-01"), date_to_mdate("2017-01-01")])
        # ax.tick_params(direction="inout")

    def uid(self):
        """Return a unique identifier built from site name and analyzer name."""
        parts = [self.site.name, self.name]
        return '_'.join(parts)


if __name__ == "__main__":
    SetupParser.init('HPC')

    # Analyze the bbondo calibration experiment with the incidence
    # likelihood analyzer.
    calibration_exp_id = "a0bee2bd-f8b5-e811-a2c0-c4346bcb7275"

    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment(calibration_exp_id))
    manager.add_analyzer(incidence_likelihood(zambia_calib_site("bbondo")))
    manager.analyze()
Exemple #27
0
 def create_parsers_for_experiment_from_simulation(self, exp_id):
     """Look up an experiment by id and create output parsers for it."""
     self.create_parsers_for_experiment(retrieve_experiment(exp_id))

if __name__ == "__main__":
    SetupParser.init('HPC')

    manager = AnalyzeManager()

    # Corrected stepd runs — uncomment the catchment(s) to analyze:
    # manager.add_experiment(retrieve_experiment("43cac760-cbd6-e711-9414-f0921c16b9e5"))  # bbondo
    # manager.add_experiment(retrieve_experiment("a31b516a-cbd6-e711-9414-f0921c16b9e5"))  # chabbobboma
    # manager.add_experiment(retrieve_experiment("1ecdf372-cbd6-e711-9414-f0921c16b9e5"))  # chisanga
    # manager.add_experiment(retrieve_experiment("957e6159-32d6-e711-9414-f0921c16b9e5"))  # chiyabi
    # manager.add_experiment(retrieve_experiment("9669907b-cbd6-e711-9414-f0921c16b9e5"))  # luumbo
    # manager.add_experiment(retrieve_experiment("fbe40809-ccd6-e711-9414-f0921c16b9e5"))  # munyumbwe
    # manager.add_experiment(retrieve_experiment("8aadd6a0-cbd6-e711-9414-f0921c16b9e5"))  # nyanga chaamwe
    # manager.add_experiment(retrieve_experiment("d18a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinafala
    manager.add_experiment(retrieve_experiment("d28a9aa8-cbd6-e711-9414-f0921c16b9e5"))  # sinamalima

    # Old MBGSR runs:
    # manager.add_experiment(retrieve_experiment("7f188957-2fe1-e711-9414-f0921c16b9e5"))  # bbondo
    # manager.add_experiment(retrieve_experiment("f60d69eb-2fe1-e711-9414-f0921c16b9e5"))  # chabbobboma
    # manager.add_experiment(retrieve_experiment("7aa30068-2fe1-e711-9414-f0921c16b9e5"))  # chisanga
    # manager.add_experiment(retrieve_experiment("d57bccae-25e1-e711-9414-f0921c16b9e5"))  # chiyabi
    # manager.add_experiment(retrieve_experiment("5d5cff6d-2fe1-e711-9414-f0921c16b9e5"))  # luumbo
    # manager.add_experiment(retrieve_experiment("cf37cd7b-2fe1-e711-9414-f0921c16b9e5"))  # munyumbwe
    # manager.add_experiment(retrieve_experiment("94aa85fb-2fe1-e711-9414-f0921c16b9e5"))  # nyanga chaamwe
    # manager.add_experiment(retrieve_experiment("f5c0fb13-30e1-e711-9414-f0921c16b9e5"))  # sinafala
    # manager.add_experiment(retrieve_experiment("33b92b39-30e1-e711-9414-f0921c16b9e5"))  # sinamalima

    manager.add_analyzer(RDTPrevAnalyzer())
    manager.analyze()
        plt.tight_layout()
        # if self.save_file:
        #     # if self.cait_output_mode:
        #     #     MozambiqueExperiment.save_figs_for_caitlin(fig,self.save_file)
        #     # else:
        if not self.save_file:
            self.save_file = save_file = "figs/{}".format(self.catch)
        # plt.savefig(self.save_file + ".pdf")
        # plt.savefig(self.save_file + ".png")
        # else:
        plt.show()
        print("Done!")


if __name__ == "__main__":
    SetupParser.init('HPC')

    # Run the prevalence analyzer on one catchment experiment.
    # Alternative catchment experiment ids (swap in to analyze):
    #   "0a373d77-1f93-e811-a2c0-c4346bcb7275"  # chichuco
    #   "0d801fc0-3c92-e811-a2c0-c4346bcb7275"  # chicutso
    #   "210bcb89-e696-e811-a2c0-c4346bcb7275"  # mahel
    #   "10238aac-7593-e811-a2c0-c4346bcb7275"  # mapulanguene
    #   "85bef741-2d97-e811-a2c0-c4346bcb7275"  # moine
    #   "140fe8a7-1194-e811-a2c0-c4346bcb7275"  # motaze
    #   "b1c79146-6194-e811-a2c0-c4346bcb7275"  # panjane-caputine
    exp_id = "c5c3c5bb-a79c-e811-a2c0-c4346bcb7275"  # magude-sede-facazissa

    manager = AnalyzeManager()
    manager.add_experiment(retrieve_experiment(exp_id))
    manager.add_analyzer(PrevAnalyzer())
    manager.analyze()
Exemple #30
0
        # plt.xlim([3000,7000])
        plt.xlim([foo("2010-01-01"), foo("2019-01-01")])

        plt.tight_layout()
        plt.show()
        # plt.savefig(self.base + "data/figs/{}_prev.png".format(catch))


if __name__ == "__main__":
    SetupParser.init('HPC')

    am = AnalyzeManager()

    # Calibration experiments:
    am.add_experiment(
        retrieve_experiment("66f05adf-c10b-e811-9415-f0921c16b9e5"))

    # hand-fudged Milen habitat params
    # am.add_experiment(retrieve_experiment("4766b178-f5f4-e711-9414-f0921c16b9e5")) #bbondo
    # am.add_experiment(retrieve_experiment("34213b5c-f8f4-e711-9414-f0921c16b9e5"))  # chabbobboma
    # am.add_experiment(retrieve_experiment("84d95a7a-faf4-e711-9414-f0921c16b9e5"))  # chisanga
    # am.add_experiment(retrieve_experiment("c6313998-faf4-e711-9414-f0921c16b9e5")) # chiyabi
    # am.add_experiment(retrieve_experiment("69c0e4de-faf4-e711-9414-f0921c16b9e5"))  # luumbo
    # am.add_experiment(retrieve_experiment("4f045b1b-fbf4-e711-9414-f0921c16b9e5"))  # munyumbwe
    # am.add_experiment(retrieve_experiment("542b05fe-fbf4-e711-9414-f0921c16b9e5"))  # nyanga chaamwe (x0.5)
    # am.add_experiment(retrieve_experiment("b546a866-04f5-e711-9414-f0921c16b9e5"))  # nyanga chaamwe (x0.25)
    # am.add_experiment(retrieve_experiment("a938d951-06f5-e711-9414-f0921c16b9e5"))  # nyanga chaamwe (x0.15)
    # am.add_experiment(retrieve_experiment("47bc7d56-fcf4-e711-9414-f0921c16b9e5"))  # sinafala
    # am.add_experiment(retrieve_experiment("cd2853cf-fcf4-e711-9414-f0921c16b9e5"))  # sinamalima

    # Milen habitat params