Example #1
    def __init__(self,
                 bucketname='modeling-data.chesapeakebay.net',
                 log_level='INFO'):
        """ Move files between S3 and local

        Args:
            bucketname:
            verbose:
        """
        self.logger = set_up_detailedfilelogger(
            loggername='s3_operations',  # same name as the module, so the logger is shared
            filename="s3ops.log",
            level=log_level,
            also_logtoconsole=True,
            add_filehandler_if_already_exists=True,
            add_consolehandler_if_already_exists=False)

        # Check if running on AWS
        self.resp = None
        self.bucketname = bucketname
        self.s3 = None

        # TODO: replace the following code with a better check for whether we're in AWS (e.g., on an EC2 host)
        # try:
        #     resp = requests.get('http://169.254.169.254', timeout=0.001)
        #     self.logger.info('In AWS')
        # except:
        #     raise EnvironmentError('Not In AWS')

        self.s3 = boto3.client('s3')
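A minimal usage sketch of this initializer. The enclosing class name is not shown above, so "S3Ops" below is a hypothetical stand-in; move_to_s3 is the method called on these objects in Example #5.

# Hypothetical sketch -- class name, import path, and file paths are illustrative placeholders.
from s3_operations import S3Ops

s3ops = S3Ops(bucketname='modeling-data.chesapeakebay.net', log_level='DEBUG')
return_code = s3ops.move_to_s3(local_path='/tmp/solution.csv',
                               destination_path='results/solution.csv')
s3ops.logger.info(f"upload exited with code <{return_code}>")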
Example #2
def build_model(model_spec_name,
                geoscale,
                geoentities,
                baseloadingfilename,
                savedata2file=False,
                log_level='INFO'):
    """Generate a model for the efficiency BMPs.

    Args:
        model_spec_name (str or dict): path to a model specification file or a dictionary of model spec values
        geoscale (str):
        geoentities (list):
        baseloadingfilename (str):
        savedata2file (bool):
        log_level (:obj:`str`, optional): The log-level for the model generation logger. Defaults to 'INFO'.

    Returns
        a Pyomo ConcreteModel

    """
    logger = set_up_detailedfilelogger(
        loggername=__name__,
        filename='bayota_model_generation.log',
        level=log_level,
        also_logtoconsole=True,
        add_filehandler_if_already_exists=False,
        add_consolehandler_if_already_exists=False)
    """ Initialization; Get Data """
    specdict = read_spec(spec_file_name=model_spec_name, spectype='model')
    dataplate = get_dataplate(geoscale=geoscale,
                              geoentities=[geoentities],
                              savedata2file=savedata2file,
                              baseloadingfilename=baseloadingfilename)
    check_for_problems_in_data_before_model_construction(data=dataplate,
                                                         logger=logger)
    """ Build the model skeleton (sets, parameters, and variables) """
    variant = specdict['variant']
    if variant == 'nlp':
        builder = NonlinearVariant(logger=logger)
    elif variant == 'lp':
        builder = LinearVariant(logger=logger)
    else:
        raise ValueError(f"unrecognized model variant <{variant}>")

    model = builder.build_model(dataplate,
                                geoscale=geoscale,
                                specdict=specdict)
    """ The model is validated. """
    check_for_problems_post_model_construction(model, logger)

    return model, dataplate
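A minimal calling sketch, under assumed argument values (the spec file name, scale, entity, and loading file below are illustrative placeholders, not values from the source). Note that build_model wraps the single geoentities argument in a list before passing it to get_dataplate.

# Hypothetical invocation -- all argument values are placeholders.
model, dataplate = build_model(model_spec_name='my_model_spec.yaml',
                               geoscale='county',
                               geoentities='Adams, PA',
                               baseloadingfilename='my_base_loadings.csv',
                               savedata2file=False,
                               log_level='INFO')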
Example #3
def main(control_file,
         dryrun=False,
         no_slurm=False,
         save_to_s3=False,
         log_level='INFO') -> int:
    version = get_bayota_version()

    # Control file is read.
    control_dict, \
    actionlist, \
    compact_geo_entity_str, \
    expid, \
    expname, \
    list_of_trialdicts, \
    saved_model_file, \
    studyid = read_expcon_file(control_file)

    # Logging formats are set up.
    logger = set_up_detailedfilelogger(
        loggername=expname,  # same name as module, so logger is shared
        filename=f"step3_s{studyid}_e{expid}_{compact_geo_entity_str}.log",
        level=log_level,
        also_logtoconsole=True,
        add_filehandler_if_already_exists=True,
        add_consolehandler_if_already_exists=False)

    logger.info('----------------------------------------------')
    logger.info('************* Model Modification *************')
    logger.info('----------------------------------------------')

    # Job command is built and submitted.
    CMD = f"{modify_model_script} {control_file} --log_level={log_level}"
    if save_to_s3:
        CMD = CMD + ' --save_to_s3'
    if not no_slurm:
        slurm_options = f"--nodes={1} " \
                        f"--ntasks={1} " \
                        f"--cpus-per-task={1} " \
                        f"--exclusive "
        CMD = 'srun ' + slurm_options + CMD
    logger.info(f'Job command is: "{CMD}"')
    if notdry(dryrun, logger, '--Dryrun-- Would submit command, then wait.'):
        p = subprocess.Popen([CMD], shell=True)
        p.wait()
        if p.returncode != 0:  # Return code from process is checked.
            logger.error(
                f"Model Modification finished with non-zero code <{p.returncode}>"
            )
            return 1

    logger.info('----------------------------------------------')
    logger.info('*************** Trials looping ***************')
    logger.info('----------------------------------------------')

    # The list of trial sets to be conducted for this experiment is logged.
    tempstr = 'set' if len(list_of_trialdicts) == 1 else 'sets'
    logger.info(
        f"{logprefix} {expname} - trial {tempstr} to be conducted: {list_of_trialdicts}"
    )

    # Loop through and start each trial
    trialnum = 0
    p_list = []
    for i, dictwithtrials in enumerate(list_of_trialdicts):
        logger.info(
            f'{logprefix} {expname} - trial set #{i}: {dictwithtrials}')

        modvar = dictwithtrials['variable']
        logger.info(f'variable to modify: {modvar}')

        varvalue = dictwithtrials['value']
        logger.info(f'values: {varvalue}')

        varindexer = None
        try:
            varindexer = dictwithtrials['indexer']
            logger.info(f'indexed over: {varindexer}')
        except KeyError:
            pass

        for vi in varvalue:
            trialnum += 1
            trialidstr = '{:04}'.format(trialnum)

            logger.info(f'trial #{trialidstr}, setting <{modvar}> to <{vi}>')
            modificationstr = f"\'{{\"variable\": \"{modvar}\", " \
                              f"\"value\": {vi}, " \
                              f"\"indexer\": \"{varindexer}\"}}\'"

            # A trial control ("trialcon") file is generated by adding to the expcon file.
            control_dict['trial'] = {
                'id': trialidstr,
                'trial_name': 'exp--' + expname + '--_modvar--' + modvar + '--_trial' + trialidstr,
                'modification': modificationstr,
                'solutions_folder_name': expname
            }
            control_dict['trial']['uuid'] = control_dict['experiment']['uuid'] + '_t' + trialidstr
            control_dict['code_version'] = version  # assignment ('='), not an annotation (':')
            control_dict['run_timestamps']['step4_trial'] = \
                datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
            unique_control_name = write_control_with_uniqueid(
                control_dict=control_dict,
                name_prefix='step4_trialcon',
                logger=logger)

            # Job command is built and submitted.
            CMD = f"{solve_trial_script} {unique_control_name} --log_level={log_level}"
            if save_to_s3:
                CMD = CMD + ' --save_to_s3'
            if not no_slurm:
                slurm_options = f"--nodes={1} " \
                                f"--ntasks={1} " \
                                f"--cpus-per-task={2} " \
                                f"--exclusive "
                CMD = 'srun ' + slurm_options + CMD
            logger.info(f'Job command is: "{CMD}"')
            if notdry(dryrun, logger, '--Dryrun-- Would submit command'):
                p_list.append(subprocess.Popen([CMD], shell=True))

    for p in p_list:
        p.wait()

    return 0  # a clean, no-issue, exit
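A small sketch of the modification-string round trip implied above: the string built in this loop is later parsed with json.loads in Example #5. The variable name and value here are placeholders; the surrounding single quotes are shell quoting (presumably consumed before parsing), and a missing indexer arrives as the string "None", not a JSON null.

import json

# Hypothetical values standing in for one trial's variable/value pair.
modvar, vi, varindexer = 'theta', 0.25, None
modificationstr = f"\'{{\"variable\": \"{modvar}\", " \
                  f"\"value\": {vi}, " \
                  f"\"indexer\": \"{varindexer}\"}}\'"
parsed = json.loads(modificationstr.strip("'"))
assert parsed == {'variable': 'theta', 'value': 0.25, 'indexer': 'None'}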
Example #4
def main(control_file, dryrun=False, no_slurm=False, save_to_s3=False, log_level='INFO') -> int:
    # Load and save new control file
    control_dict, \
    experiments, \
    baseloadingfilename, \
    geography_name, \
    compact_geo_entity_str, \
    model_spec_name, \
    studyshortname, \
    studyid = read_study_control_file(control_file)

    # Logging formats are set up.
    logger = set_up_detailedfilelogger(loggername=studyshortname,  # same name as module, so logger is shared
                                       filename=f"step1_s{studyid}_{compact_geo_entity_str}.log",
                                       level=log_level,
                                       also_logtoconsole=True,
                                       add_filehandler_if_already_exists=True,
                                       add_consolehandler_if_already_exists=False)

    logger.info('----------------------------------------------')
    logger.info('******* %s *******' % ('BayOTA').center(30, ' '))
    logger.info('*************** Single Study *****************')
    logger.info('----------------------------------------------')
    logger.info(f"Geography = {geography_name}")
    logger.info(f"Model specification name = {model_spec_name}")
    logger.info(f"Experiments = {experiments}")
    logger.info(f"Base_loading_file_name = {baseloadingfilename}")
    logger.info('')
    logger.info('----------------------------------------------')
    logger.info('************** Model Generation **************')
    logger.info('----------------------------------------------')

    # Job command is built and submitted.
    CMD = f"{model_generator_script} {control_file} --log_level={log_level}"
    if save_to_s3:
        CMD = CMD + ' --save_to_s3'
    if not no_slurm:
        slurm_options = f"--nodes={1} " \
                        f"--ntasks={1} " \
                        f"--exclusive "
        CMD = 'srun ' + slurm_options + CMD
    logger.info(f'Job command is: "{CMD}"')
    if notdry(dryrun, logger, '--Dryrun-- Would submit command, then wait.'):
        p = subprocess.Popen([CMD], shell=True)
        p.wait()
        if p.returncode != 0:    # Return code from process is checked.
            logger.error(f"Model Generator finished with non-zero code <{p.returncode}>")
            return 1

    # A job is submitted for each experiment in the list.
    p_list = []
    for ii, exp_spec_name in enumerate(experiments):
        expid = '{:04}'.format(ii+1)
        logger.info(f"Exp. #{expid}: {exp_spec_name}")

        expactiondict = read_spec(spec_file_name=exp_spec_name, spectype='experiment')

        # An experiment control ("expcon") file is generated by adding to the studycon file.
        try:
            del control_dict["experiments"]
        except KeyError:
            logger.info("Key 'experiments' not found")
        control_dict['experiment_name'] = exp_spec_name
        expactiondict['id'] = expid
        control_dict['experiment'] = expactiondict
        control_dict['experiment']['uuid'] = control_dict['study']['uuid'] + '_e' + expid
        unique_control_name = write_control_with_uniqueid(control_dict=control_dict, name_prefix='step3_expcon',
                                                          logger=logger)

        # Job command is built and submitted.
        CMD = f"{experiment_script} {unique_control_name} --log_level={log_level}"
        if save_to_s3:
            CMD = CMD + ' --save_to_s3'
        if no_slurm:
            CMD = CMD + " --no_slurm"
        logger.info(f'Job command is: "{CMD}"')
        if notdry(dryrun, logger, '--Dryrun-- Would submit command'):
            p_list.append(subprocess.Popen([CMD], shell=True))

    if notdry(dryrun, logger, '--Dryrun-- Would wait'):
        for p in p_list:
            p.wait()

    return 0  # a clean, no-issue, exit
Example #5
def main(control_file, dryrun=False, use_s3_ws=False, save_to_s3=False, log_level='INFO') -> int:
    if save_to_s3 or use_s3_ws:
        # Connection with S3 is established.
        s3ops = establish_s3_connection(log_level, logger=None)

    # If using s3, required workspace directories are pulled from buckets.
    if use_s3_ws:
        pull_workspace_subdir_from_s3(subdirname='control', s3ops=s3ops, log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='data', s3ops=s3ops, log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='specfiles', s3ops=s3ops, log_level=log_level)

    # Control file is read.
    control_dict, \
    compact_geo_entity_str, \
    expid, \
    model_modification_string, \
    move_CASTformatted_solution_to_s3, \
    move_solution_to_s3, \
    objective_and_constraint_str, \
    s3_base_path, \
    saved_model_name, \
    saved_model_file, \
    solutions_folder_name, \
    studyid, \
    studyshortname, \
    translate_to_cast_format, \
    trial_name, \
    trialidstr = read_trialcon_file(control_file_name=control_file)

    # Logging formats are set up.
    trial_logfilename = f"bayota_step4_s{studyid}_e{expid}_t{trialidstr}_{compact_geo_entity_str}"
    logger = set_up_detailedfilelogger(loggername=trial_name,  # same name as module, so logger is shared
                                       filename=trial_logfilename + '.log',
                                       level=log_level,
                                       also_logtoconsole=True,
                                       add_filehandler_if_already_exists=True,
                                       add_consolehandler_if_already_exists=False)
    logger_study = set_up_detailedfilelogger(loggername=studyshortname,  # same name as module, so logger is shared
                                             filename=f"step1_s{studyid}_{compact_geo_entity_str}.log",
                                             level=log_level,
                                             also_logtoconsole=True,
                                             add_filehandler_if_already_exists=True,
                                             add_consolehandler_if_already_exists=False)
    logger_feasibility = set_up_detailedfilelogger(loggername='feasibility',
                                                   filename='bayota_feasibility.log',
                                                   level='info',
                                                   also_logtoconsole=True,
                                                   add_filehandler_if_already_exists=False,
                                                   add_consolehandler_if_already_exists=False)
    logger.debug(f"control file being used is: {control_file}")

    # If using s3, saved model instance is pulled from bucket.
    if use_s3_ws:
        pull_model_instance_from_s3(log_level=log_level, model_instance_name=saved_model_name, s3ops=s3ops)

    # Progress report is updated.
    progress_dict = read_control(control_file_name=control_dict['experiment']['uuid'])
    progress_dict['run_timestamps']['step4_trial_start'] = datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
    trial_uuid = control_dict['trial']['uuid']

    # *****************************
    # Make Model Modification(s)
    # *****************************
    my_model = load_model_pickle(savepath=saved_model_file, dryrun=dryrun, logprefix=logprefix)
    # Modification string is converted into a proper dictionary.
    modification_dict_withtrials = json.loads(model_modification_string)
    if not modification_dict_withtrials:
        modvar = None
        varvalue = None
    else:
        modvar, varvalue = make_model_modification(modification_dict_withtrials, dryrun, my_model, logger)

    # *********************
    # Solve
    # *********************
    notreal_notimestamp_outputdfpath = os.path.join(get_output_dir(s3=False), f"solution_{trial_name}_<timestamp>.csv")

    if notdry(dryrun, logger, f"--Dryrun-- Would run trial and save outputdf at: {notreal_notimestamp_outputdfpath}"):
        # ---- Directory preparation ----
        s3_destination_dir = s3_base_path + compact_geo_entity_str + '/' + objective_and_constraint_str + '/'
        # Solutions directory is created if it doesn't exist.
        solutions_dir = os.path.join(get_output_dir(s3=False), solutions_folder_name)
        logger.debug(f"solutions_dir = {solutions_dir}")
        os.makedirs(solutions_dir, exist_ok=True)

        solvehandler = SolveHandler()

        solver_log_file = os.path.join(get_logging_dir(s3=False), trial_logfilename + '_ipopt.log')
        solver_iters_file = os.path.join(get_logging_dir(s3=False), trial_uuid + '_ipopt.iters')

        # The problem is solved.
        solution_dict = solvehandler.basic_solve(mdl=my_model, translate_to_cast_format=translate_to_cast_format,
                                                 solverlogfile=solver_log_file, solveritersfile=solver_iters_file)
        solution_dict['solution_df']['feasible'] = solution_dict['feasible']
        logger.info(f"Trial '{trial_name}' is DONE "
                    f"(@{solution_dict['timestamp']})! "
                    f"<Solution feasible? --> {solution_dict['feasible']}> ")
        logger_feasibility.info(f"<feasible: {solution_dict['feasible']}> for {saved_model_name}_{trial_name}")
        logger_study.info(f"trial {trial_name} is DONE")

        # The progress file is updated, then moved to output directory in s3.
        progress_dict['run_timestamps']['step4_trial_done'] = datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
        iters, ipopt_time, n_vars, n_ineq_constraints, n_eq_constraints = IpoptParser().quickparse(solver_log_file)
        progress_dict['solve_characteristics'] = {'n_iterations': iters,
                                                  'solve_time (s)': ipopt_time,
                                                  'n_variables': n_vars,
                                                  'n_ineq_constraints': n_ineq_constraints,
                                                  'n_eq_constraints': n_eq_constraints}
        progress_file_name = write_progress_file(progress_dict, control_name=trial_uuid)

        if save_to_s3:
            # Progress file is moved to s3.
            return_code = s3ops.move_to_s3(local_path=os.path.join(get_control_dir(s3=False), progress_file_name + '.yaml'),
                                           destination_path=f"{s3_destination_dir + progress_file_name + '.yaml'}")
            logger.info(f"Move the progress file to s3 - exited with code <{return_code}>")
            # Solver output is moved to s3 also.
            return_code = s3ops.move_to_s3(local_path=solver_log_file,
                                           destination_path=f"{s3_destination_dir + trial_uuid + '_ipopt.log'}")
            logger.info(f"Move the solver log file to s3 - exited with code <{return_code}>")
            return_code = s3ops.move_to_s3(local_path=solver_iters_file,
                                           destination_path=f"{s3_destination_dir + trial_uuid + '_ipopt.iters'}")
            logger.info(f"Move the solver iters file to s3 - exited with code <{return_code}>")

        # Optimization objective value is added to the solution table.
        ii = 0
        for objective_component in my_model.component_objects(pyo.Objective):
            if ii < 1:
                # check whether Objective is an "indexed" component or not
                if objective_component._index == {None}:
                    solution_dict['solution_df']['solution_objective'] = pyo.value(objective_component)
                else:
                    for cidxpart in objective_component:
                        if objective_component[cidxpart].active:
                            solution_dict['solution_df']['solution_objective'] = pyo.value(objective_component[cidxpart])

                ii += 1
            else:
                logger.info('more than one objective found, only using one')
                break

        # Value of modified variable is added to the solution table.
        solution_dict['solution_df'][modvar] = varvalue
        # solution_dict['solution_df']['solution_mainconstraint_Percent_Reduction'] = pyo.value(mdlhandler.model.Percent_Reduction['N'].body)

        # *********************
        # Solution is saved.
        # *********************

        # Optimization solution table is written to file (uses comma delimiter and .csv extension)
        solution_shortname = f"{trial_name}_{solution_dict['timestamp']}.csv"
        solution_fullname = f"{saved_model_name}_{trial_name}_{solution_dict['timestamp']}.csv"

        outputdfpath_bayotaformat = os.path.join(solutions_dir, solution_fullname)
        solution_dict['solution_df'].to_csv(outputdfpath_bayotaformat)
        logger.info(f"<Solution written to: {outputdfpath_bayotaformat}>")
        logger_study.info(f"<trial {trial_name} - solution written to: {outputdfpath_bayotaformat}>")

        if save_to_s3 and move_solution_to_s3:
            return_code = s3ops.move_to_s3(local_path=outputdfpath_bayotaformat,
                                           destination_path=f"{s3_destination_dir + solution_shortname}")
            logger.info(f"Move-the-solution-to-s3 script exited with code <{return_code}>")
            logger_study.info(f"trial {trial_name} - move-the-solution-to-s3 script exited with code <{return_code}>")

        # CAST-formatted solution table is written to file (uses tab delimiter and .txt extension).
        if translate_to_cast_format:
            solution_shortname_castformat = f"castformat_{trial_name}_{solution_dict['timestamp']}.txt"
            solution_fullname_castformat = f"castformat_{saved_model_name}_{trial_name}_{solution_dict['timestamp']}.txt"

            outputdfpath_castformat = os.path.join(solutions_dir, solution_fullname_castformat)
            csv_string = solution_dict['cast_formatted_df'].to_csv(None, sep='\t', header=True,
                                                                   index=False, line_terminator='\r\n')
            with open(outputdfpath_castformat, 'w') as f:
                f.write(csv_string[:-2])  # -2 to remove blank line at end of file
            logger.info(f"<CAST-formatted solution written to: {outputdfpath_castformat}>")

            if save_to_s3 and move_CASTformatted_solution_to_s3:
                return_code = s3ops.move_to_s3(local_path=outputdfpath_castformat,
                                               destination_path=f"{s3_destination_dir + solution_shortname_castformat}")
                logger.info(f"Move-the-solution-to-s3 script exited with code <{return_code}>")

    return 0  # a clean, no-issue, exit
Example #6
def main(control_file, dryrun=False, use_s3_ws=False, save_to_s3=False, log_level='INFO') -> int:
    if save_to_s3 or use_s3_ws:
        # Connection with S3 is established.
        s3ops = establish_s3_connection(log_level, logger=None)

    # If using s3, required workspace directories are pulled from buckets.
    if use_s3_ws:
        pull_workspace_subdir_from_s3(subdirname='control', s3ops=s3ops, log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='data', s3ops=s3ops, log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='specfiles', s3ops=s3ops, log_level=log_level)

    # Control file is read.
    control_dict, \
    actionlist, \
    compact_geo_entity_str, \
    expid, \
    expname, \
    list_of_trialdicts, \
    saved_model_file, \
    studyid = read_expcon_file(control_file)

    # Logging formats are set up.
    logger = set_up_detailedfilelogger(loggername=expname,  # same name as module, so logger is shared
                                       filename=f"step3_s{studyid}_e{expid}_{compact_geo_entity_str}.log",
                                       level=log_level,
                                       also_logtoconsole=True,
                                       add_filehandler_if_already_exists=True,
                                       add_consolehandler_if_already_exists=False)

    # If using s3, saved model instance is pulled from bucket.
    if use_s3_ws:
        pull_model_instance_from_s3(log_level=log_level, model_instance_name=saved_model_file, s3ops=s3ops)

    # Progress report is updated.
    progress_dict = read_control(control_file_name=control_dict['study']['uuid'])
    progress_dict['run_timestamps']['step3b_expmodification_start'] = datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')

    # The model is modified according to the specified experiment set-up.
    logger.info(f"{logprefix} {expname} - modification action list = {actionlist}")
    if notdry(dryrun, logger, '--Dryrun-- Would modify model with action <%s>' % actionlist):

        # Check whether any model modifications are specified
        if actionlist[0] == 'none':
            logger.info(f"{logprefix} {expname} - no model modifications made")
        else:
            # Load the model object
            my_model = load_model_pickle(savepath=saved_model_file, dryrun=dryrun)

            for a in actionlist:
                modify_model(my_model, actiondict=a)

            save_model_pickle(model=my_model, savepath=saved_model_file, dryrun=dryrun, logprefix=logprefix)

    # Progress report is finalized with timestamp and saved.
    progress_dict['run_timestamps']['step3b_expmodification_done'] = datetime.datetime.today().strftime(
        '%Y-%m-%d-%H:%M:%S')
    progress_file_name = write_progress_file(progress_dict, control_name=control_dict['experiment']['uuid'])
    if save_to_s3:
        move_controlfile_to_s3(logger, s3ops, controlfile_name=progress_file_name, no_s3=False)

    return 0  # a clean, no-issue, exit
Example #7
def main(control_file,
         dryrun=False,
         use_s3_ws=False,
         save_to_s3=False,
         log_level='INFO') -> int:
    if save_to_s3 or use_s3_ws:
        # Connection with S3 is established.
        s3ops = establish_s3_connection(log_level, logger=None)

    # If using s3, required workspace directories are pulled from buckets.
    if use_s3_ws:
        pull_workspace_subdir_from_s3(subdirname='control',
                                      s3ops=s3ops,
                                      log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='data',
                                      s3ops=s3ops,
                                      log_level=log_level)
        pull_workspace_subdir_from_s3(subdirname='specfiles',
                                      s3ops=s3ops,
                                      log_level=log_level)

    # Control file is read.
    control_dict, \
    baseloadingfilename, \
    compact_geo_entity_str, \
    geography_entity, \
    geography_scale, \
    model_spec_name, \
    saved_model_name, \
    savedata2file = read_model_controlfile(control_file_name=control_file)

    # Logging formats are set up.
    logger = set_up_detailedfilelogger(
        loggername=model_spec_name,
        filename=f"step2_modelgeneration_{compact_geo_entity_str}.log",
        level=log_level,
        also_logtoconsole=True,
        add_filehandler_if_already_exists=True,
        add_consolehandler_if_already_exists=False)

    logger.info('v--------------------------------------------v')
    logger.info(' ************** Model Generation ************')
    logger.info(' geographies specification: %s' % geography_entity)
    logger.info('^--------------------------------------------^')

    # A progress report is started.
    progress_dict = control_dict.copy()
    progress_dict['run_timestamps'] = {
        'step2_generatemodel_start': datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
    }

    # Model is generated.
    my_model = None
    if notdry(dryrun, logger, '--Dryrun-- Would generate model'):
        starttime_modelinstantiation = time.time()  # Wall time - clock starts.

        my_model, dataplate = build_model(
            model_spec_name=model_spec_name,
            geoscale=geography_scale.lower(),
            geoentities=geography_entity,
            baseloadingfilename=baseloadingfilename,
            savedata2file=savedata2file)

        timefor_modelinstantiation = time.time() - starttime_modelinstantiation  # Wall time - clock stops.
        logger.info('*model instantiation done* <- it took %f seconds>' % timefor_modelinstantiation)

    # Model is saved.
    if not saved_model_name:
        saved_model_name = 'saved_instance.pickle'
    savepath = os.path.join(get_model_instances_dir(), saved_model_name)
    logger.debug(f"*saving model as pickle to {savepath}*")
    save_model_pickle(model=my_model, savepath=savepath, dryrun=dryrun)
    logger.debug(f"*model saved*")

    # If using s3, model instance is moved to bucket.
    if save_to_s3:
        move_model_pickle_to_s3(logger, s3ops, savepath)

    # Progress report is finalized with timestamp and saved.
    progress_dict['run_timestamps']['step2_generatemodel_done'] = \
        datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S')
    progress_file_name = write_progress_file(
        progress_dict, control_name=control_dict['study']['uuid'])
    if save_to_s3:
        move_controlfile_to_s3(logger,
                               s3ops,
                               controlfile_name=progress_file_name,
                               no_s3=False)
    logger.debug(f"*model generation done*")
    return 0  # a clean, no-issue, exit
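For reference, a sketch of the model-pickle round trip used across these steps, with the keyword signatures as they appear in the examples above (the path and log prefix string below are placeholders, and my_model is assumed to be an already-built model).

# Hypothetical sketch -- reuses helpers shown above; 'saved_instance.pickle' is the default name from Example #7.
import os

savepath = os.path.join(get_model_instances_dir(), 'saved_instance.pickle')
save_model_pickle(model=my_model, savepath=savepath, dryrun=False)
reloaded_model = load_model_pickle(savepath=savepath, dryrun=False, logprefix='[sketch]')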