def log_element_run_to_db(element_run_id, status, description):
    """Log a status update for an element run.

    Invokes the dbo.LogElementRun stored procedure with the element run
    id, a status string and a free-text description.
    """
    sql = """
        exec dbo.LogElementRun
            @ElementRunID = ?,
            @Status = ?,
            @Description = ?
        """
    db.execute(sql, element_run_id, status, description)
def update_process_run_status(processrunid, status):
    """Set the ProcessRunStatus column for the given process run."""
    sql = """
        UPDATE
            dbo.ProcessRun
        SET
            ProcessRunStatus = ?
        WHERE
            ProcessRunID = ?
        """
    db.execute(sql, status, processrunid)
def generate_oasis_files_records_outputs(processrunid, process_run_locationid):
    """Register the summary xref output files for a process run.

    Calls dbo.generateOasisFilesRecordsOutputs with the fixed GUL and FM
    summary xref file names.
    """
    gul_xref_filename = 'gulsummaryxref.csv'
    fm_xref_filename = 'fmsummaryxref.csv'
    sql = """
        exec dbo.generateOasisFilesRecordsOutputs 
            @ProcessRunId = ?, 
            @LocationID = ?,
            @GulSummaryXrefFileName = ?,
            @FMSummaryXrefFileName = ?
        """
    db.execute(sql, processrunid, process_run_locationid,
               gul_xref_filename, fm_xref_filename)
def generate_oasis_file_records(progoasisid, location_id):
    """Generate the Oasis input file records for a ProgOasis run.

    Calls dbo.generateOasisFilesRecords with the standard, fixed set of
    Oasis input and dictionary file names.
    """
    sql = """
        exec dbo.generateOasisFilesRecords 
            @ProgOasisId = ?,
            @LocationID = ?, 
            @ItemsFileName = 'items.csv', 
            @CoveragesFileName = 'coverages.csv',
            @ItemDictFileName = 'ItemDict.csv',
            @FMProgrammeFileName = 'fm_programme.csv',
            @FMPolicyTCFileName = 'fm_policytc.csv',
            @FMProfileFileName = 'fm_profile.csv',
            @FMXRefFileName = 'fm_xref.csv',
            @FMDictFileName = 'FMDict.csv'
        """
    db.execute(sql, progoasisid, location_id)
# Example #5
# 0
def _poll_analysis(client, analysis_status_location, element_run_id,
                   poll_interval_in_seconds):
    """Poll the analysis status endpoint until it succeeds or fails.

    Logs each poll to the element run; returns the outputs location on
    success. Raises Exception when the analysis fails or completes
    without reporting an outputs location.
    """
    while True:
        logging.getLogger().debug("Polling analysis status for: {}".format(
            analysis_status_location))
        (status, outputs_location) = \
            client.get_analysis_status(analysis_status_location)
        flamingo_db_utils.log_element_run_to_db(element_run_id, status,
                                                "In Progress")

        if status == status_code.STATUS_SUCCESS:
            if outputs_location is None:
                raise Exception("Complete but no outputs location")
            flamingo_db_utils.log_element_run_to_db(
                element_run_id, status, "Analysis Completed")
            return outputs_location
        elif status == status_code.STATUS_FAILURE:
            # BUG FIX: the original formatted an undefined name `message`,
            # so the failure path raised NameError instead of reporting the
            # failure. Report the polled status location instead.
            error_message = "Analysis failed: {}".format(
                analysis_status_location)
            logging.getLogger().error(error_message)
            raise Exception(error_message)
        time.sleep(poll_interval_in_seconds)


def _append_summary_id_meanings(upload_directory, df_output_file_details,
                                df_itemdict, df_fmdict):
    """Rewrite each output CSV with its summary-id meaning joined on.

    Portfolio-level files are left untouched. Policy-level files are
    joined against the FM dictionary (agg_id -> policy_layer); every
    other level is joined against the item dictionary on the
    "<level>_id" / "<level>_desc" columns, keyed on summary_id.
    """
    for _, row in df_output_file_details.iterrows():
        output = upload_directory + '/output/' + row['FileName']
        logging.getLogger().debug("FileName: {}".format(output))
        summary_level_name = row['SummaryLevelName']
        df_output = pd.read_csv(output)
        summary_level_id = summary_level_name.lower() + '_id'
        summary_level_desc = summary_level_name.lower() + '_desc'
        logging.getLogger().debug(
            "SummaryLevelName: {}".format(summary_level_name))
        if summary_level_name == "Portfolio":
            continue
        if summary_level_name == "Policy":
            # Join fmdict to file.
            df_summarydict = df_fmdict[['agg_id', 'policy_layer']]
            logging.getLogger().debug(
                "df_summarydict: {}".format(df_summarydict))
            df_summarydict_distinct = df_summarydict.drop_duplicates()
            df_output_temp = df_output.join(
                df_summarydict_distinct.set_index('agg_id'),
                on='summary_id')
        else:
            # Join itemdict to file.
            df_summarydict = df_itemdict[[summary_level_id,
                                          summary_level_desc]]
            df_summarydict_distinct = df_summarydict.drop_duplicates()
            df_output_temp = df_output.join(
                df_summarydict_distinct.set_index(summary_level_id),
                on='summary_id')
            logging.getLogger().debug(
                "df_summarydict_distinct: {}".format(
                    df_summarydict_distinct))
        df_output_temp.to_csv(output, encoding='utf-8', index=False)


def do_run_prog_oasis(processrunid):
    '''
    Run a programme model combination.

    Drives a full model run for the given process run id: uploads the
    exposure inputs, starts the analysis, polls until it completes,
    downloads and unpacks the outputs, links them to the process run in
    the database, and appends human-readable summary-id meanings to the
    output CSVs. Each stage is logged against the corresponding element
    run id; on any failure the process run is marked "Failed".
    '''
    element_run_ids = list()
    # Sentinel: -1 means no element run has started yet, so the except
    # handler knows whether there is anything to log a failure against.
    element_run_id = -1

    flamingo_db_utils.update_process_run_status(processrunid, "In Progress")

    try:
        base_url = flamingo_db_utils.get_base_url(processrunid)
        element_run_ids = \
            flamingo_db_utils.get_element_run_ids(processrunid)

        upload_directory = generate_summary_files(processrunid)
        logging.getLogger().debug(
            "Upload_directory: {}".format(upload_directory))

        analysis_settings_json = get_analysis_settings_json(processrunid)
        logging.getLogger().debug(analysis_settings_json)

        # Build IL (insured loss) binaries only when the analysis
        # settings request insured-loss output.
        create_il_bins = analysis_settings_json['analysis_settings'].get(
            'il_output', False)
        analysis_poll_interval_in_seconds = 5
        client = OasisAPIClient(base_url, logging.getLogger())

        # Element 1: upload the exposure inputs.
        element_run_id = element_run_ids[0][0]
        input_location = client.upload_inputs_from_directory(
            upload_directory,
            do_il=create_il_bins,
            do_build=True,
            do_clean=True)
        logging.getLogger().info("Input location: {}".format(input_location))

        flamingo_db_utils.log_element_run_to_db(
            element_run_id, "Success",
            "Exposure files location: {}".format(input_location))

        # Element 2: start the analysis.
        element_run_id = element_run_ids[1][0]
        analysis_status_location = client.run_analysis(analysis_settings_json,
                                                       input_location)

        flamingo_db_utils.log_element_run_to_db(element_run_id, "Success",
                                                "Started analysis")

        # Element 3: poll until the analysis succeeds (raises on failure).
        element_run_id = element_run_ids[2][0]
        outputs_location = _poll_analysis(
            client, analysis_status_location, element_run_id,
            analysis_poll_interval_in_seconds)

        # Element 4: download outputs and clean up the remote artefacts.
        element_run_id = element_run_ids[3][0]
        outputs_file = os.path.join(upload_directory,
                                    outputs_location + ".tar.gz")
        client.download_outputs(outputs_location, outputs_file)
        client.delete_exposure(input_location)
        client.delete_outputs(outputs_location)
        flamingo_db_utils.log_element_run_to_db(
            element_run_id, 'Success', 'Downloaded output files successfully')

        # Reuse the path computed above rather than rebuilding it.
        extract_tarball(outputs_file, upload_directory)

        output_file_list = ','.join(
            map(str, os.listdir(os.path.join(upload_directory, "output"))))
        logging.getLogger().debug(
            "Output_file_list: {}".format(output_file_list))
        db.execute(
            "exec dbo.linkOutputFileToProcessRun @ProcessRunId = ?, @OutputFiles = ?",
            processrunid, output_file_list)
        flamingo_db_utils.update_process_run_status(processrunid, 'Completed')

        # Append summary id meanings to output files.
        output_file_details = flamingo_db_utils.get_output_file_details(
            processrunid)
        logging.getLogger().debug(
            "output_file_details: {}".format(output_file_details))
        columns = [
            "FileName", "FileDesc", "PerspectiveName", "OutputID", "LECFlag",
            "AnalysisFileNameStub", "SummaryLevelName"
        ]
        # BUG FIX: DataFrame.append was deprecated and removed in pandas
        # 2.x; build the frame from the record dicts in one shot instead.
        recs = [dict(zip(columns, list(tup))) for tup in output_file_details]
        df_output_file_details = pd.DataFrame(recs, columns=columns)
        logging.getLogger().debug(
            "df_output_file_details:\n{}".format(df_output_file_details))

        prog_oasis_location = flamingo_db_utils.get_prog_oasis_location(
            processrunid)
        itemdict = prog_oasis_location + '/ItemDict.csv'
        fmdict = prog_oasis_location + '/FMDict.csv'
        df_itemdict = pd.read_csv(itemdict)
        df_fmdict = pd.read_csv(fmdict)
        df_fmdict["policy_layer"] = df_fmdict["policy_name"].map(
            str) + '--' + df_fmdict["layer_name"].map(str)

        _append_summary_id_meanings(upload_directory, df_output_file_details,
                                    df_itemdict, df_fmdict)

    except Exception as e:
        flamingo_db_utils.update_process_run_status(processrunid, "Failed")
        if element_run_id != -1:
            flamingo_db_utils.log_element_run_to_db(element_run_id, 'Failed: ',
                                                    str(e))
        logging.getLogger().exception(
            "Failed to run prog oasis: {}".format(processrunid))
def generate_oasis_files_outputs(processrunid):
    """Generate the Oasis output file records for a process run."""
    sql = "exec dbo.generateOasisFilesOutputs ?"
    db.execute(sql, processrunid)
def load_programme_data(progid):
    """Load programme data for the given programme id."""
    sql = "exec dbo.LoadProgrammeData ?"
    db.execute(sql, progid)
def generate_canonical_model(progid):
    """Generate the canonical model for the given programme id."""
    sql = "exec dbo.generateCanonicalModel ?"
    db.execute(sql, progid)
def generate_output_transform_file_records_for_progoasis(progoasisid, modelfilename):
    """Create output transform file records for a ProgOasis run.

    NOTE(review): the stored procedure name reads
    "generateOutputTransformFileRecordsForProgOasos" ("Oasos", not
    "Oasis"). The database object may genuinely carry this spelling —
    confirm against the schema before renaming it here.
    """
    db.execute(
        "exec dbo.generateOutputTransformFileRecordsForProgOasos ?, ?",
        progoasisid, modelfilename)
def generate_output_transform_file_records_for_prog(progid, canlocfilename, canaccfilename):
    """Create output transform file records for a programme from its
    canonical location and account file names."""
    sql = "exec dbo.generateOutputTransformFileRecordsForProg ?, ?, ?"
    db.execute(sql, progid, canlocfilename, canaccfilename)
def create_api_error_file_record(filename, progoasisid):
    """Record an API error file against a ProgOasis run.

    NOTE(review): unlike the sibling wrappers, the procedure is invoked
    without the dbo. schema prefix — presumably it resolves via the
    connection's default schema; verify against the database.
    """
    db.execute(
        "exec createAPIErrorFileRecord ?, ?",
        filename, progoasisid)
def update_progoasis_status(progoasisid, status):
    """Set the status of a ProgOasis record.

    BUG FIX: the original SQL read "exec dbo.updateProgOasisStatus?, ?"
    — no space before the first parameter marker, which sends invalid
    T-SQL to the server.
    """
    db.execute(
        "exec dbo.updateProgOasisStatus ?, ?", progoasisid, status)
def get_api_return_data(progoasisid, filename, sessionid):
    """Invoke getAPI1aReturnData for a ProgOasis run, API file name and
    session id."""
    sql = "exec getAPI1aReturnData ?, ?, ?"
    db.execute(sql, progoasisid, filename, sessionid)
def update_prog_status(progid, status):
    """Set the status of a programme record.

    BUG FIX: the original SQL read "exec dbo.updateProgStatus?, ?" — no
    space before the first parameter marker, which sends invalid T-SQL
    to the server.
    """
    db.execute(
        "exec dbo.updateProgStatus ?, ?", progid, status)