Code Example #1
def d600(cf, source_output_path, Table_mapping, Core_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    try:
        core_tables_list = TransformDDL.get_src_core_tbls(Table_mapping)
        for tbl_name in core_tables_list:
            col_ddl = ''
            core_tbl_header = 'CREATE SET TABLE ' + cf.core_table + '.' + tbl_name + ', FALLBACK (\n'

            for core_tbl_index, core_tbl_row in Core_tables[
                    Core_tables['Table name'] == tbl_name].iterrows():
                col_ddl += core_tbl_row['Column name'] + ' ' + core_tbl_row['Data type'] + ' '
                if core_tbl_row['Data type'].find('VARCHAR') != -1:
                    col_ddl += 'CHARACTER SET UNICODE NOT CASESPECIFIC '
                if core_tbl_row['Mandatory'] == 'Y':
                    col_ddl += 'NOT NULL '
                col_ddl += '\n ,'
            core_tech_cols = 'Start_Ts\tTIMESTAMP(6) WITH TIME ZONE \n' + ',End_Ts\tTIMESTAMP(6) WITH TIME ZONE \n'
            core_tech_cols += ",Start_Date\tDATE FORMAT 'YYYY-MM-DD' \n" + ",End_Date\tDATE FORMAT 'YYYY-MM-DD' \n"
            core_tech_cols += ',Record_Deleted_Flag\tBYTEINT \n' + ',Ctl_Id\tSMALLINT COMPRESS(997) \n'
            core_tech_cols += ',Process_Name\tVARCHAR(128)\n' + ',Process_Id\tINTEGER \n'
            core_tech_cols += ',Update_Process_Name\tVARCHAR(128)\n' + ',Update_Process_Id\tINTEGER \n'
            core_tbl_pk = ') UNIQUE PRIMARY INDEX (' + TransformDDL.get_trgt_pk(Core_tables, tbl_name) + '); \n  \n'
            core_tbl_ddl = core_tbl_header + col_ddl + core_tech_cols + core_tbl_pk
            f.write(core_tbl_ddl)

    except Exception:
        funcs.TemplateLogError(cf.output_path, source_output_path, file_name, traceback.format_exc()).log_error()

    f.close()
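All of the generators on this page share the same scaffolding: a project-internal funcs module (file naming, output writing, error logging), a TransformDDL module (aliased TDDL in some snippets) for SMX-sheet lookups such as get_src_core_tbls and get_trgt_pk, a pm parameters module, a cf configuration object carrying database names, and `import traceback` for the error logger. None of those modules are shown here. As a rough orientation only, the writer helper presumably behaves like this minimal sketch (a hypothetical reconstruction, not the project's actual code):

import os

class WriteFile:
    # Minimal stand-in for funcs.WriteFile: writes text to
    # <output_path>/<file_name>.<extension> until closed.
    def __init__(self, output_path, file_name, extension):
        self.path = os.path.join(output_path, file_name + "." + extension)
        self._handle = open(self.path, "w", encoding="utf-8")

    def write(self, text):
        self._handle.write(text)

    def close(self):
        self._handle.close()

def get_file_name(script_path):
    # 'D600.py' -> 'D600': each generated SQL file is named after its template script.
    return os.path.splitext(os.path.basename(script_path))[0]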
Code Example #2
def bmap_dup_desc_check(cf, source_output_path, table_mapping, core_tables,
                        BMAP_VALUES):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    core_tables_look_ups = core_tables[core_tables['Is lookup'] == 'Y']
    core_tables_look_ups = core_tables_look_ups[
        core_tables_look_ups['Column name'].str.endswith('_DESC')]
    count = 1
    lookup_tables_list = TransformDDL.get_src_lkp_tbls(table_mapping, core_tables)
    code_set_names = TransformDDL.get_code_set_names(BMAP_VALUES)

    for code_set_name in code_set_names:
        for table_name in lookup_tables_list:
            if table_name == code_set_name:
                for core_table_index, core_table_row in core_tables_look_ups.iterrows():
                    if core_table_row['Table name'] == table_name:
                        call_line1 = "SEL " + core_table_row['Column name'] + " FROM " + cf.base_DB + "." + table_name
                        call_line2 = " GROUP BY " + core_table_row['Column name'] + " HAVING COUNT(*)>1;\n\n\n"
                        bmap_check_name_line = "---bmap_dup_check_desc_Test_Case_" + str(count) + "---"

                        call_exp = bmap_check_name_line + "\n" + call_line1 + call_line2
                        f.write(call_exp)
                        count += 1
    f.close()
Code Example #3
def bmap_null_check(cf, source_output_path, table_mapping, core_tables,
                    BMAP_values):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    core_tables_look_ups = core_tables[core_tables['Is lookup'] == 'Y']
    count = 1
    lookup_tables_list = TransformDDL.get_src_lkp_tbls(table_mapping, core_tables)
    code_set_names = TransformDDL.get_code_set_names(BMAP_values)
    for code_set_name in code_set_names:
        for table_name in lookup_tables_list:
            if table_name == code_set_name:
                CD_column = ''
                DESC_column = ''
                for core_table_index, core_table_row in core_tables_look_ups.iterrows():
                    if core_table_row['Table name'] == table_name:
                        if str(core_table_row['Column name']).endswith('_CD') and core_table_row['PK'] == 'Y':
                            CD_column = core_table_row['Column name']
                        if str(core_table_row['Column name']).endswith('_DESC'):
                            DESC_column = core_table_row['Column name']
                bmap_check_name_line = "---bmap_null_check_Test_Case_" + str(count) + "---"
                call_line1 = "SEL * FROM " + cf.base_DB + "." + table_name
                call_line2 = " WHERE " + CD_column + " IS NULL" + " OR " + DESC_column + " IS NULL;\n\n\n"
                call_exp = bmap_check_name_line + "\n" + call_line1 + call_line2
                f.write(call_exp)
                count += 1
    f.close()
Code Example #4
def d608(cf, source_output_path, source_name, STG_tables, Core_tables,
         BMAP_values):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    src_code_set_names = funcs.get_src_code_set_names(STG_tables, source_name)
    code_set_names = TransformDDL.get_code_set_names(BMAP_values)
    for code_set_name in src_code_set_names:
        for code_set in code_set_names:
            if code_set_name == code_set:
                tbl_pk = TransformDDL.get_trgt_pk(Core_tables, code_set)
                columns = TransformDDL.get_lkp_tbl_Cols(Core_tables, code_set)
                for bmap_values_indx, bmap_values_row in BMAP_values[
                        (BMAP_values['Code set name'] == code_set)
                        & (BMAP_values['Layer'] == 'CORE')][['EDW code', 'Description']].drop_duplicates().iterrows():
                    del_st = "DELETE FROM " + cf.core_table + "." + code_set + " WHERE " + tbl_pk + " = '" + str(bmap_values_row['EDW code']) + "';\n"
                    insert_into_st = "INSERT INTO " + cf.core_table + "." + code_set + "(" + columns + ")\nVALUES "
                    insert_values = ''
                    if columns.count(',') == 1:  # code column plus one description column
                        insert_values = "(" + str(bmap_values_row["EDW code"]) + ", '" + str(bmap_values_row["Description"]) + "');\n\n"
                    elif columns.count(',') == 2:  # two description columns, both filled from Description
                        insert_values = "(" + str(bmap_values_row["EDW code"]) + ", '" + str(bmap_values_row["Description"]) + "','" + str(bmap_values_row["Description"]) + "');\n\n"
                    insert_st = insert_into_st + insert_values
                    f.write(del_st)
                    f.write(insert_st)
    f.close()
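d608 emits a DELETE followed by an INSERT for every distinct code-set value, so re-running the script replaces rows instead of duplicating them. The trace below shows roughly what one loop iteration produces; all names and values here are hypothetical:

# Hypothetical values mirroring one iteration of d608's inner loop.
core_db, code_set, tbl_pk = "CORE_T", "GENDER", "GENDER_CD"
edw_code, description = 1, "Male"
columns = "GENDER_CD,GENDER_DESC"  # one comma -> the two-column insert branch

del_st = "DELETE FROM " + core_db + "." + code_set + " WHERE " + tbl_pk + " = '" + str(edw_code) + "';\n"
insert_st = ("INSERT INTO " + core_db + "." + code_set + "(" + columns + ")\nVALUES "
             + "(" + str(edw_code) + ", '" + description + "');\n\n")
print(del_st + insert_st)
# DELETE FROM CORE_T.GENDER WHERE GENDER_CD = '1';
# INSERT INTO CORE_T.GENDER(GENDER_CD,GENDER_DESC)
# VALUES (1, 'Male');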
Code Example #5
def hist_timegap_check(cf, source_output_path, table_mapping, core_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    count = 1
    for table_mapping_index, table_mapping_row in table_mapping.iterrows():
        if table_mapping_row['Historization algorithm'] == 'HISTORY':
            hist_check_name_line = "---hist_timegap_Test_Case_" + str(count) + "---"
            target_table = table_mapping_row['Target table name']
            process_name = table_mapping_row['Mapping name']
            hist_cols = table_mapping_row['Historization columns'].split(',')
            hist_cols = [x.strip() for x in hist_cols]
            start_date = TransformDDL.get_core_tbl_sart_date_column(core_tables, target_table)
            end_date = TransformDDL.get_core_tbl_end_date_column(core_tables, target_table)
            hist_keys = TransformDDL.get_trgt_hist_keys(core_tables, target_table, hist_cols)
            call_line1 = "SELECT " + hist_keys + ',' + start_date + ',end_'
            call_line2 = "FROM ( sel " + hist_keys + ',' + start_date + ',MAX(' + end_date + ')over(partition by '
            call_line3 = hist_keys + ' order by ' + start_date + ' rows between 1 preceding and 1 preceding)as end_'
            call_line4 = 'FROM ' + cf.base_DB + '.' + target_table
            call_line5 = "WHERE PROCESS_NAME = 'TXF_CORE_" + process_name + "')tst"
            call_line6 = "WHERE tst.end_ + INTERVAL'1'SECOND<>tst." + start_date + ';' + '\n\n\n'
            hist_test_case_exp = hist_check_name_line + '\n' + call_line1 + '\n' + call_line2 + '\n' + call_line3 + '\n' \
                                 + call_line4 + '\n' + call_line5 + '\n' + call_line6
            f.write(hist_test_case_exp)
            count += 1
    f.close()
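The time-gap probe relies on MAX(end) OVER (PARTITION BY keys ORDER BY start ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING) to pull the previous version's end timestamp alongside each row, then reports rows where previous end + 1 second differs from the current start, i.e. a gap or an overlap in the history chain. Rendered with hypothetical names (key CUST_ID, columns Start_Ts/End_Ts, table BASE_T.CUST, mapping CUST_MAP), one generated test case would read roughly:

# Hypothetical rendering of a single hist_timegap test case.
expected_sql = """---hist_timegap_Test_Case_1---
SELECT CUST_ID,Start_Ts,end_
FROM ( sel CUST_ID,Start_Ts,MAX(End_Ts)over(partition by 
CUST_ID order by Start_Ts rows between 1 preceding and 1 preceding)as end_
FROM BASE_T.CUST
WHERE PROCESS_NAME = 'TXF_CORE_CUST_MAP')tst
WHERE tst.end_ + INTERVAL'1'SECOND<>tst.Start_Ts;
"""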
Code Example #6
def hist_start_end_null_check(cf, source_output_path, table_mapping,
                              core_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    count = 1
    for table_mapping_index, table_mapping_row in table_mapping.iterrows():
        if table_mapping_row['Historization algorithm'] == 'HISTORY':
            hist_check_name_line = "---hist_start_end_null_Test_Case_" + str(count) + "---"
            target_table = table_mapping_row['Target table name']
            process_name = table_mapping_row['Mapping name']
            hist_cols = table_mapping_row['Historization columns'].split(',')
            hist_cols = [x.strip() for x in hist_cols]
            hist_keys = TransformDDL.get_trgt_hist_keys(core_tables, target_table, hist_cols)
            start_date = TransformDDL.get_core_tbl_sart_date_column(core_tables, target_table)
            end_date = TransformDDL.get_core_tbl_end_date_column(core_tables, target_table)
            call_line1 = "SELECT " + hist_keys + " FROM " + cf.base_DB + '.' + target_table + " WHERE " + start_date + " IS NULL "
            call_line2 = "AND " + end_date + " IS NULL AND PROCESS_NAME = 'TXF_CORE_" + process_name + "'"
            call_line3 = "GROUP BY " + hist_keys
            call_line4 = "HAVING COUNT(*)>1;" + '\n\n\n'
            hist_test_case_exp = hist_check_name_line + '\n' + call_line1 + '\n' + call_line2 + '\n' \
                                 + call_line3 + '\n' + call_line4
            f.write(hist_test_case_exp)
            count += 1
    f.close()
Code Example #7
def duplicates_check(cf, source_output_path, table_mapping, core_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    count = 0
    core_tables_list = TransformDDL.get_src_core_tbls(table_mapping)
    for table_name in core_tables_list:
        count = count + 1
        core_table_pks = TransformDDL.get_trgt_pk(core_tables, table_name)
        dup_line = "---DUP_Test_Case_" + str(count) + "---" + '\n'
        dup_test_case_exp_line1 = 'SEL ' + core_table_pks + ' FROM ' + cf.base_DB + '.'
        dup_test_case_exp_line2 = table_name + ' GROUP BY ' + core_table_pks + ' HAVING COUNT(*)>1;' + '\n' + '\n'
        f.write(dup_line + dup_test_case_exp_line1 + dup_test_case_exp_line2)
    f.close()
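The duplicate probe is the usual "GROUP BY the primary-key columns HAVING COUNT(*)>1" pattern: any row returned is a key value that occurs more than once in the core table. For a hypothetical table BASE_T.CUST_ACCT with composite PK CUST_ID,ACCT_ID, the generated test case would be roughly:

# Hypothetical rendering of one duplicates_check test case.
expected_sql = """---DUP_Test_Case_1---
SEL CUST_ID,ACCT_ID FROM BASE_T.CUST_ACCT GROUP BY CUST_ID,ACCT_ID HAVING COUNT(*)>1;
"""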
Code Example #8
File: D003.py Project: PeterMGeorge/new_udi
def d003(cf, source_output_path, BMAP_values, BMAP):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    try:
        BMAP_values = BMAP_values[BMAP_values["Code set name"] != '']
        insert_st_header = "INSERT INTO " + cf.UTLFW_t + ".BMAP_STANDARD_MAP ( \n"
        bm_tbl_cols = "Source_Code \n" + ",Domain_Id  \n" + ",Code_Set_Id  \n" + ",EDW_Code  \n" + ",Description  \n"
        bm_tbl_cols += ",Start_Date  \n" + ",End_Date  \n" + ",Record_Deleted_Flag  \n" + ",Ctl_Id  \n" + ",Process_Name \n"
        bm_tbl_cols += ",Process_Id  \n" + ",Update_Process_Name  \n" + ",Update_Process_Id  \n) VALUES ( \n"
        insert_st_header += bm_tbl_cols

        for bmap_index, bmap_row in BMAP_values.iterrows():
            domain_id = ""
            edw_code = ""
            source_code = str(bmap_row["Source code"]).strip()
            if bmap_row["Code domain ID"] != '':
                domain_id = str(int(bmap_row["Code domain ID"]))
            code_set_id = TransformDDL.get_bmap_code_set_id(BMAP, bmap_row["Code set name"])

            if bmap_row["EDW code"] != '':
                edw_code = str(int(bmap_row["EDW code"]))

            process_name = ",'" + TransformDDL.get_bmap_physical_tbl_name(BMAP, bmap_row["Code set name"]) + "'"
            insert_vals = "'" + source_code + "'\n" + ",'" + domain_id + "'\n"
            insert_vals += ",'" + code_set_id + "'\n" + ",'" + edw_code + "'\n"
            insert_vals += ",'" + str(bmap_row["Description"]).strip() + "'\n" + ",CURRENT_DATE \n ,DATE  '2999-12-31' \n ,0 \n ,0 \n"
            insert_vals += process_name + "\n,0\n ,NULL \n ,NULL \n);"

            insert_st = insert_st_header + insert_vals

            del_st = "DELETE FROM " + cf.UTLFW_t + ".BMAP_STANDARD_MAP \n WHERE Domain_Id = '" + domain_id + "'\n"
            del_st += "AND Source_Code = '" + source_code + "' \n AND Code_Set_Id = '" + code_set_id + "';"
            f.write(del_st)
            f.write("\n")
            f.write(insert_st)
            f.write("\n\n")

    except Exception:
        funcs.TemplateLogError(cf.output_path, source_output_path, file_name,
                               traceback.format_exc()).log_error()
    f.close()
Code Example #9
File: stgCounts.py Project: OmarNour/new_udi
def stgCounts(cf, source_output_path, System, STG_tables, LOADING_TYPE, flag):
    file_name = funcs.get_file_name(__file__) + '_' + flag
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    if flag == 'Accepted':
        template_path = cf.templates_path + "/" + pm.compareSTGacccounts_template_filename
    else:
        template_path = cf.templates_path + "/" + pm.compareSTGcounts_template_filename
    smx_path = cf.smx_path
    template_string = ""
    try:
        REJ_TABLE_NAME = System['Rejection Table Name']
    except Exception:
        REJ_TABLE_NAME = ''
    try:
        REJ_TABLE_RULE = System['Rejection Table Rules']
    except Exception:
        REJ_TABLE_RULE = ''
    try:
        source_DB = System['Source DB']
    except Exception:
        source_DB = ''

    try:
        template_file = open(template_path, "r")
    except Exception:
        template_file = open(smx_path, "r")
    if LOADING_TYPE == 'ONLINE':
        LOADING_TYPE = 'STG_ONLINE'
    else:
        LOADING_TYPE = 'STG_LAYER'
    for i in template_file.readlines():
        if i != "":
            template_string = template_string + i
    stg_table_names = funcs.get_stg_tables(STG_tables)
    for stg_tables_df_index, stg_tables_df_row in stg_table_names[
            (stg_table_names['Table name'] != REJ_TABLE_NAME)
            & (stg_table_names['Table name'] != REJ_TABLE_RULE)].iterrows():
        TABLE_NAME = stg_tables_df_row['Table name']
        TBL_PKs = TDDL.get_trgt_pk(STG_tables, TABLE_NAME)
        if flag == 'Accepted':
            output_script = template_string.format(
                TABLE_NAME=TABLE_NAME,
                STG_DATABASE=cf.T_STG,
                source_DB=source_DB,
                LOADING_TYPE=LOADING_TYPE,
                REJ_TABLE_NAME=REJ_TABLE_NAME,
                REJ_TABLE_RULE=REJ_TABLE_RULE,
                TBL_PKs=TBL_PKs)
        else:
            output_script = template_string.format(TABLE_NAME=TABLE_NAME,
                                                   STG_DATABASE=cf.T_STG,
                                                   WRK_DATABASE=cf.t_WRK,
                                                   source_DB=source_DB)

        seperation_line = '--------------------------------------------------------------------------------------------------------------------------------------------------------------------'
        output_script = output_script.upper() + '\n' + seperation_line + '\n' + seperation_line + '\n'
        f.write(output_script.replace('Â', ' '))
    f.close()
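Unlike the concatenation-based generators above, stgCounts (and d215 and dataValidation below) are template driven: a SQL template file is read into one string and its named placeholders are filled per table with str.format. A minimal sketch of that mechanism, using a made-up two-line template rather than the project's real template files:

# Made-up miniature of the template mechanism; the real templates live in
# cf.templates_path and carry many more placeholders.
template_string = (
    "SELECT COUNT(*) FROM {STG_DATABASE}.{TABLE_NAME};\n"
    "SELECT COUNT(*) FROM {WRK_DATABASE}.{TABLE_NAME};\n"
)
output_script = template_string.format(
    TABLE_NAME="CUSTOMER", STG_DATABASE="T_STG", WRK_DATABASE="T_WRK")
print(output_script.upper())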
Code Example #10
def bmap_check(cf, source_output_path, table_mapping, core_tables,
               BMAP_VALUES):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    core_tables_look_ups = core_tables[core_tables['Is lookup'] == 'Y']
    core_tables_look_ups = core_tables_look_ups[
        core_tables_look_ups['Column name'].str.endswith('_CD')]
    core_tables = core_tables[core_tables['Is lookup'] != 'Y']
    count = 1
    core_tables_list = TransformDDL.get_src_core_tbls(table_mapping)
    code_set_names = TransformDDL.get_code_set_names(BMAP_VALUES)

    for table_name in core_tables_list:
        for core_table_index, core_table_row in core_tables[
                core_tables['Table name'] == table_name].iterrows():
            for code_set_name in code_set_names:
                for core_tables_look_ups_index, core_tables_look_ups_row in core_tables_look_ups.iterrows():
                    if str(core_tables_look_ups_row['Table name']) == code_set_name:
                        if (core_tables_look_ups_row['Column name'] == core_table_row['Column name']
                                and core_table_row['PK'] == 'Y'):
                            target_model_table = str(core_table_row['Table name'])
                            target_model_column = str(funcs.get_model_col(core_tables, target_model_table))
                            lookup_table_name = str(core_tables_look_ups_row['Table name'])
                            target_column_key = str(core_tables_look_ups_row['Column name'])

                            call_line1 = "SEL " + cf.base_DB + "." + target_model_table + "." + target_column_key
                            call_line2 = "," + cf.base_DB + "." + target_model_table + "." + target_model_column + '\n'
                            call_line3 = " FROM " + cf.base_DB + "." + target_model_table + " LEFT JOIN " + cf.base_DB + "." + lookup_table_name + '\n'
                            call_line4 = " ON " + cf.base_DB + "." + target_model_table + "." + target_column_key + '=' + cf.base_DB + "." + lookup_table_name + "." + target_column_key + '\n'
                            call_line5 = " WHERE " + cf.base_DB + "." + lookup_table_name + "." + target_column_key + " IS NULL;\n\n\n"
                            bmap_check_name_line = "---bmap_check_Test_Case_" + str(count) + "---"

                            call_exp = bmap_check_name_line + "\n" + call_line1 + call_line2 + call_line3 + call_line4 + call_line5
                            f.write(call_exp)
                            count += 1
    f.close()
Code Example #11
def d607(cf, source_output_path, Core_tables, BMAP_values):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    core_tables_list = TransformDDL.get_core_tables_list(Core_tables)
    code_set_names = TransformDDL.get_code_set_names(BMAP_values)
    Data_mover_flag = cf.Data_mover_flag
    if Data_mover_flag == 1:
        Run_date_column = ", RUN_DATE TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP\n"
        partition_statement = "PARTITION BY RANGE_N(RUN_DATE BETWEEN TIMESTAMP '2020-03-03 00:00:00.000000+00:00' AND TIMESTAMP '2100-03-03 00:00:00.000000+00:00' EACH INTERVAL'1'DAY)\n"
    else:
        Run_date_column = ""
        partition_statement = ""

    for code_set in code_set_names:
        lkp_ddl = ''
        lkp_tbl_header = 'CREATE SET TABLE ' + cf.core_table + '.' + code_set + ', FALLBACK (\n'

        if code_set not in core_tables_list:
            error_txt = "--Error: Table " + code_set + " Not Found in Core tables. Can't generate its ddl. \n"
            f.write(error_txt)
            continue  # skip emitting a DDL shell for a table with no column definitions

        for lkp_tbl_indx, lkp_tbl_row in Core_tables[
                Core_tables['Table name'] == code_set].iterrows():
            lkp_ddl += lkp_tbl_row['Column name'] + ' ' + lkp_tbl_row['Data type'] + ' '
            if lkp_tbl_row['Data type'].find('VARCHAR') != -1:
                lkp_ddl += 'CHARACTER SET UNICODE NOT CASESPECIFIC' + ' '
            if lkp_tbl_row['Mandatory'] == 'Y':
                lkp_ddl += 'NOT NULL '

            lkp_ddl += ',\n'

        core_tech_cols = 'Start_Ts\tTIMESTAMP(6) WITH TIME ZONE \n' + ',End_Ts\tTIMESTAMP(6) WITH TIME ZONE \n'
        core_tech_cols += ",Start_Date\tDATE FORMAT 'YYYY-MM-DD' \n" + ",End_Date\tDATE FORMAT 'YYYY-MM-DD' \n"
        core_tech_cols += ',Record_Deleted_Flag\tBYTEINT \n' + ',Ctl_Id\tSMALLINT COMPRESS(997) \n'
        core_tech_cols += ',Process_Name\tVARCHAR(128)\n' + ',Process_Id\tINTEGER \n'
        core_tech_cols += ',Update_Process_Name\tVARCHAR(128)\n' + ',Update_Process_Id\tINTEGER \n' + Run_date_column
        lkp_tbl_pk = ') UNIQUE PRIMARY INDEX (' + TransformDDL.get_trgt_pk(Core_tables, code_set) + ')\n' + partition_statement + '; \n  \n'
        lkp_tbl_ddl = lkp_tbl_header + lkp_ddl + core_tech_cols + "\n" + lkp_tbl_pk
        f.write(lkp_tbl_ddl)
    f.close()
Code Example #12
def d215(cf, source_output_path, source_name, System, STG_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    smx_path = cf.smx_path
    template_path = cf.templates_path + "/" + pm.D215_template_filename
    template_string = ""
    try:
        REJ_TABLE_NAME = System['Rejection Table Name']
    except Exception:
        REJ_TABLE_NAME = ''
    try:
        REJ_TABLE_RULE = System['Rejection Table Rules']
    except Exception:
        REJ_TABLE_RULE = ''

    try:
        source_DB = System['Source DB']
    except Exception:
        source_DB = ''

    try:
        template_file = open(template_path, "r")
    except Exception:
        template_file = open(smx_path, "r")

    for i in template_file.readlines():
        if i != "":
            template_string = template_string + i
    stg_table_names = funcs.get_stg_tables(STG_tables)
    for stg_tables_df_index, stg_tables_df_row in stg_table_names[(stg_table_names['Table name'] != REJ_TABLE_NAME) & (stg_table_names['Table name'] != REJ_TABLE_RULE)].iterrows():
        TABLE_NAME = stg_tables_df_row['Table name']
        TABLE_COLUMNS = funcs.get_stg_table_columns(STG_tables, source_name, TABLE_NAME)
        TBL_PKs = TDDL.get_trgt_pk(STG_tables, TABLE_NAME)
        STG_TABLE_COLUMNS = ""
        WRK_TABLE_COLUMNS = ""
        lengthh = len(TABLE_COLUMNS)
        for stg_tbl_index, stg_tbl_row in TABLE_COLUMNS.iterrows():
            align = '' if stg_tbl_index >= lengthh - 1 else '\n\t'
            STG_TABLE_COLUMNS += 'STG_TBL.' + '"' + stg_tbl_row['Column name'] + '"' + ',' + align
            WRK_TABLE_COLUMNS += 'WRK_TBL.' + '"' + stg_tbl_row['Column name'] + '"' + ',' + align
        output_script = template_string.format(TABLE_NAME=TABLE_NAME,
                                               STG_TABLE_COLUMNS=STG_TABLE_COLUMNS,
                                               WRK_TABLE_COLUMNS=WRK_TABLE_COLUMNS,
                                               STG_DATABASE=cf.T_STG,
                                               WRK_DATABASE=cf.t_WRK,
                                               STG_VDATABASE=cf.v_stg,
                                               REJ_TABLE_NAME=REJ_TABLE_NAME,
                                               REJ_TABLE_RULE=REJ_TABLE_RULE,
                                               TBL_PKs=TBL_PKs,
                                               source_DB=source_DB
                                               )
        output_script = output_script.upper() + '\n' + '\n' + '\n'
        f.write(output_script.replace('Â', ' '))
    f.close()
Code Example #13
def d610(cf, source_output_path, Table_mapping):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    try:
        core_tables_list = TransformDDL.get_src_core_tbls(Table_mapping)

        for tbl_name in core_tables_list:
            core_view = 'REPLACE VIEW ' + cf.core_view + '.' + tbl_name + ' AS SELECT * FROM ' + cf.core_table + '.' + tbl_name + '; \n'
            f.write(core_view)
    except Exception:
        funcs.TemplateLogError(cf.output_path, source_output_path, file_name, traceback.format_exc()).log_error()
    f.close()
Code Example #14
def hist_start_end_null_check(cf, source_output_path, table_mapping,
                              core_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    count = 1
    for table_mapping_index, table_mapping_row in table_mapping.iterrows():
        if table_mapping_row['Historization algorithm'] == 'HISTORY':
            # Flags history rows whose start date falls after their end date.
            hist_check_name_line = "---hist_start_gr_end_Test_Case_" + str(count) + "---"
            target_table = table_mapping_row['Target table name']
            process_name = table_mapping_row['Mapping name']
            start_date = TransformDDL.get_core_tbl_sart_date_column(core_tables, target_table)
            end_date = TransformDDL.get_core_tbl_end_date_column(core_tables, target_table)
            call_line1 = "SELECT * FROM " + cf.base_DB + '.' + target_table + " WHERE " + start_date + " > " + end_date
            call_line2 = "AND PROCESS_NAME = 'TXF_CORE_" + process_name + "' ;" + '\n\n\n'
            hist_test_case_exp = hist_check_name_line + '\n' + call_line1 + '\n' + call_line2
            f.write(hist_test_case_exp)
            count += 1
    f.close()
Code Example #15
File: D610.py Project: OmarNour/new_udi
def d610(cf, source_output_path, Table_mapping, STG_tables, source_name):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    core_tables_list = TransformDDL.get_src_core_tbls(Table_mapping)
    src_look_up_tables = funcs.get_src_code_set_names(STG_tables, source_name)

    for tbl_name in core_tables_list:
        core_view = 'REPLACE VIEW ' + cf.core_view + '.' + tbl_name + ' AS LOCK ROW FOR ACCESS SELECT * FROM ' + cf.core_table + '.' + tbl_name + '; \n'
        f.write(core_view)
    for src_look_up_table in src_look_up_tables:
        core_view = 'REPLACE VIEW ' + cf.core_view + '.' + src_look_up_table + ' AS LOCK ROW FOR ACCESS SELECT * FROM ' + cf.core_table + '.' + src_look_up_table + '; \n'
        f.write(core_view)
    f.close()
Code Example #16
File: D607.py Project: PeterMGeorge/new_udi
def d607(cf, source_output_path, Core_tables, BMAP_values):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    try:
        core_tables_list = TransformDDL.get_core_tables_list(Core_tables)
        code_set_names = TransformDDL.get_code_set_names(BMAP_values)

        for code_set in code_set_names:
            lkp_ddl = ''
            lkp_tbl_header = 'CREATE SET TABLE ' + cf.core_table + '.' + code_set + ', FALLBACK (\n'

            if code_set not in core_tables_list:
                error_txt = "--Error: Table " + code_set + " Not Found in Core tables. Can't generate its ddl. \n"
                f.write(error_txt)
                continue  # skip emitting a DDL shell for a table with no column definitions

            for lkp_tbl_indx, lkp_tbl_row in Core_tables[
                    Core_tables['Table name'] == code_set].iterrows():
                lkp_ddl += lkp_tbl_row['Column name'] + ' ' + lkp_tbl_row['Data type'] + ' '
                if lkp_tbl_row['Data type'].find('VARCHAR') != -1:
                    lkp_ddl += 'CHARACTER SET UNICODE NOT CASESPECIFIC' + ' '
                if lkp_tbl_row['Mandatory'] == 'Y':
                    lkp_ddl += 'NOT NULL '

                lkp_ddl += ',\n'

            core_tech_cols = 'Start_Ts\tTIMESTAMP(6) WITH TIME ZONE \n' + ',End_Ts\tTIMESTAMP(6) WITH TIME ZONE \n'
            core_tech_cols += ",Start_Date\tDATE FORMAT 'YYYY-MM-DD' \n" + ",End_Date\tDATE FORMAT 'YYYY-MM-DD' \n"
            core_tech_cols += ',Record_Deleted_Flag\tBYTEINT \n' + ',Ctl_Id\tSMALLINT COMPRESS(997) \n'
            core_tech_cols += ',Process_Name\tVARCHAR(128)\n' + ',Process_Id\tINTEGER \n'
            core_tech_cols += ',Update_Process_Name\tVARCHAR(128)\n' + ',Update_Process_Id\tINTEGER \n'
            lkp_tbl_pk = ') UNIQUE PRIMARY INDEX (' + TransformDDL.get_trgt_pk(Core_tables, code_set) + '); \n\n'
            lkp_tbl_ddl = lkp_tbl_header + lkp_ddl + core_tech_cols + "\n" + lkp_tbl_pk
            f.write(lkp_tbl_ddl)
    except Exception:
        funcs.TemplateLogError(cf.output_path, source_output_path, file_name,
                               traceback.format_exc()).log_error()
    f.close()
Code Example #17
def bmap_unmatched_values_check(cf, source_output_path, table_mapping,
                                core_tables, BMAP, BMAP_VALUES):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    core_tables_look_ups = core_tables[core_tables['Is lookup'] == 'Y']
    lookup_tables_list = TransformDDL.get_src_lkp_tbls(table_mapping, core_tables)
    CD_column = ''
    CD_SET_ID_val = ''
    count = 1
    code_set_names = TransformDDL.get_code_set_names(BMAP_VALUES)

    for code_set_name in code_set_names:
        for table_name in lookup_tables_list:
            if table_name == code_set_name:
                for core_table_index, core_table_row in core_tables_look_ups.iterrows():
                    if core_table_row['Table name'] == table_name:
                        if str(core_table_row['Column name']).endswith('_CD'):
                            CD_column = core_table_row['Column name']
                    for bmap_table_index, bmap_table_row in BMAP.iterrows():
                        if bmap_table_row['Code set name'] == table_name:
                            CD_SET_ID_val = str(bmap_table_row['Code set ID'])
                if CD_column != '' and CD_SET_ID_val != '':
                    bmap_check_name_line = "---bmap_unmatched_Test_Case_" + str(count) + "---"
                    call_line1 = "SEL COALESCE(EDW_CODE,'NOT IN BMAP TABLE BUT IN BASE TABLE')AS EDW_CODE,\n"
                    call_line2 = "COALESCE(" + CD_column + ",'NOT IN BASE TABLE BUT IN BMAP TABLE')AS BASE_CODE\n"
                    call_line3 = " FROM " + cf.UTLFW_v + ".BMAP_STANDARD_MAP FULL OUTER JOIN " + cf.base_DB + '.' + table_name + '\n'
                    call_line4 = "ON " + cf.UTLFW_v + ".BMAP_STANDARD_MAP.EDW_CODE = " + cf.base_DB + '.' + table_name + '.' + CD_column + '\n'
                    # Note: AND binds tighter than OR in the generated predicate, so the
                    # CODE_SET_ID filter only constrains the right-hand IS NULL branch.
                    call_line5 = "WHERE EDW_CODE IS NULL OR " + CD_column + " IS NULL AND CODE_SET_ID = " + CD_SET_ID_val + ';\n\n\n'
                    call_exp = bmap_check_name_line + "\n" + call_line1 + call_line2 + call_line3 + call_line4 + call_line5
                    f.write(call_exp)
                    count += 1
    f.close()
Code Example #18
def d600(cf, source_output_path, Table_mapping, Core_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    core_tables_list = TransformDDL.get_src_core_tbls(Table_mapping)
    Data_mover_flag = cf.Data_mover_flag
    if Data_mover_flag == 1:
        Run_date_column = ", RUN_DATE TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP\n"
        partition_statement = "PARTITION BY RANGE_N(RUN_DATE BETWEEN TIMESTAMP '2020-03-03 00:00:00.000000+00:00' AND TIMESTAMP '2100-03-03 00:00:00.000000+00:00' EACH INTERVAL'1'DAY)\n"
    else:
        Run_date_column = ""
        partition_statement = ""

    for tbl_name in core_tables_list:
        col_ddl = ''
        core_tbl_header = 'CREATE SET TABLE ' + cf.core_table + '.' + tbl_name + ', FALLBACK (\n'

        for core_tbl_index, core_tbl_row in Core_tables[
                Core_tables['Table name'] == tbl_name].iterrows():
            col_ddl += core_tbl_row['Column name'] + ' ' + core_tbl_row['Data type'] + ' '
            if core_tbl_row['Data type'].find('VARCHAR') != -1:
                col_ddl += 'CHARACTER SET UNICODE NOT CASESPECIFIC '
            if core_tbl_row['Mandatory'] == 'Y':
                col_ddl += 'NOT NULL '
            col_ddl += '\n ,'
        core_tech_cols = 'Start_Ts\tTIMESTAMP(6) WITH TIME ZONE \n' + ',End_Ts\tTIMESTAMP(6) WITH TIME ZONE \n'
        core_tech_cols += ",Start_Date\tDATE FORMAT 'YYYY-MM-DD' \n" + ",End_Date\tDATE FORMAT 'YYYY-MM-DD' \n"
        core_tech_cols += ',Record_Deleted_Flag\tBYTEINT \n' + ',Ctl_Id\tSMALLINT COMPRESS(997) \n'
        core_tech_cols += ',Process_Name\tVARCHAR(128)\n' + ',Process_Id\tINTEGER \n'
        core_tech_cols += ',Update_Process_Name\tVARCHAR(128)\n' + ',Update_Process_Id\tINTEGER \n' + Run_date_column
        core_tbl_pk = ') PRIMARY INDEX (' + TransformDDL.get_trgt_pk(Core_tables, tbl_name) + ')\n' + partition_statement + '; \n  \n'
        core_tbl_ddl = core_tbl_header + col_ddl + core_tech_cols + core_tbl_pk
        f.write(core_tbl_ddl)
    f.close()
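This variant of d600 bolts a RUN_DATE audit column and daily RANGE_N partitioning onto every generated table whenever cf.Data_mover_flag is set; with the flag off, both strings are empty and the DDL tail degenerates to a plain primary-index clause. A hedged trace of the flag-on tail for a hypothetical PK CUST_ID:

# Hypothetical flag-on rendering of the DDL tail built by d600.
Run_date_column = ", RUN_DATE TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP\n"
partition_statement = ("PARTITION BY RANGE_N(RUN_DATE BETWEEN TIMESTAMP "
                       "'2020-03-03 00:00:00.000000+00:00' AND TIMESTAMP "
                       "'2100-03-03 00:00:00.000000+00:00' EACH INTERVAL'1'DAY)\n")
core_tbl_pk = ') PRIMARY INDEX (' + 'CUST_ID' + ')\n' + partition_statement + '; \n  \n'
print(core_tbl_pk)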
Code Example #19
def ri_check(cf, source_output_path, table_mapping, RI_relations):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    count = 1
    core_tables_list = TransformDDL.get_src_core_tbls(table_mapping)
    for table_name in core_tables_list:
        for ri_table_index, ri_table_row in RI_relations.iterrows():
            if ri_table_row['CHILD TABLE'] == table_name:
                RI_line = "---RI_Test_Case_" + str(count) + "---"
                call_line1 = "SELECT DISTINCT " + 'CHILD_TABLE.' + ri_table_row['CHILD COLUMN']
                call_line2 = " FROM " + cf.base_DB + '.' + ri_table_row['CHILD TABLE'] + " CHILD_TABLE LEFT JOIN " + cf.base_DB + '.' + ri_table_row['PARENT TABLE'] + " PARENT_TABLE "
                call_line3 = " ON CHILD_TABLE." + ri_table_row['CHILD COLUMN']
                call_line4 = " = PARENT_TABLE." + ri_table_row['PARENT COLUMN']
                call_line5 = " WHERE PARENT_TABLE." + ri_table_row['PARENT COLUMN'] + " IS NULL"
                call_line6 = " AND CHILD_TABLE." + ri_table_row['CHILD COLUMN'] + " IS NOT NULL;"

                call_exp = RI_line + "\n" + call_line1 + '\n' + call_line2 + '\n' + call_line3 + call_line4 + '\n' + call_line5 + call_line6 + '\n\n'
                f.write(call_exp)
                count += 1
    f.close()
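ri_check emits the classic anti-join probe for referential integrity: LEFT JOIN the child table to its parent on the relation's columns and keep child keys that found no parent, while the IS NOT NULL guard stops legitimately null foreign keys from being reported. With a hypothetical ACCOUNT.CUST_ID -> CUSTOMER.CUST_ID relation in BASE_T, one test case comes out roughly as:

# Hypothetical rendering of one ri_check test case.
expected_sql = """---RI_Test_Case_1---
SELECT DISTINCT CHILD_TABLE.CUST_ID
 FROM BASE_T.ACCOUNT CHILD_TABLE LEFT JOIN BASE_T.CUSTOMER PARENT_TABLE 
 ON CHILD_TABLE.CUST_ID = PARENT_TABLE.CUST_ID
 WHERE PARENT_TABLE.CUST_ID IS NULL AND CHILD_TABLE.CUST_ID IS NOT NULL;
"""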
Code Example #20
def process_check(cf, source_output_path, source_name, Table_mapping,
                  Core_tables):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    count = 1
    for table_maping_index, table_maping_row in Table_mapping.iterrows():
        process_name_line = "---PROCESS_CHECK_Test_Case_" + str(count) + "---"
        process_name = str(table_maping_row['Mapping name'])
        tbl_name = table_maping_row['Target table name']
        table_pks = TransformDDL.get_trgt_pk(Core_tables, tbl_name)
        table_pks_splitted = table_pks.split(',')
        call_line1 = "SEL * FROM " + cf.INPUT_VIEW_DB + ".TXF_CORE_" + process_name + "_IN INP_VIEW"
        call_line2 = " WHERE NOT EXISTS ( SEL 1 FROM " + cf.base_view + "." + tbl_name + " BASE_VIEW"
        call_line3 = " WHERE INP_VIEW." + table_pks_splitted[
            0] + " = BASE_VIEW." + table_pks_splitted[0]
        process_check_test_case_exp = call_line1 + '\n' + call_line2 + '\n' + call_line3 + ');\n\n\n'
        process_check_test_case_exp = process_name_line + "\n" + process_check_test_case_exp
        f.write(process_check_test_case_exp)
        count = count + 1
    f.close()
Code Example #21
File: D002.py Project: PeterMGeorge/new_udi
def d002(cf, source_output_path, Core_tables, Table_mapping):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    try:
        Table_mappings = Table_mapping
        history_tbl = cf.GCFR_t + "." + cf.history_tbl
        hist_key_insert_header = "INSERT INTO " + history_tbl
        hist_key_insert_header += "( TRF_TABLE_NAME,PROCESS_NAME,TABLE_NAME,RECORD_ID,START_DATE_COLUMN,END_DATE_COLUMN,HISTORY_COLUMN, HISTORY_KEY)\n"
        hist_key_insert_header += "VALUES ('"
        for tbl_mapping_index, table_maping_row in Table_mappings[
                Table_mappings['Historization algorithm'] == "HISTORY"].iterrows():
            tbl_mapping_name = table_maping_row['Mapping name']
            trgt_layer = table_maping_row['Layer']
            process_name = "TXF_" + trgt_layer + "_" + tbl_mapping_name
            trgt_tbl = table_maping_row['Target table name']
            start_date_column = TransformDDL.get_core_tbl_sart_date_column(Core_tables, trgt_tbl)
            end_date_column = TransformDDL.get_core_tbl_end_date_column(Core_tables, trgt_tbl)

            history_column_list = table_maping_row['Historization columns'].split(',')
            history_column_list = [x.strip() for x in history_column_list]

            history_key_list = TransformDDL.get_core_tbl_hist_keys_list(Core_tables, trgt_tbl, history_column_list)
            history_key_list = [x.strip() for x in history_key_list]

            del_st = " DELETE FROM " + history_tbl + " WHERE PROCESS_NAME = '" + process_name + "';\n"
            f.write(del_st)
            f.write("--History_keys \n")

            for hist_key in history_key_list:
                hist_key_insert_st = process_name + "','" + process_name + "','" + trgt_tbl + "','" + tbl_mapping_name + "','" + start_date_column
                hist_key_insert_st += "','" + end_date_column + "'," + "null,"
                if hist_key != "undefined":
                    hist_key = funcs.single_quotes(hist_key)

                hist_key_insert_st += hist_key + "); \n"
                f.write(hist_key_insert_header)
                f.write(hist_key_insert_st)

            f.write("--History_columns \n")
            # f.write(str(history_column_list))
            # f.write(str(len(history_column_list)))
            # f.write("\n")

            for hist_col in history_column_list:
                if hist_col == '':
                    hist_col = "undefined"
                else:
                    hist_col = funcs.single_quotes(hist_col)

                hist_col_insert_st = process_name + "','" + process_name + "','" + trgt_tbl + "','" + tbl_mapping_name + "','" + start_date_column
                hist_col_insert_st += "','" + end_date_column + "'," + hist_col + "," + "null); \n"
                f.write(hist_key_insert_header)
                f.write(hist_col_insert_st)
            f.write("\n \n")
    except Exception:
        funcs.TemplateLogError(cf.output_path, source_output_path, file_name,
                               traceback.format_exc()).log_error()
    f.close()
Code Example #22
File: D620.py Project: OmarNour/new_udi
def d620(cf, source_output_path, Table_mapping, Column_mapping, Core_tables,
         Loading_Type, input_view_flag, stg_tables):
    file_name = funcs.get_file_name(__file__)
    if input_view_flag == 'TESTING':
        file_name = 'testing_input_views'
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    notes = list()
    view_name_suffix = ''
    for table_maping_index, table_maping_row in Table_mapping.iterrows():

        inp_view_from_clause = ''

        process_type = 'TXF'
        layer = str(table_maping_row['Layer'])
        table_maping_name = str(table_maping_row['Mapping name'])
        src_layer = str(table_maping_row['Source layer'])
        process_name = process_type + "_" + layer + "_" + table_maping_name

        if input_view_flag == 'UDI':
            view_name_suffix = ' '
        elif input_view_flag == 'TESTING':
            view_name_suffix = '_TESTING '
        inp_view_header = 'REPLACE VIEW ' + cf.INPUT_VIEW_DB + '.' + process_name + '_IN' + view_name_suffix + 'AS LOCK ROW FOR ACCESS' + '\n'
        target_table = str(table_maping_row['Target table name'])
        apply_type = table_maping_row['Historization algorithm']
        process_names_condition = str(table_maping_row['SubProcess Condition'])
        process_names_case_when = process_names_condition.replace('#process_name#', process_name)
        process_names_case_when_clause = '(' + process_names_case_when + ') AS PROCESS_NAME '

        main_src = table_maping_row['Main source']

        SRCI = cf.SI_DB + '.'
        main_src = main_src.replace('#SRCI#', SRCI)
        main_src_alias = table_maping_row['Main source alias'].upper()

        main_src_alias_mt = main_src_alias.replace('_ONLINE', '')
        modification_type_exists = funcs.table_has_modification_type_column(stg_tables, main_src_alias_mt)
        if modification_type_exists:
            modification_type = main_src_alias + '.MODIFICATION_TYPE'
        else:
            modification_type = "'U' AS MODIFICATION_TYPE"

        if main_src == main_src_alias:
            main_src = cf.SI_DB + '.' + main_src
        # core_tables_list= pd.unique(list(Core_tables['Table name']))
        core_tables_list = TransformDDL.get_core_tables_list(Core_tables)

        if main_src is None:
            msg = 'Missing Main Source for Table Mapping:{}'.format(str(table_maping_row['Mapping name']))
            notes.append(msg)
            continue

        if target_table not in core_tables_list:
            msg = 'TARGET TABLE NAME not found in Core Tables Sheet for Table Mapping:{}'.format(str(table_maping_row['Mapping name']))
            notes.append(msg)
            continue

        sub = "/* Target table:\t" + target_table + "*/" + '\n' \
              + "/* Table mapping:\t" + table_maping_name + "*/" + '\n' \
              + "/* Mapping group:\t" + table_maping_row['Mapping group'] + "*/" + '\n' \
              + "/* Apply type:\t\t" + apply_type + "*/"
        inp_view_select_clause = 'SELECT ' + '\n' + sub + TransformDDL.get_select_clause(
            target_table, Core_tables, table_maping_name, Column_mapping)
        map_grp = ' CAST(' + funcs.single_quotes(table_maping_row['Mapping group']) + ' AS VARCHAR(100)) AS  MAP_GROUP ,'
        start_date = '(SELECT Business_Date FROM ' + cf.GCFR_V + '.GCFR_Process_Id' + '\n' + '   WHERE Process_Name = ' + "'" + process_name + "'" + '\n' + ') AS Start_Date,'
        end_date = 'DATE ' + "'9999-12-31'" + ' AS End_Date,'

        load_id = main_src_alias + '.LOAD_ID'
        batch_id = main_src_alias + '.BATCH_ID'
        ref_key = main_src_alias + '.REF_KEY'
        if process_names_case_when != '':
            inp_view_select_clause = inp_view_select_clause + '\n' + map_grp + '\n' + start_date + '\n' + end_date + '\n' + modification_type + ',' + '\n' + batch_id + ',' + '\n' + ref_key + ',' + process_names_case_when_clause + '\n'
        else:
            inp_view_select_clause = inp_view_select_clause + '\n' + map_grp + '\n' + start_date + '\n' + end_date + '\n' + modification_type + ',' + '\n' + batch_id + ',' + '\n' + ref_key + '\n'

        if table_maping_row['Join'] == "":
            inp_view_from_clause = 'FROM ' + main_src + ' ' + main_src_alias
        elif table_maping_row['Join'] != "":
            if table_maping_row['Join'].find(
                    "FROM".strip()) == -1:  # no subquery in join clause
                inp_view_from_clause = 'FROM ' + main_src + ' ' + main_src_alias
                inp_view_from_clause = inp_view_from_clause + '\n' + table_maping_row[
                    'Join']
                if '#CORE#' not in inp_view_from_clause:
                    join = 'JOIN ' + cf.SI_VIEW + '.'
                else:
                    join = ' '
                inp_view_from_clause = inp_view_from_clause.replace(
                    'JOIN ', join)
                inp_view_from_clause = inp_view_from_clause.replace(
                    '#CORE#', cf.core_view + '.')
            else:
                sub_query_flag = 1
                join_clause = table_maping_row['Join']
                subquery_clause = TransformDDL.get_sub_query(
                    cf, join_clause, src_layer, main_src)
                inp_view_from_clause = ' FROM \n' + subquery_clause

        inp_view_where_clause = ''
        if table_maping_row['Filter criterion'] != "":
            inp_view_where_clause = '\n' + 'Where ' + table_maping_row['Filter criterion'] + ';'
        elif table_maping_row['Aggregation filter criterion'] != "":
            inp_view_where_clause = inp_view_where_clause + '\n' + table_maping_row['Aggregation filter criterion'] + ';'
        else:
            inp_view_where_clause = ';'

        f.write(inp_view_header)
        f.write(inp_view_select_clause)
        f.write(inp_view_from_clause)
        f.write(inp_view_where_clause)
        f.write("\n")
        f.write("\n")
        f.write("\n")
    f.close()
Code Example #23
def d620(cf, source_output_path, Table_mapping, Column_mapping, Core_tables,
         Loading_Type):
    file_name = funcs.get_file_name(__file__)
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    try:
        notes = list()
        for table_maping_index, table_maping_row in Table_mapping.iterrows():

            inp_view_from_clause = ''

            process_type = 'TXF'
            layer = str(table_maping_row['Layer'])
            table_maping_name = str(table_maping_row['Mapping name'])
            src_layer = str(table_maping_row['Source layer'])
            process_name = process_type + "_" + layer + "_" + table_maping_name

            inp_view_header = 'REPLACE VIEW ' + cf.INPUT_VIEW_DB + '.' + process_name + '_IN AS LOCK ROW FOR ACCESS'
            target_table = str(table_maping_row['Target table name'])
            apply_type = table_maping_row['Historization algorithm']

            main_src = table_maping_row['Main source']
            main_src_alias = table_maping_row['Main source alias']

            if main_src == main_src_alias:
                main_src = cf.SI_VIEW + '.' + main_src
            # core_tables_list= pd.unique(list(Core_tables['Table name']))
            core_tables_list = TransformDDL.get_core_tables_list(Core_tables)

            if main_src is None:
                msg = 'Missing Main Source for Table Mapping:{}'.format(str(table_maping_row['Mapping name']))
                notes.append(msg)
                continue

            if target_table not in core_tables_list:
                msg = 'TARGET TABLE NAME not found in Core Tables Sheet for Table Mapping:{}'.format(str(table_maping_row['Mapping name']))
                notes.append(msg)
                continue

            sub = "/* Target table:\t" + target_table + "*/" + '\n'\
                  + "/* Table mapping:\t" + table_maping_name + "*/" + '\n'\
                  + "/* Mapping group:\t" + table_maping_row['Mapping group'] + "*/" + '\n' \
                  + "/* Apply type:\t\t" + apply_type + "*/"
            inp_view_select_clause = 'SELECT ' + '\n' + sub + TransformDDL.get_select_clause(
                target_table, Core_tables, table_maping_name, Column_mapping)
            map_grp = ' CAST(' + funcs.single_quotes(table_maping_row['Mapping group']) + ' AS VARCHAR(100)) AS  MAP_GROUP ,'
            start_date = '(SELECT Business_Date FROM ' + cf.GCFR_V + '.GCFR_Process_Id' + '\n' + '   WHERE Process_Name = ' + "'" + process_name + "'" + '\n' + ') AS Start_Date,'
            end_date = 'DATE ' + "'9999-12-31'" + ' AS End_Date,'

            if Loading_Type == 'OFFLINE':
                modification_type = "'U' AS MODIFICATION_TYPE"
            else:
                modification_type = main_src_alias + '.MODIFICATION_TYPE'

            inp_view_select_clause = inp_view_select_clause + '\n' + map_grp + '\n' + start_date + '\n' + end_date + '\n' + modification_type + '\n'

            if table_maping_row['Join'] == "":
                inp_view_from_clause = 'FROM ' + main_src + ' ' + main_src_alias
            elif table_maping_row['Join'] != "":
                if (table_maping_row['Join'].find(
                        "FROM".strip()) == -1):  #no subquery in join clause
                    inp_view_from_clause = 'FROM ' + main_src + ' ' + main_src_alias
                    inp_view_from_clause = inp_view_from_clause + '\n' + table_maping_row[
                        'Join']
                    join = 'JOIN ' + cf.SI_VIEW + '.'
                    inp_view_from_clause = inp_view_from_clause.replace(
                        'JOIN ', join)
                else:
                    sub_query_flag = 1
                    join_clause = table_maping_row['Join']
                    subquery_clause = TransformDDL.get_sub_query(
                        cf, join_clause, src_layer, main_src)
                    inp_view_from_clause = ' FROM \n' + subquery_clause

            inp_view_where_clause = ';'
            if table_maping_row['Filter criterion'] != "":
                inp_view_where_clause = 'Where ' + table_maping_row['Filter criterion'] + ';'

            f.write(inp_view_header)
            f.write("\n")
            f.write(inp_view_select_clause)
            f.write("\n")
            f.write(inp_view_from_clause)
            f.write("\n")
            f.write(inp_view_where_clause)
            f.write("\n")
            f.write("\n")
            f.write("\n")

    except Exception:
        funcs.TemplateLogError(cf.output_path, source_output_path, file_name,
                               traceback.format_exc()).log_error()

    f.close()
Code Example #24
def dataValidation(cf, source_output_path, source_name, System, STG_tables,
                   LOADING_TYPE, flag):
    file_name = funcs.get_file_name(__file__) + '_' + flag
    f = funcs.WriteFile(source_output_path, file_name, "sql")
    if flag == 'Accepted':
        template_path = cf.templates_path + "/" + pm.dataValidation_template_filename
    else:
        template_path = cf.templates_path + "/" + pm.dataValidationAll_template_filename
    smx_path = cf.smx_path
    template_string = ""
    try:
        REJ_TABLE_NAME = System['Rejection Table Name']
    except Exception:
        REJ_TABLE_NAME = ''
    try:
        REJ_TABLE_RULE = System['Rejection Table Rules']
    except Exception:
        REJ_TABLE_RULE = ''

    try:
        source_DB = System['Source DB']
    except Exception:
        source_DB = ''

    try:
        template_file = open(template_path, "r")
    except Exception:
        template_file = open(smx_path, "r")

    for i in template_file.readlines():
        if i != "":
            template_string = template_string + i
    stg_table_names = funcs.get_stg_tables(STG_tables)
    if LOADING_TYPE == 'ONLINE':
        LOADING_TYPE = 'STG_ONLINE'
    else:
        LOADING_TYPE = 'STG_LAYER'
    for stg_tables_df_index, stg_tables_df_row in stg_table_names[
        (stg_table_names['Table name'] != REJ_TABLE_NAME)
            & (stg_table_names['Table name'] != REJ_TABLE_RULE)].iterrows():
        TABLE_NAME = stg_tables_df_row['Table name']
        TABLE_COLUMNS = funcs.get_stg_table_columns(STG_tables, source_name,
                                                    TABLE_NAME)
        TBL_PKs = TDDL.get_trgt_pk(STG_tables, TABLE_NAME)
        IBM_STG_TABLE_COLUMNS = ""
        TERADATA_STG_TABLE_COLUMNS = ""
        TERADATA_WRK_TABLE_COLUMNS = ""
        COUNT_COLS = ''
        lengthh = len(TABLE_COLUMNS)
        for stg_tbl_index, stg_tbl_row in TABLE_COLUMNS.iterrows():
            align = '' if stg_tbl_index >= lengthh - 1 else '\n\t\t'
            STGalign = '' if stg_tbl_index >= lengthh - 1 else '\n\t\t\t'
            IBM_STG_TABLE_COLUMNS += 'IBM_STG_TABLE.' + '"' + stg_tbl_row['Column name'] + '"' + ',' + align
            TERADATA_STG_TABLE_COLUMNS += 'TERADATA_STG_TABLE.' + '"' + stg_tbl_row['Column name'] + '"' + ',' + STGalign
            TERADATA_WRK_TABLE_COLUMNS += 'TERADATA_WRK_TABLE.' + '"' + stg_tbl_row['Column name'] + '"' + ',' + STGalign
            COUNT_COLS += str(stg_tbl_index + 1) + ','
        # strip the trailing comma left by the loop above
        COUNT_COLS = COUNT_COLS[:-1]
        IBM_STG_TABLE_COLUMNS = IBM_STG_TABLE_COLUMNS[:-1]
        TERADATA_STG_TABLE_COLUMNS = TERADATA_STG_TABLE_COLUMNS[:-1]
        TERADATA_WRK_TABLE_COLUMNS = TERADATA_WRK_TABLE_COLUMNS[:-1]

        if flag == 'Accepted':
            output_script = template_string.format(
                TABLE_NAME=TABLE_NAME,
                IBM_STG_TABLE_COLUMNS=IBM_STG_TABLE_COLUMNS,
                TERADATA_STG_TABLE_COLUMNS=TERADATA_STG_TABLE_COLUMNS,
                STG_DATABASE=cf.T_STG,
                REJ_TABLE_NAME=REJ_TABLE_NAME,
                REJ_TABLE_RULE=REJ_TABLE_RULE,
                TBL_PKs=TBL_PKs,
                source_DB=source_DB,
                LOADING_TYPE=LOADING_TYPE,
                COUNT_COLS=COUNT_COLS)
        else:
            output_script = template_string.format(
                TABLE_NAME=TABLE_NAME,
                IBM_STG_TABLE_COLUMNS=IBM_STG_TABLE_COLUMNS,
                TERADATA_STG_TABLE_COLUMNS=TERADATA_STG_TABLE_COLUMNS,
                STG_DATABASE=cf.T_STG,
                REJ_TABLE_NAME=REJ_TABLE_NAME,
                REJ_TABLE_RULE=REJ_TABLE_RULE,
                TBL_PKs=TBL_PKs,
                source_DB=source_DB,
                LOADING_TYPE=LOADING_TYPE,
                COUNT_COLS=COUNT_COLS,
                WRK_DATABASE=cf.t_WRK,
                TERADATA_WRK_TABLE_COLUMNS=TERADATA_WRK_TABLE_COLUMNS)

        seperation_line = '--------------------------------------------------------------------------------------------------------------------------------------------------------------------'
        output_script = output_script.upper() + '\n' + seperation_line + '\n' + seperation_line + '\n'
        f.write(output_script.replace('Â', ' '))
    f.close()
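A side note on style: these generators build comma-separated column lists by appending a separator on every iteration and slicing the trailing one off afterwards. Joining precomputed fragments avoids that bookkeeping; a standalone sketch of the equivalent construction (the two-column DataFrame here is made up):

import pandas as pd

# Stand-in for the TABLE_COLUMNS DataFrame used above.
TABLE_COLUMNS = pd.DataFrame({'Column name': ['CUST_ID', 'CUST_NAME']})
names = list(TABLE_COLUMNS['Column name'])
# join-style construction: no trailing separator to trim afterwards.
IBM_STG_TABLE_COLUMNS = (',\n\t\t').join('IBM_STG_TABLE."%s"' % n for n in names)
COUNT_COLS = ','.join(str(i + 1) for i in range(len(names)))
print(COUNT_COLS)  # -> 1,2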