Example no. 1
0
def get_connStr(uid, pwd, db_node):
    """Build an ODBC connection string for the configured node *db_node*.

    Supports the IBM DB2 ODBC driver and the Netezza driver; any other
    configured driver yields an empty string.

    :param uid: database user id
    :param pwd: database password
    :param db_node: config node name whose db settings are read
    :return: ODBC connection string, or '' for an unsupported driver
    """
    cfg = ReadConfig().Read_db_config(db_node)
    driver = cfg['driver']
    if driver == '{IBM DB2 ODBC DRIVER}':
        return "DRIVER={};DATABASE={};HOSTNAME={};PORT={};UID={};PWD={}".format(
            driver, cfg['database'], cfg['hostname'], cfg['port'], uid, pwd)
    if driver == '{NetezzaSQL}':
        return "DRIVER={};SERVER={};PORT={};DATABASE={};UID={};PWD={}".format(
            driver, cfg['hostname'], cfg['port'], cfg['database'], uid, pwd)
    return ''
Example no. 2
0
    def generate_source_target_column_map(self, id, pwd, source_schema,
                                          source_table, target_schema,
                                          target_table):
        """Map source column names to target column names by column position.

        Reads the source/target db nodes from config, fetches the column
        list of *source_table* and *target_table* from the respective
        system catalogs, and pairs columns that share the same ordinal
        number.

        :param id: database user id used for both queries
        :param pwd: password for *id*
        :param source_schema: schema of the source table
        :param source_table: source table name
        :param target_schema: schema of the target table
        :param target_table: target table name
        :return: dict of {source column name: target column name}
        """
        db2_column_sample_sql = '''
        SELECT NAME as COL_NM,COLNO as COL_NO
        FROM SYSIBM.SYSCOLUMNS
        WHERE UPPER(TBNAME) IN (SELECT UPPER(NAME) FROM SYSIBM.SYSTABLES WHERE TYPE = 'T') AND
        UPPER(TBCREATOR) = UPPER('{}') -- Schema Name
        AND UPPER (TBNAME) = UPPER('{}') ORDER BY COLNO
        '''
        # Schema is parameterized (was hardcoded to 'BDWDB') so the method
        # honours its *schema* arguments for pda nodes too.
        pda_column_sample_sql = """
        SELECT  ATTNAME as COL_NM,ATTNUM as COL_NO
        FROM _V_RELATION_COLUMN
        WHERE UPPER(TYPE) = 'TABLE' AND
        UPPER(SCHEMA) = UPPER('{}') -- Schema Name
        AND UPPER(NAME) = UPPER('{}') ORDER BY ATTNUM;
        """

        def _build_column_sql(db, schema, table):
            # Pick the catalog query matching the node's db type.
            # (Original bug: the pda branch used the DB2 query, and the
            # format args passed the db config dict instead of the table.)
            if db['db_type'] == 'db2':
                return db2_column_sample_sql.format(schema, table)
            if db['db_type'] == 'pda':
                return pda_column_sample_sql.format(schema, table)
            print("The db type is invalid")
            return None

        '''read source data dic'''
        conf = ReadConfig()
        source_db_node = conf.Read_source_db_node()
        target_db_node = conf.Read_target_db_node()
        source_db = conf.Read_db_config(source_db_node)
        target_db = conf.Read_db_config(target_db_node)
        # Original bug: target queries were built with the *source* schema.
        source_db_sql = _build_column_sql(source_db, source_schema, source_table)
        target_db_sql = _build_column_sql(target_db, target_schema, target_table)

        ''' run under source to get source columns'''
        print(source_db_sql)
        print(target_db_sql)
        rs_source = db_connect.exec_sql_common(source_db_node, id, pwd, source_db_sql)
        rs_target = db_connect.exec_sql_common(target_db_node, id, pwd, target_db_sql)

        # Index target columns by ordinal once — O(n) instead of O(n^2).
        target_name_by_no = {row['COL_NO']: row['COL_NM'] for row in rs_target}
        source_target_mapping = dict()
        for src_line in rs_source:
            tgt_name = target_name_by_no.get(src_line['COL_NO'])
            if tgt_name is not None:
                source_target_mapping[src_line['COL_NM']] = tgt_name
        return source_target_mapping
Example no. 3
0
def exec_sql_with_jdbc(db_node, user, pwd, query):
    """Run *query* against *db_node* through the bundled Query_JDBC.jar.

    Looks up the JDBC driver class and URL for *db_node* in config,
    launches ``java -jar ../JDBC/Query_JDBC.jar`` with the connection
    details and the query, and parses the first line of its stdout as JSON.

    :param db_node: config node name holding 'driver' and 'url'
    :param user: database user id
    :param pwd: database password (NOTE: passed on the java command line,
        so it is visible in the local process list — consider stdin/env)
    :param query: SQL text to execute
    :return: query result decoded from the jar's JSON output
    :raises subprocess.CalledProcessError: if the jar exits non-zero
    :raises json.JSONDecodeError: if the jar output is not valid JSON
    """
    import subprocess  # local import keeps the module import block untouched

    conf = ReadConfig()
    db = conf.Read_db_config(db_node)
    java_bin = os.path.join(conf.Read_Java_home(), 'java')
    current_path = os.path.dirname(os.path.realpath(__file__))
    jdbc_path = os.path.join(current_path, '../JDBC/Query_JDBC.jar')
    # Argument list with shell=False: no shell is involved, so the query
    # needs no manual '"..."' quoting and cannot break/inject the command
    # (the original concatenated a shell string and ran it via os.popen).
    command = [java_bin, '-jar', jdbc_path, db['driver'], db['url'],
               user, pwd, query]
    print("\tRunning Command:" + ' '.join(command))
    proc = subprocess.run(command, capture_output=True, text=True, check=True)
    # The jar prints the result set as one JSON document on its first line.
    return json.loads(proc.stdout.splitlines()[0])
Example no. 4
0
    def iwefresh_positive_test(self, iwrefresh_db_node, iwrefresh_uid,
                               iwrefresh_pwd):
        """Validate that every data-group job triggered an IW Refresh run.

        For each job in the configured job list carrying a DATAGROUP_ID:
        1. check the job's run status is 'RUN OK (1)';
        2. query the iwrefresh LOAD_STATUS table for a record whose
           LOAD_START is after the job start time and check it COMPLETEd.
        Findings are collected into a report; raises
        TestException.IWRefreshError when any check fails.

        :param iwrefresh_db_node: config node name of the iwrefresh database
        :param iwrefresh_uid: user id used to query the iwrefresh database
        :param iwrefresh_pwd: password for *iwrefresh_uid*
        :raises TestException.IWRefreshError: on any job/refresh failure
        """
        '''read config file for the job stream'''
        conf = ReadConfig()
        job_list = conf.Read_job_list()
        job_status_report = conf.Read_job_status_report()
        job_iw_control_report = []
        iw_refresh_failed_count = 0
        for job in job_list:
            data_group = job['DATAGROUP_ID']
            iw_refresh_control_status = dict()
            # Jobs with no data group are out of scope for IW Refresh.
            if data_group != '':
                job_name = job['JOB_NAME']
                job_run_status = job_status_report[job_name]['Job Status']
                job_start_time = self.job_status_time_transfer(
                    job_status_report[job_name]['Job Start Time'])
                job_end_time = self.job_status_time_transfer(
                    job_status_report[job_name]['Last Run Time'])
                '''step 1 validate if the parallel job show pass'''
                print("step 1: validate the run status of job: %s ..." % (job_name) +" \nstart time:" +str(job_start_time)+"\nend time:"\
                      + str(job_end_time))
                print(
                    "----------------------------------------------------------------------------------"
                )
                # Fixed: the original compared against 'RUN OK (1)' twice.
                if job_run_status == 'RUN OK (1)':
                    print(
                        "     the job status is %s,the status check passed,go to next step"
                        % job_run_status)
                else:
                    print("The DataGroup Related job status is failed")
                    raise TestException.IWRefreshError()
                '''step 2 validate if the IW Refresh record shows pass and the IW Refresh start time big than job start time,
                    the end time is less than the end time '''
                '''get the bludb conf, and trige the db to run the sql'''

                print("step2: Get iw refresh status from iwrefresh db...")
                print(
                    "----------------------------------------------------------------------------------"
                )
                print("Data group: %s" % data_group)
                # NOTE(review): values are interpolated into the SQL text;
                # both come from trusted config/job status, but a
                # parameterized query would be safer.
                sql = "select B.DATAGROUP_NM as DATAGROUP_NM,A.STAT as STAT,A.LOAD_START as LOAD_START," \
                      "A.LOAD_END as LOAD_END from XLSSTG.LOAD_STATUS A inner join XLSSTG.DATA_GROUP B on " \
                      "A.DATAGROUP_ID = B.DATAGROUP_ID " \
                      "where B.DATAGROUP_NM= '{}' AND A.LOAD_START > '{}' ".format(data_group, job_start_time)
                rs = db_connect.exec_sql_common(iwrefresh_db_node,
                                                iwrefresh_uid, iwrefresh_pwd,
                                                sql)
                print("RUNNING QUERY:%s" % sql)
                if len(rs) != 0:
                    iwrefresh_status = rs[0]['STAT']
                    iwrefresh_start_time = rs[0]['LOAD_START']
                    iwrefresh_end_time = rs[0]['LOAD_END']
                    '''gather info to report'''
                    iw_refresh_control_status['JOB_NM'] = job_name
                    iw_refresh_control_status['JOB_START_TIME'] = str(
                        job_start_time)
                    iw_refresh_control_status['JOB_END_TIME'] = str(
                        job_end_time)
                    iw_refresh_control_status['DATA_GROUP_NM'] = data_group
                    iw_refresh_control_status['IWREFRESH_START_TIME'] = str(
                        iwrefresh_start_time)
                    iw_refresh_control_status['IWREFRESH_END_TIME'] = str(
                        iwrefresh_end_time)
                    iw_refresh_control_status[
                        'IWREFRESH_STATUS_'] = iwrefresh_status
                    job_iw_control_report.append(iw_refresh_control_status)
                    print("IWRefresh status: %s" % iwrefresh_status)
                    '''1. check the iw refresh status'''
                    if iwrefresh_status == 'COMPLETE':

                        print(
                            '''\nWhen the job %s run finished, The data group \"%s\" shows \"%s\" ,the IW Refresh test passed
                              ''' % (job_name, data_group, iwrefresh_status))
                    else:
                        iw_refresh_failed_count += 1
                        # Fixed: this failure branch previously printed
                        # "the IW Refresh test passed".
                        print(
                            '''When the job %s run finished, The data group \"%s\" shows \"%s\" ,
                         the IW Refresh test failed''' %
                            (job_name, data_group, iwrefresh_status))

                else:
                    iw_refresh_failed_count += 1
                    print(
                        "The IW Refresh control not be triggered when the job %s start, the IW Refresh test failed"
                        % (job_name))
        '''generate the iw refresh report'''
        iw_report = Generate_report()
        iw_report.write_iwefresh_status_to_report(job_iw_control_report)
        iw_report.generate_iwrefresh_positive_report()

        if iw_refresh_failed_count != 0:
            print("one or more table's IWRefresh control failed, "
                  "check the iw_refresh_positive_test_report.xls for detail")
            raise TestException.IWRefreshError()
Example no. 5
0
    def rowcount_test(asca_db_node, zos_user, zos_pwd):
        """Compare row counts of each mapped source/target table pair.

        NOTE(review): this method takes no ``self`` — it only works when
        called off the class itself, or it should be a ``@staticmethod``;
        confirm against the callers before changing.

        Steps:
        1. collect ASCA control-point ids from the job list and fetch the
           source/target table mapping over JDBC;
        2. run one UNION query per side counting rows in every table;
        3. compare the counts pair-wise, write the report, and raise
           TestException.RowcountError if any pair mismatches.

        :param asca_db_node: config node of the ASCA control database
        :param zos_user: z/OS user id used for the JDBC queries
        :param zos_pwd: password for *zos_user*
        :raises TestException.RowcountError: when any pair mismatches
        """
        '''Step 1 get source/target table list from asca.asca_control_record'''
        conf = ReadConfig()
        job_list = conf.Read_job_list()
        '''Get asca_control_record through jdbc, store the result to the asca_control_dict'''
        # 1. collect the non-empty asca control point ids
        asca_control_id_list = [
            job['ASCA_CONTROL_POINT_ID'] for job in job_list
            if job['ASCA_CONTROL_POINT_ID'] != ''
        ]
        # str(list).strip('[').strip(']') renders "'a', 'b', ..." — the
        # literal payload for the SQL IN (...) clause below.
        asca_control_id_string = str(asca_control_id_list).strip('[').strip(
            ']')
        '''2.generate the sql query'''
        print(
            "Step 1: Get asca control result from ASCA.ASCA_CNTROL_RECORD table"
        )
        query = "select SRC_OBJ_NM,TRGT_TBL_NM from ASCA.ASCA_control_point\
                WHERE ASCA_CNTL_PT_ID in ({})".format(asca_control_id_string)
        print("\tQuery:" + query)
        '''3. Trigger jdbc driver to query the data'''
        source_target_mapping = db_connect.exec_sql_with_jdbc(
            asca_db_node, zos_user, zos_pwd, query)
        '''Store the table mapping to a temp file'''
        file_name = os.path.join(conf.read_temp_dir(),
                                 'source_target_mapping.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(source_target_mapping, f)

        print(source_target_mapping)
        print("\tQuery running completed")

        def _rowcount_union_query(column):
            # Build "select '<tbl>' as TABLE_NM, count(*) as ROWCOUNT from
            # <tbl>" for every mapped table, glued together with UNION.
            # (Replaces two copy-pasted index loops in the original.)
            return " union ".join(
                "select '" + row[column] +
                "' as TABLE_NM, count(*) as ROWCOUNT from " + row[column]
                for row in source_target_mapping)

        print("Step 2:  start the get source table row count...")
        '''generate query'''
        source_db_node = conf.Read_source_db_node()
        query_str = _rowcount_union_query('SRC_OBJ_NM')
        print(query_str)
        '''run the query '''
        source_rowcount = db_connect.exec_sql_with_jdbc(
            source_db_node, zos_user, zos_pwd, query_str)
        print(source_rowcount)

        print("Step 3: start get target table row count...")
        '''generate target query'''
        target_query = _rowcount_union_query('TRGT_TBL_NM')
        print(target_query)
        '''get target db node'''
        target_db_node = conf.Read_target_db_node()
        db_conf = conf.Read_db_config(target_db_node)
        db_driver = db_conf['driver']
        print(db_driver)
        if db_driver == 'com.ibm.db2.jcc.DB2Driver':
            '''use jdbc to run query'''
            target_rowcount = db_connect.exec_sql_with_jdbc(
                target_db_node, zos_user, zos_pwd, target_query)
        else:
            '''use common driver to run query'''
            # SECURITY(review): hardcoded credentials — move to config or a
            # secret store; flagged rather than silently changed.
            target_rowcount = db_connect.exec_sql_common(
                target_db_node, 'siwsit', 'SIWJul2019JulSIW', target_query)
        print(target_rowcount)
        '''Step 4: validation'''
        print("Step 4: validation")
        Rowcount_test_result = []
        for item in source_target_mapping:
            # Default both counts to 'N/A' so a table missing from either
            # result set is reported as FAIL instead of raising KeyError
            # (the original crashed in that case).
            rowcount_record = {
                'SOURCE_TABLE': item['SRC_OBJ_NM'],
                'TARGET_TABLE': item['TRGT_TBL_NM'],
                'SOURCE_ROWCOUNT': 'N/A',
                'TARGET_ROWCOUNT': 'N/A',
            }
            for element in source_rowcount:
                if element['TABLE_NM'] == item['SRC_OBJ_NM']:
                    rowcount_record['SOURCE_ROWCOUNT'] = str(
                        element['ROWCOUNT'])
            for element in target_rowcount:
                if element['TABLE_NM'] == item['TRGT_TBL_NM']:
                    rowcount_record['TARGET_ROWCOUNT'] = str(
                        element['ROWCOUNT'])
            rowcount_record['TEST_RESULT'] = "PASS" if (
                rowcount_record['SOURCE_ROWCOUNT'] != 'N/A'
                and rowcount_record['SOURCE_ROWCOUNT']
                == rowcount_record['TARGET_ROWCOUNT']) else "FAIL"
            print("Source table name:" + rowcount_record['SOURCE_TABLE'])
            print("Target table name:" + rowcount_record['TARGET_TABLE'])
            print("Source table rowcount:" +
                  rowcount_record['SOURCE_ROWCOUNT'])
            print("Target table rowcount:" +
                  rowcount_record['TARGET_ROWCOUNT'])
            print("Row count test result:" + rowcount_record['TEST_RESULT'])
            Rowcount_test_result.append(rowcount_record)
        print(Rowcount_test_result)
        '''generate report'''
        gen_rowcount = Generate_report()
        gen_rowcount.write_row_count_status_to_json(Rowcount_test_result)
        gen_rowcount.generate_row_count_test_report()
        '''validate the test case result'''
        failed_count = sum(1 for item in Rowcount_test_result
                           if item['TEST_RESULT'] == 'FAIL')
        if failed_count > 0:
            print(
                "One or more tables' rowcount between source and target mismatch, row count test failed "
                "check the row_count_test_report.xls for detail")
            raise TestException.RowcountError()
        else:
            print(
                "All tables' row count between source and target matched,the row count test passed."
            )
Example no. 6
0
    def sample_data_test(self, source_db_node, source_user, source_pwd,
                         target_db_node, target_user, target_pwd):
        """Compare sampled rows between each mapped source/target table pair.

        Reads the source/target table mapping from config, pulls column
        metadata from both databases, builds a positional column mapping,
        samples rows from each source table (configured WHERE clause,
        random key-based sample, or full table), fetches the corresponding
        target rows, and prints the rows that differ on each side.

        :param source_db_node: config node name of the source database
        :param source_user: user id for the source database
        :param source_pwd: password for *source_user*
        :param target_db_node: config node name of the target database
        :param target_user: user id for the target database
        :param target_pwd: password for *target_user*
        """
        print("Get necessary metadata from source and target")
        """Step 1 get source/target table list from source target table mapping"""
        # DB2 system-catalog query: one row per column with its ordinal,
        # type, length and primary-key sequence (KEYSEQ) for user tables.
        db2_metadata_query = "SELECT NAME as COLUMN_NAME,TBNAME as TABLE_NAME,TBCREATOR AS TABLE_SCHEMA,COLNO AS COLUMN_NUMBER,COLTYPE AS COLUMN_TYPE,LENGTH AS COLUMN_LENGTH,KEYSEQ AS KEY_SEQ \
                            FROM SYSIBM.SYSCOLUMNS \
                            WHERE UPPER(TBNAME) IN (SELECT UPPER(NAME) FROM SYSIBM.SYSTABLES WHERE TYPE = 'T') AND \
                            UPPER(TBCREATOR) in ({}) \
                            AND UPPER (TBNAME) in ({}) order by COLNO "

        # Netezza (PDA) equivalent; KEY_SEQ is the literal '0' because this
        # view exposes no primary-key information.
        pda_metadata_query = "SELECT ATTNAME AS COLUMN_NAME,NAME AS TABLE_NAME,SCHEMA AS TABLE_SCHEMA,ATTNUM AS COLUMN_NUMBER,FORMAT_TYPE AS COLUMN_TYPE,ATTCOLLENG AS COLUMN_LENGTH,'0' AS KEY_SEQ \
                            FROM _V_RELATION_COLUMN \
                            WHERE UPPER(TYPE) = 'TABLE' AND \
                            UPPER(SCHEMA) in ({}) \
                            AND UPPER(NAME) in ({}) "

        conf = ReadConfig()
        source_target_table_mapping = conf.read_source_target_table_mapping()
        print(source_target_table_mapping)
        '''Get source and target db metadata'''
        print("Step 1: Get source table list.")
        print("Step 2: Get source tables' column list to file")
        # Split each 'SCHEMA.TABLE' mapping entry into schema/table lists;
        # de-duplicated via set() right after.
        source_schema_list = []
        target_schema_list = []
        source_table_list = []
        target_table_list = []
        for item in source_target_table_mapping:
            source_schema_list.append(item['SRC_OBJ_NM'].split('.')[0])
            source_table_list.append(item['SRC_OBJ_NM'].split('.')[1])
            target_schema_list.append(item['TRGT_TBL_NM'].split('.')[0])
            target_table_list.append(item['TRGT_TBL_NM'].split('.')[1])
        source_schema_list = list(set(source_schema_list))
        target_schema_list = list(set(target_schema_list))
        source_table_list = list(set(source_table_list))
        target_table_list = list(set(target_table_list))
        print("Step 3: Get target table list.")
        '''get source tables' metadata'''
        source_db_driver = conf.Read_db_config(source_db_node)['driver']
        #db_driver = db_node['driver']
        # str(list).strip('[').strip(']') renders "'A', 'B', ..." — the
        # literal payload for the SQL IN (...) clauses.
        if source_db_driver == '{IBM DB2 ODBC DRIVER}' or source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            source_query = db2_metadata_query.format(
                str(source_schema_list).strip('[').strip(']'),
                str(source_table_list).strip('[').strip(']'))
            print(source_query)
        else:
            source_query = pda_metadata_query.format(
                str(source_schema_list).strip('[').strip(']'),
                str(source_table_list).strip('[').strip(']'))
        print(source_query)
        # The JDBC DB2 driver goes through the external jar; any other
        # driver uses the common (ODBC) path.
        if source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            source_metadata = db_connect.exec_sql_with_jdbc(
                source_db_node, source_user, source_pwd, source_query)
        else:
            source_metadata = db_connect.exec_sql_common(
                source_db_node, source_user, source_pwd, source_query)
        '''table to map'''
        # Group the flat metadata rows into {'SCHEMA.TABLE': [column dict]}.
        source_table_columns_dict = {}
        for item in source_metadata:
            source_table_columns = item['TABLE_SCHEMA'].strip(
            ) + "." + item['TABLE_NAME']
            column_dict = {}
            column_dict['COLUMN_NAME'] = item['COLUMN_NAME']
            column_dict['COLUMN_NUMBER'] = item['COLUMN_NUMBER']
            column_dict['COLUMN_TYPE'] = item['COLUMN_TYPE']
            column_dict['COLUMN_LENGTH'] = item['COLUMN_LENGTH']
            column_dict['KEY_SEQ'] = item['KEY_SEQ']
            if source_table_columns_dict.__contains__(source_table_columns):
                source_table_columns_dict[source_table_columns].append(
                    column_dict)
            else:
                column_list = []
                column_list.append(column_dict)
                source_table_columns_dict[source_table_columns] = column_list
        print(source_table_columns_dict)
        '''Store the table mapping to a temp file'''
        file_name = os.path.join(conf.read_temp_dir(), 'source_metadata.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(source_table_columns_dict, f)
        print("Step 4: Get target tables' column list.")
        '''get target tables' metadata'''
        target_db_driver = conf.Read_db_config(target_db_node)['driver']
        print('target db driver:' + target_db_driver)
        if target_db_driver == '{IBM DB2 ODBC DRIVER}' or target_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            target_query = db2_metadata_query.format(
                str(target_schema_list).strip('[').strip(']'),
                str(target_table_list).strip('[').strip(']'))
            print(target_query)
        else:
            target_query = pda_metadata_query.format(
                str(target_schema_list).strip('[').strip(']'),
                str(target_table_list).strip('[').strip(']'))
        print(target_query)

        if target_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            target_metadata = db_connect.exec_sql_with_jdbc(
                target_db_node, target_user, target_pwd, target_query)
        else:
            target_metadata = db_connect.exec_sql_common(
                target_db_node, target_user, target_pwd, target_query)
        '''table to map'''
        target_table_columns_dict = {}
        for item in target_metadata:
            target_table_columns = item['TABLE_SCHEMA'].strip(
            ) + "." + item['TABLE_NAME']
            column_dict = {}
            column_dict['COLUMN_NAME'] = item['COLUMN_NAME']
            column_dict['COLUMN_NUMBER'] = item['COLUMN_NUMBER']
            # split('(') drops the length suffix of a formatted type,
            # e.g. 'VARCHAR(10)' -> 'VARCHAR'.
            column_dict['COLUMN_TYPE'] = item['COLUMN_TYPE'].split('(')[0]
            column_dict['COLUMN_LENGTH'] = item['COLUMN_LENGTH']
            column_dict['KEY_SEQ'] = item['KEY_SEQ']
            if target_table_columns_dict.__contains__(target_table_columns):
                target_table_columns_dict[target_table_columns].append(
                    column_dict)
            else:
                column_list = []
                column_list.append(column_dict)
                target_table_columns_dict[target_table_columns] = column_list
        print(target_table_columns_dict)
        '''Store the target metadata a temp file'''
        file_name = os.path.join(conf.read_temp_dir(), 'target_metadata.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(target_table_columns_dict, f)
        '''Build source_target_column_mapping'''
        print("step 5: get source/target tables column mapping")
        # Columns are paired purely by ordinal (COLUMN_NUMBER), not by name.
        source_target_column_mapping = []
        for item in source_target_table_mapping:
            source_table = item['SRC_OBJ_NM']
            target_table = item['TRGT_TBL_NM']
            source_columns = source_table_columns_dict[source_table]
            target_columns = target_table_columns_dict[target_table]
            for src_col in source_columns:
                for tar_col in target_columns:
                    if tar_col['COLUMN_NUMBER'] == src_col['COLUMN_NUMBER']:
                        source_target_column_mapping.append({"SOURCE_TABLE": source_table, "TARGET_TABLE": target_table,\
                                                             "SOURCE_COLUMN": src_col['COLUMN_NAME'],\
                                                             "TARGET_COLUMN": tar_col['COLUMN_NAME'],\
                                                             "SOURCE_COLUMN_NUMBER": src_col['COLUMN_NUMBER'],\
                                                             "TARGET_COLUMN_NUMBER": tar_col['COLUMN_NUMBER']})
        print(source_target_column_mapping)
        '''Store to temp'''
        file_name = os.path.join(conf.read_temp_dir(),
                                 'source_target_column_mapping.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(source_target_column_mapping, f)
        '''For each source build key_value mapping of columns'''
        # Re-shape into {source_table: {source_col: target_col}}.
        # NOTE(review): the 'already contains' branch keeps appending to the
        # dict from the previous iteration — correct only while entries for
        # a table are contiguous in the list; confirm that ordering holds.
        source_target_column_mapping_dict = {}
        one_table_src_tgt_col_mapping_dict = {}
        for items in source_target_column_mapping:
            if source_target_column_mapping_dict.__contains__(
                    items['SOURCE_TABLE']):
                one_table_src_tgt_col_mapping_dict[
                    items['SOURCE_COLUMN']] = items['TARGET_COLUMN']
                source_target_column_mapping_dict[
                    items['SOURCE_TABLE']] = one_table_src_tgt_col_mapping_dict
            else:
                one_table_src_tgt_col_mapping_dict = {}
                one_table_src_tgt_col_mapping_dict[
                    items['SOURCE_COLUMN']] = items['TARGET_COLUMN']
                source_target_column_mapping_dict[
                    items['SOURCE_TABLE']] = one_table_src_tgt_col_mapping_dict
        print("source_target_column_mapping_dict" +
              str(source_target_column_mapping_dict))

        print("For each source table get source table sample data")
        for item in source_target_table_mapping:
            source_table = item['SRC_OBJ_NM']
            target_table = item['TRGT_TBL_NM']
            print("Source table name:" + source_table)
            source_key = []
            source_column_list = []
            target_column_list = []
            source_where_condition = conf.Read_where_condition(source_table)
            for row in source_table_columns_dict[source_table]:
                source_column_list.append(row['COLUMN_NAME'])
                # KEY_SEQ != '0' marks a primary-key column (DB2 KEYSEQ).
                if row['KEY_SEQ'] != '0':
                    source_key.append(row['COLUMN_NAME'])

            print('source_column_list:' + str(source_column_list))
            print('source_key:' + str(source_key))
            for row in target_table_columns_dict[target_table]:
                target_column_list.append(row['COLUMN_NAME'])
            print("Target_column_list:" + str(target_column_list))
            source_column_str = str(source_column_list).strip('[').strip(
                ']').replace("'", '')
            target_column_str = str(target_column_list).strip('[').strip(
                ']').replace("'", '')
            print('Source Column str:' + source_column_str)
            print('Target Column str:' + target_column_str)
            source_sample_query_run_flag = False
            target_sample_query_run_flag = False
            # Three sampling strategies: configured WHERE clause, random
            # key-based sample, or full-table select.
            if source_where_condition != 'NULL':
                source_sample_query = "select {} from {} {}".format(
                    source_column_str, source_table, source_where_condition)
                print("source_sample_query:" + source_sample_query)
                target_where_condition = self.source_condition_transfer(
                    source_table, source_where_condition)
                target_sample_query = "select {} from {} {}".format(
                    target_column_str, target_table, target_where_condition)
                print("target_sample_query" + target_sample_query)

            elif len(source_key) != 0:
                # Randomly pick 10 rows, then order by the key columns.
                source_sample_query = "with a as (select RAND()*50 as RANDOM_KEY, {} from {} \
                order by RANDOM_KEY fetch first 10 rows only) select {} from a order by {} asc" \
                    .format(source_column_str, source_table, source_column_str,
                            str(source_key).strip('[').strip(']').replace("'", ''))
                print(source_sample_query)
                # NOTE(review): hardcoded node names and credentials below —
                # move to config/secret store.
                if source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
                    source_sample_data = db_connect.exec_sql_with_jdbc(
                        'siwdb2_jdbc', 'pdaetlg', 'sep09sep',
                        source_sample_query)
                else:
                    source_sample_data = db_connect.exec_sql_common(
                        'xx', 'xx', 'xx', source_sample_query)
                source_sample_query_run_flag = True
                '''format timestamp'''

                # NOTE(review): eval() on a stringified result set — unsafe
                # if the data is untrusted; consider a real (de)serializer.
                source_sample_data_formated = eval(
                    self.Date_time_format_transfer(str(source_sample_data)))
                #print(type(source_sample_data_formated),type(source_sample_data_formated[0]),source_sample_data_formated)
                file_name = os.path.join(conf.read_temp_dir(),
                                         source_table + "_sample.tmp")
                with open(file_name, 'w') as f:
                    json.dump(source_sample_data_formated, f)

                # Build the target WHERE clause from the sampled key values,
                # one IN (...) per key column, AND-ed together.
                target_condition_str = " where "
                target_key_list = []
                for item in source_key:
                    target_key = ''
                    primary_key_value_list = []
                    for row in source_target_column_mapping:
                        if row['SOURCE_COLUMN'] == item and row[
                                'SOURCE_TABLE'] == source_table:
                            target_key = row['TARGET_COLUMN']
                            target_key_list.append(target_key)
                    for row in source_sample_data:
                        primary_key_value_list.append(row[item])
                    if item == source_key[-1]:
                        target_condition_str = target_condition_str + target_key + " in ({})".format(
                            str(primary_key_value_list).strip('[').strip(']'))
                    else:
                        target_condition_str = target_condition_str + target_key + " in ({}) and ".format(
                            str(primary_key_value_list).strip('[').strip(']'))
                # NOTE(review): orders by the *last* target_key only, not
                # target_key_list — confirm this is intended.
                target_condition_str += "order by {} asc".format(
                    str(target_key).strip('[').strip(']').replace("'", ''))
                print(str(target_condition_str))
                target_sample_query = "select {} from {} {}".format(
                    target_column_str, target_table, target_condition_str)
                print(target_sample_query)
            else:
                # No WHERE clause and no key: compare the whole tables.
                source_sample_query = "select {} from {}".format(
                    source_column_str, source_table)
                target_sample_query = "select {} from {}".format(
                    target_column_str, target_table)

            if source_sample_query_run_flag == False:
                print("Source table name:" + source_table)
                # NOTE(review): unconditionally overrides the driver detected
                # above, forcing the JDBC branch — looks like leftover debug
                # code; confirm before relying on the else branch.
                source_db_driver = 'com.ibm.db2.jcc.DB2Driver'
                if source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
                    source_sample_data = db_connect.exec_sql_with_jdbc(
                        'siwdb2_jdbc', 'pdaetlg', 'sep09sep',
                        source_sample_query)
                else:
                    source_sample_data = db_connect.exec_sql_common(
                        'xx', 'xx', 'xx', source_sample_query)
                '''format timestamp'''

                source_sample_data_formated = eval(
                    self.Date_time_format_transfer(str(source_sample_data)))
                #print(type(json.loads(source_sample_data_formated)),json.loads(source_sample_data_formated))
                file_name = os.path.join(conf.read_temp_dir(),
                                         source_table + "_sample.tmp")
                with open(file_name, 'w') as f:
                    json.dump(source_sample_data_formated, f)

            if target_sample_query_run_flag == False:
                print("Target table name:" + target_table)
                if target_db_driver == 'com.ibm.db2.jcc.DB2Driver':
                    target_sample_data = db_connect.exec_sql_with_jdbc(
                        'xx', 'xx', 'xx', target_sample_query)
                else:
                    target_sample_data = db_connect.exec_sql_common(
                        'siwodspda', 'siwgit', 'SIWJul2019JulSIW',
                        target_sample_query)
                print(target_sample_data)
                file_name = os.path.join(conf.read_temp_dir(),
                                         target_table + "_sample.tmp")
                with open(file_name, 'w') as f:
                    json.dump(target_sample_data, f)
                '''validation'''
                # Row-level diff: a source row "matches" when some target row
                # agrees on every mapped column, and vice versa.
                # NOTE(review): compare_flag is read after the inner loop —
                # unbound (NameError) if target_sample_data is empty; confirm.
                source_diff_list = []
                target_diff_list = []
                for source_row in source_sample_data_formated:
                    for target_row in target_sample_data:
                        compare_flag = False
                        for k, v in source_target_column_mapping_dict[
                                source_table].items():
                            if target_row[v] == source_row[k]:
                                compare_flag = True
                            else:
                                compare_flag = False
                                break
                        if compare_flag == True:
                            break
                    if compare_flag == False:
                        source_diff_list.append(source_row)
                    else:
                        pass

                for target_row in target_sample_data:
                    for source_row in source_sample_data_formated:
                        compare_flag = False
                        for k, v in source_target_column_mapping_dict[
                                source_table].items():
                            if source_row[k] == target_row[v]:
                                compare_flag = True
                            else:
                                compare_flag = False
                                break
                        if compare_flag == True:
                            break
                    if compare_flag == False:
                        target_diff_list.append(target_row)
                    else:
                        pass
                print("source_diff_list:" + str(source_diff_list))
                print("target_diff_list:" + str(target_diff_list))