Example #1
import os

def Run_ds_job_on_windows(ds_node, ds_user, ds_pwd, job_name,
                          job_stream_params, **kw):
    # ReadConfig is a project-internal helper assumed to be importable here
    conf = ReadConfig()
    host_info = conf.Read_ds_conf(ds_node)
    cmd_path = conf.Read_DS_command_path()
    job_stream_parameter_list = conf.Read_job_stream_parameter_name_list()

    # assign the job streams to the driver job, pairing each configured
    # parameter name with the corresponding job stream value
    job_stream_appendix = ''
    for param_name, param_value in zip(job_stream_parameter_list,
                                       job_stream_params):
        job_stream_appendix += ' -param ' + param_name + '="' + param_value + '"'

    # assign the other input parameters to the driver job
    params_appendix = ''
    for key, value in kw.items():
        params_appendix += ' -param ' + key + '="' + value + '"'

    cmd_str = cmd_path + 'dsjob' + ' -domain ' + host_info['domain'] \
        + ' -user ' + ds_user + ' -password ' + ds_pwd \
        + ' -server ' + host_info['host'] + ' -run -wait -mode NORMAL ' \
        + job_stream_appendix + params_appendix \
        + ' ' + host_info['project'] + ' ' + job_name
    cmd_str += '\n'
    print("DataStage command: " + cmd_str.replace(ds_pwd, '********'))
    rs = os.popen(cmd=cmd_str, mode='r')
    output = rs.readlines()  # the pipe can only be read once
    print(output)
    return output
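A minimal usage sketch; the node name, credentials, job names, and the extra parameter below are hypothetical placeholders:

# all values below are invented for illustration
job_streams = ['JobA@1@CP1,JobB@2@CP2,', 'JobC@3@CP3,']
output = Run_ds_job_on_windows('dev', 'dsadm', 'secret',
                               'DriverSequence', job_streams,
                               RunDate='2019-07-01')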
Example #2
 def inner(*args,**kwargs):
     if testcase == 'ASCA':
         rc = ReadConfig()
         print('ASCA test case started at:%s' % datetime.datetime.now())
         with open(rc.read_asca_test_description(),'r',encoding='utf-8') as f:
             description = f.read()
         print(description)
         return test_case_func(*args, **kwargs)
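This `inner` function reads like the innermost layer of a parametrized decorator. A sketch of the assumed enclosing structure (`report_test` is an invented name; only `testcase`, `test_case_func`, and `inner` appear in the snippet):

def report_test(testcase):
    # hypothetical outer layers; the snippet above only shows inner()
    def decorator(test_case_func):
        def inner(*args, **kwargs):
            ...  # body as in the snippet above
            return test_case_func(*args, **kwargs)
        return inner
    return decorator

@report_test('ASCA')
def my_asca_test():
    pass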
Example #3
def get_connStr(uid, pwd, db_node):
    conf = ReadConfig()
    db = conf.Read_db_config(db_node)
    if db['driver'] == '{IBM DB2 ODBC DRIVER}':
        con_str = "DRIVER={};DATABASE={};HOSTNAME={};PORT={};UID={};PWD={}".format(db['driver'],db['database'],db['hostname'],db['port'],uid,pwd)
    elif db['driver'] == '{NetezzaSQL}':
        con_str = "DRIVER={};SERVER={};PORT={};DATABASE={};UID={};PWD={}".format(db['driver'],db['hostname'],db['port'],db['database'],uid,pwd)
    else:
        con_str = ''
    return con_str
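A sketch of how the returned string might be consumed, assuming the project connects with pyodbc (the node name and credentials are placeholders):

import pyodbc

con_str = get_connStr('dbuser', 'dbpass', 'sourcedb')  # hypothetical values
if con_str:
    conn = pyodbc.connect(con_str)
    cursor = conn.cursor()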
Example #4
 def inner(*args, **kwargs):
     if testcase == 'jobStreamPositive':
         rc = ReadConfig()
         print('Job Stream test case started at:%s' % datetime.datetime.now())
         with open(rc.read_job_stream_test_description(), 'r', encoding='utf-8') as f:
             description = f.read()
             print(description)
         return test_case_func(*args, **kwargs)
     elif testcase == 'jobStreamNegative':
         print('Job stream negative test case started at: %s' % datetime.datetime.now())
         return test_case_func(*args, **kwargs)
Example #5
import os

def Run_ds_job_on_linux(ds_node, ds_user, ds_pwd, job_name):
    conf = ReadConfig()
    host_info = conf.Read_ds_conf(ds_node)
    cmd_path = conf.Read_DS_command_path()
    # note the leading space before each dsjob option, so the flags do not
    # run into the preceding value
    cmd_str = cmd_path + 'dsjob' + ' -domain ' + host_info['domain'] \
        + ' -user ' + ds_user + ' -password ' + ds_pwd \
        + ' -server ' + host_info['host'] + ' -run ' + job_name
    cmd_str += '\n'
    print("DataStage command: " + cmd_str.replace(ds_pwd, '********'))
    rs = os.popen(cmd=cmd_str, mode='r')
    print(rs.readlines())
Example #6
    def generate_source_target_column_map(self, id, pwd, source_schema,
                                          source_table, target_schema,
                                          target_table):
        db2_column_sample_sql = '''
        SELECT NAME as COL_NM, COLNO as COL_NO
        FROM SYSIBM.SYSCOLUMNS
        WHERE UPPER(TBNAME) IN (SELECT UPPER(NAME) FROM SYSIBM.SYSTABLES WHERE TYPE = 'T')
        AND UPPER(TBCREATOR) = UPPER('{}') -- schema name
        AND UPPER(TBNAME) = UPPER('{}') ORDER BY COLNO
        '''
        pda_column_sample_sql = """
        SELECT ATTNAME as COL_NM, ATTNUM as COL_NO
        FROM _V_RELATION_COLUMN
        WHERE UPPER(TYPE) = 'TABLE'
        AND UPPER(SCHEMA) = UPPER('{}') -- schema name
        AND UPPER(NAME) = UPPER('{}') ORDER BY ATTNUM
        """
        print(db2_column_sample_sql)
        print(pda_column_sample_sql)

        '''read the source data dictionary'''
        conf = ReadConfig()
        source_db_node = conf.Read_source_db_node()
        target_db_node = conf.Read_target_db_node()
        source_db = conf.Read_db_config(source_db_node)
        target_db = conf.Read_db_config(target_db_node)
        # pick the metadata query that matches each side's db type and fill in
        # that side's schema and table name
        if source_db['db_type'] == 'db2':
            source_db_sql = db2_column_sample_sql.format(source_schema, source_table)
        elif source_db['db_type'] == 'pda':
            source_db_sql = pda_column_sample_sql.format(source_schema, source_table)
        else:
            source_db_sql = None
            print("The source db type is invalid")

        if target_db['db_type'] == 'db2':
            target_db_sql = db2_column_sample_sql.format(target_schema, target_table)
        elif target_db['db_type'] == 'pda':
            target_db_sql = pda_column_sample_sql.format(target_schema, target_table)
        else:
            target_db_sql = None
            print("The target db type is invalid")

        '''run against source and target to get their columns'''
        print(source_db_sql)
        print(target_db_sql)
        source_target_mapping = dict()
        rs_source = db_connect.exec_sql_common(source_db_node, id, pwd, source_db_sql)
        rs_target = db_connect.exec_sql_common(target_db_node, id, pwd, target_db_sql)

        # columns are matched positionally: same column number on both sides
        for src_line in rs_source:
            source_column_nm = src_line['COL_NM']
            source_column_no = src_line['COL_NO']
            for tgt_line in rs_target:
                if tgt_line['COL_NO'] == source_column_no:
                    source_target_mapping[source_column_nm] = tgt_line['COL_NM']
        return source_target_mapping
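A sketch of the expected result for a hypothetical table pair (instance name, credentials, schemas, and column names are all invented):

# 'tester' stands for whatever object exposes this method (hypothetical)
mapping = tester.generate_source_target_column_map(
    'uid', 'pwd', 'SRCSCHEMA', 'CUSTOMER', 'TGTSCHEMA', 'CUSTOMER_DIM')
# columns are paired by ordinal position (COL_NO), e.g.:
# {'CUST_ID': 'CUSTOMER_ID', 'CUST_NM': 'CUSTOMER_NAME'}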
Example #7
def get_dependency_job_list(sequence_name):
    conf = ReadConfig()
    job_list = conf.Read_job_list()
    job_stream_list = []
    for job in job_list:
        if job['DEPENDENCY_JOB'] == sequence_name:
            '''known defect: jobs that have no JOB_ID are not properly considered'''
            if job['JOB_ID'] != '' and job['ASCA_CONTROL_POINT_ID'] != '':
                job_stream = job['JOB_NAME'] + "@" + job['JOB_ID'] + "@" + job[
                    'ASCA_CONTROL_POINT_ID']
            else:
                job_stream = job['JOB_NAME']
            job_stream_list.append(job_stream)
    return job_stream_list
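The record layout this function assumes for one conf.Read_job_list() entry (field names from the code, values hypothetical):

job = {
    'JOB_NAME': 'LoadCustomerDim',       # hypothetical
    'JOB_ID': '42',                      # hypothetical
    'ASCA_CONTROL_POINT_ID': 'CP_7',     # hypothetical
    'DEPENDENCY_JOB': 'DriverSequence',  # hypothetical
}
# get_dependency_job_list('DriverSequence') would then return
# ['LoadCustomerDim@42@CP_7'].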
Example #8
 def inner(*args, **kwargs):
     if testcase == 'IWRefresh_positive':
         rc = ReadConfig()
         print('IW Refresh positive test case started at:%s' %
               datetime.datetime.now())
         with open(rc.read_iw_refresh_test_description(),
                   'r',
                   encoding='utf-8') as f:
             description = f.read()
         print(description)
         return test_case_func(*args, **kwargs)
     elif testcase == 'IWRefresh_negative':
         print('IWRefresh negative test case started at: %s' %
               datetime.datetime.now())
         return test_case_func(*args, **kwargs)
Example #9
    def get_job_stream_dependency(self, sequence_name):
        # collect the jobs that depend on sequence_name; when a dependent job
        # is itself a sequence, also add its direct children (one extra level
        # only, despite the variable name)
        recursive_job_list = []
        conf = ReadConfig()
        job_list = conf.Read_job_list()
        for job in job_list:
            if job['DEPENDENCY_JOB'] == sequence_name:  
                recursive_job_list.append(job['JOB_NAME'])
                if job['JOB_TYPE'] == 'Sequence':
                    job_list_2 = conf.Read_job_list()
                    for job_2 in job_list_2:  
                        if job_2['DEPENDENCY_JOB'] == job['JOB_NAME']:
                            recursive_job_list.append(job_2['JOB_NAME'])

        print("Dependent job list" + str(recursive_job_list))
        return recursive_job_list
Example #10
import json
import os

def exec_sql_with_jdbc(db_node, user, pwd, query):
    conf = ReadConfig()
    db = conf.Read_db_config(db_node)
    jdbc_driver = db['driver']
    jdbc_url = db['url']
    jdbc_query = '"' + query + '"'
    java_path = conf.Read_Java_home()
    current_path = os.path.dirname(os.path.realpath(__file__))
    JDBC_path = os.path.join(current_path, '../JDBC/Query_JDBC.jar')
    command = (java_path + '/java -jar ' + JDBC_path + ' ' + jdbc_driver +
               ' ' + jdbc_url + ' ' + user + ' ' + pwd + ' ' + jdbc_query)
    print("\tRunning Command:" + command.replace(pwd, '********'))
    rs = os.popen(cmd=command, mode='r')
    # the helper jar is expected to print the result set as a single JSON line
    query_result = json.loads(rs.readlines()[0])
    return query_result
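Building the command by string concatenation breaks if any argument contains spaces or shell metacharacters; a sketch of the same call using subprocess with an argument list (same assumed jar contract: one JSON line on stdout; the function name is invented):

import json
import subprocess

def exec_sql_with_jdbc_subprocess(java_path, jdbc_path, driver, url, user, pwd, query):
    # arguments are passed as a list, so no shell quoting is needed
    result = subprocess.run(
        [java_path + '/java', '-jar', jdbc_path, driver, url, user, pwd, query],
        capture_output=True, text=True, check=True)
    return json.loads(result.stdout.splitlines()[0])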
Example #11
    def rowcount_test(self, source_id, source_pwd, target_id, target_pwd):
        '''read config file for the table list'''
        conf = ReadConfig()
        table_list = conf.Read_table_list()
        '''define source db,target db'''
        source_db_node = conf.Read_source_db_node()
        target_db_node = conf.Read_target_db_node()
        rowcount_report = []
        fail_count = 0
        for table in table_list:
            rowcount_result = dict()
            source_table = table['SOURCE_SCHEMA'] + '.' + table['SOURCE_TABLE']
            target_table = table['TARGET_SCHEMA'] + '.' + table['TARGET_TABLE']
            source_sql = 'select count(*) as rowcount from {}'.format(source_table)
            target_sql = 'select count(*) as rowcount from {}'.format(target_table)
            '''connect to source and target DB to run the sql'''
            '''run sql in target first'''
            # rows are dict-like, as in the other snippets; the unquoted
            # column alias comes back uppercased
            rs_source = db_connect.exec_sql_common(source_db_node, source_id, source_pwd, source_sql)
            source_rowcount = rs_source[0]['ROWCOUNT']
            rs_target = db_connect.exec_sql_common(target_db_node, target_id, target_pwd, target_sql)
            target_rowcount = rs_target[0]['ROWCOUNT']
            if source_rowcount == target_rowcount:
                status = 'PASS'
            else:
                status = 'FAIL'
                fail_count += 1

            rowcount_result['SOURCE_TABLE'] = source_table
            rowcount_result['TARGET_TABLE'] = target_table
            rowcount_result['SOURCE_ROWCOUNT'] = source_rowcount
            rowcount_result['TARGET_ROWCOUNT'] = target_rowcount
            rowcount_result['STATUS'] = status
            rowcount_report.append(rowcount_result)
        with open("tmp/rowcountreport.csv", "w") as f:
            f.write(str(rowcount_report))

        if fail_count > 0:
            print("The rowcount test failed, see the rowcount report for detail")
            raise TestException.RowcountError()
        else:
            print("The rowcount test passed, see the rowcount report for detail")
Example #12
import os

def Get_job_status(ds_node, ds_id, ds_pwd, job_name):
    conf = ReadConfig()
    host_info = conf.Read_ds_conf(ds_node)
    cmd_path = conf.Read_DS_command_path()
    cmd_str = cmd_path + 'dsjob' + ' -domain ' + host_info['domain'] + ' -user ' + ds_id +' -password ' +ds_pwd \
    +' -server ' + host_info['host'] +' -jobinfo '  \
    +' ' + host_info['project'] +' '+job_name
    cmd_str += '\n'
    print("DataStage command: " + cmd_str.replace(ds_pwd, '********'))
    rs = os.popen(cmd=cmd_str, mode='r')
    status_result = rs.readlines()
    status_dict = dict()
    #dict['Job Name']=job_name
    for line in status_result:
        # dsjob -jobinfo prints lines like "Job Status\t: RUN OK (1)";
        # skip any line that does not follow that key/value layout
        if '\t:' in line:
            key, _, value = line.partition('\t:')
            status_dict[key] = value.strip()
    report_dict = dict({job_name: status_dict})
    #print(report_dict)
    return report_dict
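The shape of the returned report, assuming typical dsjob -jobinfo output (all values illustrative, not captured from a real run):

# hypothetical result of Get_job_status(..., 'LoadCustomerDim'):
report = {
    'LoadCustomerDim': {
        'Job Status': 'RUN OK (1)',
        'Job Start Time': 'Mon Jul  1 02:00:00 2019',
        'Last Run Time': 'Mon Jul  1 02:15:00 2019',
    }
}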
Example #13
def test_pre_action(ds_node, ds_user, ds_pwd):
    '''Define how to run the driver job; if the driver is a DataStage
    sequence, run the code below.'''
    conf = ReadConfig()
    driver = conf.Read_test_run_driver()
    driver_type = driver['driver_type']
    job_stream_param_name_list = conf.Read_job_stream_parameter_name_list()
    job_stream_count = len(job_stream_param_name_list)
    input_parameter = driver['input_parameter']
    driver_sequence = driver['driver_job']
    '''job_stream_count decides how many job streams can run in parallel,
    i.e. how many job stream parameters should be assigned to the driver sequence'''

    if driver_type == 'DataStage':
        '''when the driver is DataStage, assign the necessary parameters to the driver job'''
        job_stream_params = ['' for i in range(job_stream_count)]
        job_stream_list = get_dependency_job_list(driver_sequence)
        # distribute the dependent jobs round-robin over the job stream parameters
        for i in range(len(job_stream_list)):
            param_index = i % job_stream_count
            job_stream_params[param_index] += job_stream_list[i] + ','
        '''generate other parameters; built outside the loop so other_params
        exists even when job_stream_list is empty'''
        other_params = dict()
        if input_parameter != '':
            other_params_list = input_parameter.split(',')
            for param in other_params_list:
                other_params[param.split('=')[0]] = param.split('=')[1]
            print(other_params)
        '''send the job_stream_params to the driver sequence to run, with other input parameters if necessary'''
        DS_Operation.Run_ds_job_on_windows(ds_node, ds_user, ds_pwd,
                                           driver_sequence, job_stream_params,
                                           **other_params)

    # if the driver is a shell script, trigger it with the necessary parameters
    elif driver_type == 'Shell':
        pass
    else:
        pass
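A tiny illustration of the round-robin distribution above (three streams, five hypothetical jobs):

job_stream_count = 3
jobs = ['J1', 'J2', 'J3', 'J4', 'J5']  # hypothetical job names
streams = ['' for _ in range(job_stream_count)]
for i, job in enumerate(jobs):
    streams[i % job_stream_count] += job + ','
print(streams)  # ['J1,J4,', 'J2,J5,', 'J3,']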
Example #14
    def iwefresh_positive_test(self, iwrefresh_db_node, iwrefresh_uid,
                               iwrefresh_pwd):
        '''read config file for the job stream'''
        conf = ReadConfig()
        iwrefresh_db_conf = conf.Read_db_config(iwrefresh_db_node)
        job_list = conf.Read_job_list()
        job_status_report = conf.Read_job_status_report()
        job_iw_control_report = []
        iw_refresh_failed_count = 0
        for job in job_list:
            data_group = job['DATAGROUP_ID']
            iw_refresh_control_status = dict()
            if data_group != '':
                job_name = job['JOB_NAME']
                job_run_status = job_status_report[job_name]['Job Status']
                job_start_time = self.job_status_time_transfer(
                    job_status_report[job_name]['Job Start Time'])
                job_end_time = self.job_status_time_transfer(
                    job_status_report[job_name]['Last Run Time'])
                '''step 1 validate if the parallel job show pass'''
                print("step 1: validate the run status of job: %s ..." % (job_name) +" \nstart time:" +str(job_start_time)+"\nend time:"\
                      + str(job_end_time))
                print(
                    "----------------------------------------------------------------------------------"
                )
                if job_run_status == 'RUN OK (1)':
                    print(
                        "     the job status is %s,the status check passed,go to next step"
                        % job_run_status)
                else:
                    print("The DataGroup Related job status is failed")
                    raise TestException.IWRefreshError()
                '''step 2: validate that the IW Refresh record shows pass, that its start time is
                    later than the job start time, and that its end time is earlier than the job
                    end time'''
                '''get the bludb conf, and trigger the db to run the sql'''

                print("step2: Get iw refresh status from iwrefresh db...")
                print(
                    "----------------------------------------------------------------------------------"
                )
                print("Data group: %s" % data_group)
                sql = "select B.DATAGROUP_NM as DATAGROUP_NM,A.STAT as STAT,A.LOAD_START as LOAD_START," \
                      "A.LOAD_END as LOAD_END from XLSSTG.LOAD_STATUS A inner join XLSSTG.DATA_GROUP B on " \
                      "A.DATAGROUP_ID = B.DATAGROUP_ID " \
                      "where B.DATAGROUP_NM= '{}\' AND A.LOAD_START > '{}' ".format(data_group, job_start_time)
                rs = db_connect.exec_sql_common(iwrefresh_db_node,
                                                iwrefresh_uid, iwrefresh_pwd,
                                                sql)
                print("RUNNING QUERY:%s" % sql)
                if len(rs) != 0:
                    iwrefresh_status = rs[0]['STAT']
                    iwrefresh_start_time = rs[0]['LOAD_START']
                    iwrefresh_end_time = rs[0]['LOAD_END']
                    '''gather info to report'''
                    iw_refresh_control_status['JOB_NM'] = job_name
                    iw_refresh_control_status['JOB_START_TIME'] = str(
                        job_start_time)
                    iw_refresh_control_status['JOB_END_TIME'] = str(
                        job_end_time)
                    iw_refresh_control_status['DATA_GROUP_NM'] = data_group
                    iw_refresh_control_status['IWREFRESH_START_TIME'] = str(
                        iwrefresh_start_time)
                    iw_refresh_control_status['IWREFRESH_END_TIME'] = str(
                        iwrefresh_end_time)
                    iw_refresh_control_status[
                        'IWREFRESH_STATUS_'] = iwrefresh_status
                    job_iw_control_report.append(iw_refresh_control_status)
                    print("IWRefresh status: %s" % iwrefresh_status)
                    '''1. check the iw refresh status'''
                    if iwrefresh_status == 'COMPLETE':
                        print(
                            '''\nWhen the job %s run finished, the data group \"%s\" showed \"%s\", the IW Refresh test passed
                              ''' % (job_name, data_group, iwrefresh_status))
                    else:
                        iw_refresh_failed_count += 1
                        print(
                            '''When the job %s run finished, the data group \"%s\" showed \"%s\",
                         the IW Refresh test failed''' %
                            (job_name, data_group, iwrefresh_status))

                else:
                    iw_refresh_failed_count += 1
                    print(
                        "The IW Refresh control was not triggered when the job %s started, the IW Refresh test failed"
                        % (job_name))
        '''generate the iw refresh report'''
        iw_report = Generate_report()
        iw_report.write_iwefresh_status_to_report(job_iw_control_report)
        iw_report.generate_iwrefresh_positive_report()

        if iw_refresh_failed_count != 0:
            print("one or more table's IWRefresh control failed, "
                  "check the iw_refresh_positive_test_report.xls for detail")
            raise TestException.IWRefreshError()
Example #15
    def asca_test(self,asca_db_node,zos_user,zos_pwd):
        '''read config file for the job stream'''
        conf = ReadConfig()
        job_list = conf.Read_job_list()
        job_status_report = conf.Read_job_status_report()
        asca_control_test_report =[]
        '''Get asca_control_record through jdbc, store the result to the asca_control_dict'''
        '''1. get the asca control id list'''
        asca_control_id_list =[]
        for job in job_list:
            if job['ASCA_CONTROL_POINT_ID'] != '':
                asca_control_id_list.append(job['ASCA_CONTROL_POINT_ID'])
        asca_control_id_string = str(asca_control_id_list).strip('[').strip(']')

        '''2.generate the sql query'''
        print("Step 1: Get asca control result from ASCA.ASCA_CNTROL_RECORD table")
        query = "select JOB_ID,ASCA_CNTL_PT_ID,SRC_ROW_CNT,TRGT_ROW_CNT,\
        SRC_CNTL_AMT,TRGT_CNTL_AMT,ASCA_CNTL_REC_ID,CNTL_STAT,\
        ASCA_CNTL_RUN_DT,ASCA_CNTL_TMS from(\
        SELECT RANK() OVER(PARTITION BY ASCA_CNTL_PT_ID ORDER BY ASCA_CNTL_TMS DESC)\
        AS RANK_NUM,JOB_ID,ASCA_CNTL_PT_ID,SRC_ROW_CNT,TRGT_ROW_CNT,\
        SRC_CNTL_AMT,TRGT_CNTL_AMT,ASCA_CNTL_REC_ID,CNTL_STAT,\
        ASCA_CNTL_RUN_DT,ASCA_CNTL_TMS from ASCA.ASCA_CONTROL_RECORD) AA WHERE AA.RANK_NUM=1\
        and AA.ASCA_CNTL_PT_ID in({})".format(asca_control_id_string)
        print("\tQuery:"+query)

        '''3. Trigger jdbc driver to query the data'''
        asca_control_result = db_connect.exec_sql_with_jdbc(asca_db_node, zos_user, zos_pwd, query)
        #print(asca_control_result)
        print("\tQuery running completed")
        print("Step 2:  start the validation the asca control result...")

        '''For each job, link the job name with the asca_control_result,perform validation and generate the report'''
        for job in job_list:
            if job['ASCA_CONTROL_POINT_ID'] != '':
                job_name = job['JOB_NAME']
                job_asca_cntl_pt_id =job['ASCA_CONTROL_POINT_ID']
                job_run_status = job_status_report[job_name]['Job Status']
                job_last_run_time = self.job_status_time_transfer(job_status_report[job_name]['Last Run Time'])
                print("\tValidated Job Name:"+job_name)
                '''step 1 validate if the parallel job show Complete'''
                if job_run_status == 'RUN OK (1)':
                    print("\t\tJob Status:" + job_run_status)
                else:
                    print("Job Status:" + job_run_status)
                    print("The parallel job status validation failed, the test case does not pass")
                    raise TestException.ASCAControlError()
                '''step 2: validate that the ASCA Control record shows pass and the ASCA CONTROL TMS is after the job's last run time'''
                '''get the asca control result from jdbc_query, with the same asca_control_pt_id'''
                exist_flag = False
                for asca_control_record in asca_control_result:
                    if asca_control_record['ASCA_CNTL_PT_ID'] == job_asca_cntl_pt_id:
                        #print(asca_control_record['ASCA_CNTL_PT_ID']+"vs"+job_asca_cntl_pt_id )
                        asca_control_test_report_row = dict()
                        exist_flag = True
                        asca_control_tms = datetime.datetime.strptime(asca_control_record['ASCA_CNTL_TMS'][0:19], "%Y-%m-%d %H:%M:%S")
                        if asca_control_tms > job_last_run_time:
                            asca_control_test_report_row['ASCA_CNTL_PT_ID'] = job_asca_cntl_pt_id
                            asca_control_test_report_row['JOB_NAME'] = job_name
                            asca_control_test_report_row['JOB_STATUS'] = job_run_status
                            asca_control_test_report_row['JOB_LAST_RUN_TIME'] = str(job_last_run_time)
                            asca_control_test_report_row['SOURCE_ROW_COUNT'] = asca_control_record['SRC_ROW_CNT']
                            asca_control_test_report_row['TARGET_ROW_COUNT'] = asca_control_record['TRGT_ROW_CNT']
                            asca_control_test_report_row['ASCA_CONTROL_STATUS'] = asca_control_record['CNTL_STAT']
                            asca_control_test_report_row['ASCA_CONTROL_TMS'] = asca_control_record['ASCA_CNTL_TMS']
                            asca_control_test_report_row['ASCA_TEST_RESULT'] = asca_control_record['CNTL_STAT']
                        else:
                            asca_control_test_report_row['ASCA_CNTL_PT_ID'] = job_asca_cntl_pt_id
                            asca_control_test_report_row['JOB_NAME'] = job_name
                            asca_control_test_report_row['JOB_STATUS'] = job_run_status
                            asca_control_test_report_row['JOB_LAST_RUN_TIME'] = str(job_last_run_time)
                            asca_control_test_report_row['SOURCE_ROW_COUNT'] = 'NULL'
                            asca_control_test_report_row['TARGET_ROW_COUNT'] = 'NULL'
                            asca_control_test_report_row['ASCA_CONTROL_STATUS'] = 'NULL'
                            asca_control_test_report_row['ASCA_CONTROL_TMS'] = 'NULL'
                            asca_control_test_report_row['ASCA_TEST_RESULT'] = 'FAIL'
                        asca_control_test_report.append(asca_control_test_report_row)
                        print("\t\tASCA_CONTROL_POINT_ID:" + asca_control_test_report_row['ASCA_CNTL_PT_ID'])
                        print("\t\tSOURCE_TABLE_ROW_COUNT:" + asca_control_test_report_row['SOURCE_ROW_COUNT'])
                        print("\t\tTARGET_TABLE_ROW_COUNT" + asca_control_test_report_row['TARGET_ROW_COUNT'])
                        print("\t\tRow Count Validate result:" + asca_control_test_report_row['ASCA_TEST_RESULT'])
                    #print("When the control id is"+job_asca_cntl_pt_id+ "asca_control_test_report"+str(asca_control_test_report))
                if not exist_flag:
                    asca_control_test_report_row = dict()
                    asca_control_test_report_row['ASCA_CNTL_PT_ID'] = job_asca_cntl_pt_id
                    asca_control_test_report_row['JOB_NAME'] = job_name
                    asca_control_test_report_row['JOB_STATUS'] = job_run_status
                    asca_control_test_report_row['JOB_LAST_RUN_TIME'] = str(job_last_run_time)
                    asca_control_test_report_row['SOURCE_ROW_COUNT'] = 'NULL'
                    asca_control_test_report_row['TARGET_ROW_COUNT'] = 'NULL'
                    asca_control_test_report_row['ASCA_CONTROL_STATUS'] = 'NULL'
                    asca_control_test_report_row['ASCA_CONTROL_TMS'] = 'NULL'
                    asca_control_test_report_row['ASCA_TEST_RESULT'] = 'FAIL'
                    asca_control_test_report.append(asca_control_test_report_row)
                    print("\t\tASCA_CONTROL_POINT_ID:" + asca_control_test_report_row['ASCA_CNTL_PT_ID'])
                    print("\t\tSOURCE_TABLE_ROW_COUNT:" + asca_control_test_report_row['SOURCE_ROW_COUNT'])
                    print("\t\tTARGET_TABLE_ROW_COUNT" + asca_control_test_report_row['TARGET_ROW_COUNT'])
                    print("\t\tRow Count Validate result:" + asca_control_test_report_row['ASCA_TEST_RESULT'])
        # after all the records are inserted into asca_control_test_report:
        '''Write dict to json file, then generate the xls report file through json file'''
        #print(asca_control_test_report)
        gen_asca = Generate_report()
        gen_asca.write_asca_status_to_json(asca_control_test_report)
        gen_asca.generate_asca_control_test_report()

        '''validate the test case result'''
        failed_count=0
        for item in asca_control_test_report:
            if item['ASCA_TEST_RESULT'] == 'FAIL':
                failed_count += 1
        if failed_count > 0:
            print("One or more jobs' ASCA control did not pass, "
                  "check the asca_control_test_report.xls for detail")
            raise TestException.ASCAControlError()
        else:
            print("All jobs' ASCA control results passed, the ASCA Control test passed.")
Example #16
    def rowcount_test(asca_db_node, zos_user, zos_pwd):
        # NOTE: defined without self, so it is invoked on the class itself
        '''Step 1 get source/target table list from asca.asca_control_record'''
        conf = ReadConfig()
        job_list = conf.Read_job_list()
        '''Get asca_control_record through jdbc, store the result to the asca_control_dict'''
        '''1. get the asca control id list'''
        asca_control_id_list = []
        for job in job_list:
            if job['ASCA_CONTROL_POINT_ID'] != '':
                asca_control_id_list.append(job['ASCA_CONTROL_POINT_ID'])
        asca_control_id_string = str(asca_control_id_list).strip('[').strip(
            ']')
        '''2. generate the sql query'''
        print(
            "Step 1: Get the source/target table mapping from the ASCA.ASCA_CONTROL_POINT table"
        )
        query = "select SRC_OBJ_NM,TRGT_TBL_NM from ASCA.ASCA_control_point \
                WHERE ASCA_CNTL_PT_ID in ({})".format(asca_control_id_string)
        print("\tQuery:" + query)
        '''3. Trigger jdbc driver to query the data'''
        source_target_mapping = db_connect.exec_sql_with_jdbc(
            asca_db_node, zos_user, zos_pwd, query)
        '''Store the table mapping to a temp file'''
        file_name = os.path.join(conf.read_temp_dir(),
                                 'source_target_mapping.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(source_target_mapping, f)

        print(source_target_mapping)
        print("\tQuery running completed")
        print("Step 2:  start the get source table row count...")
        '''generate query'''
        source_db_node = conf.Read_source_db_node()
        query_str = ''
        for i in range(len(source_target_mapping)):
            if i < len(source_target_mapping) - 1:
                query_str += "select '" + source_target_mapping[i][
                    'SRC_OBJ_NM'] + "' as TABLE_NM, count(*) as ROWCOUNT from " + source_target_mapping[
                        i]['SRC_OBJ_NM'] + " union "
            else:
                query_str += "select '" + source_target_mapping[i][
                    'SRC_OBJ_NM'] + "' as TABLE_NM, count(*) as ROWCOUNT from " + source_target_mapping[
                        i]['SRC_OBJ_NM']
        print(query_str)
        '''run the query '''
        source_rowcount = db_connect.exec_sql_with_jdbc(
            source_db_node, zos_user, zos_pwd, query_str)
        print(source_rowcount)

        print("Step 3: start get target table row count...")
        '''generate target query'''
        target_query = ''
        for i in range(len(source_target_mapping)):
            if i < len(source_target_mapping) - 1:
                target_query += "select '" + source_target_mapping[i][
                    'TRGT_TBL_NM'] + "' as TABLE_NM, count(*) as ROWCOUNT from " + source_target_mapping[
                        i]['TRGT_TBL_NM'] + " union "
            else:
                target_query += "select '" + source_target_mapping[i][
                    'TRGT_TBL_NM'] + "' as TABLE_NM, count(*) as ROWCOUNT from " + source_target_mapping[
                        i]['TRGT_TBL_NM']
        print(target_query)
        '''get target db node'''
        target_db_node = conf.Read_target_db_node()
        db_conf = conf.Read_db_config(target_db_node)
        db_driver = db_conf['driver']
        print(db_driver)
        if db_driver == 'com.ibm.db2.jcc.DB2Driver':
            '''use jdbc to run query'''
            target_rowcount = db_connect.exec_sql_with_jdbc(
                target_db_node, zos_user, zos_pwd, target_query)
        else:
            '''use common driver to run query'''
            target_rowcount = db_connect.exec_sql_common(
                target_db_node, 'siwsit', 'SIWJul2019JulSIW', target_query)
        print(target_rowcount)
        '''Step 4: validation'''
        print("Step 4: validation")
        Rowcount_test_result = []
        for item in source_target_mapping:
            rowcount_record = {}
            rowcount_record['SOURCE_TABLE'] = item['SRC_OBJ_NM']
            rowcount_record['TARGET_TABLE'] = item['TRGT_TBL_NM']
            for element in source_rowcount:
                if element['TABLE_NM'] == item['SRC_OBJ_NM']:
                    rowcount_record['SOURCE_ROWCOUNT'] = str(
                        element['ROWCOUNT'])
            for element in target_rowcount:
                if element['TABLE_NM'] == item['TRGT_TBL_NM']:
                    rowcount_record['TARGET_ROWCOUNT'] = str(
                        element['ROWCOUNT'])
            rowcount_record['TEST_RESULT'] = "PASS" if (
                rowcount_record['SOURCE_ROWCOUNT']
                == rowcount_record['TARGET_ROWCOUNT']) else "FAIL"
            print("Source table name:" + rowcount_record['SOURCE_TABLE'])
            print("Target table name:" + rowcount_record['TARGET_TABLE'])
            print("Source table rowcount:" +
                  rowcount_record['SOURCE_ROWCOUNT'])
            print("Target table rowcount:" +
                  rowcount_record['TARGET_ROWCOUNT'])
            print("Row count test result:" + rowcount_record['TEST_RESULT'])
            Rowcount_test_result.append(rowcount_record)
        print(Rowcount_test_result)
        '''generate report'''
        gen_rowcount = Generate_report()
        gen_rowcount.write_row_count_status_to_json(Rowcount_test_result)
        gen_rowcount.generate_row_count_test_report()
        '''validate the test case result'''
        failed_count = 0
        for item in Rowcount_test_result:
            if item['TEST_RESULT'] == 'FAIL':
                failed_count += 1
        if failed_count > 0:
            print(
                "One or more tables' rowcounts between source and target mismatch, the row count test failed; "
                "check the row_count_test_report.xls for detail")
            raise TestException.RowcountError()
        else:
            print(
                "All tables' row counts between source and target matched, the row count test passed."
            )
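For two mapped tables the loop above would build a source query like the following (schema and table names invented):

# with source_target_mapping = [{'SRC_OBJ_NM': 'S1.T1', ...}, {'SRC_OBJ_NM': 'S1.T2', ...}]
# the loop produces:
query_str = ("select 'S1.T1' as TABLE_NM, count(*) as ROWCOUNT from S1.T1"
             " union "
             "select 'S1.T2' as TABLE_NM, count(*) as ROWCOUNT from S1.T2")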
Example #17
import sys

def main_job(args):
    if args[1] == 'positive_test_pre_action':
        test_pre_action.test_pre_action(args[2], args[3], args[4])
        return

    if args[1] == 'job_stream_test':
        if args[2] == 'positive':
            conf = ReadConfig()
            driver_sequence = conf.Read_Driver_Sequence()
            job_stream_test = Job_stream_test()
            print("driver_sequence:" + driver_sequence)
            ds_node = args[3]
            ds_id = args[4]
            ds_pwd = args[5]
            try:
                job_stream_test.job_stream_positive_test(
                    ds_node, ds_id, ds_pwd, driver_sequence)
            except JobStreamError as e:
                print(e.message)
                sys.exit(1)
        elif args[2] == 'negative':
            # NOTE: the negative branch currently reuses the positive test entry point
            Job_stream_test.job_stream_positive_test()
        else:
            print('The test type is not valid')
        return

    if args[1] == 'iw_refresh_test':
        if args[2] == 'positive':
            iw_refresh_db_node = args[3]
            iw_db_user = args[4]
            iw_db_pwd = args[5]
            try:
                iw_test = IWRefresh_test()
                print('positive')
                iw_test.iwefresh_positive_test(iw_refresh_db_node, iw_db_user,
                                               iw_db_pwd)
            except JobStreamError as e:
                print(e.message)
                sys.exit(1)
        elif args[2] == 'negative':
            iw_test = IWRefresh_test()
            iw_test.iwefresh_negative_test()
        else:
            print('The test type is not valid')
        return

    if args[1] == 'asca_control_test':
        uid = args[2]
        pwd = args[3]
        try:
            asca_test = ASCA_test()
            conf = ReadConfig()
            asca_db_node = conf.Read_asca_db_node()
            asca_test.asca_test(asca_db_node, uid, pwd)
        except ASCAControlError as e:
            print(e.message)
            sys.exit(1)
        return

    if args[1] == 'rowcount_test':
        asca_uid = args[2]
        asca_pwd = args[3]
        try:
            conf = ReadConfig()
            asca_db_node = conf.Read_asca_db_node()
            # rowcount_test is defined without self, so it is called on the class
            Rowcount_test.rowcount_test(asca_db_node, asca_uid, asca_pwd)
        except RowcountError as e:
            print(e.message)
            sys.exit(1)
        return
    else:
        print('The test case is not valid, please check your parameters')
    def sample_data_test(self, source_db_node, source_user, source_pwd,
                         target_db_node, target_user, target_pwd):
        print("Get necessary metadata from source and target")
        """Step 1 get source/target table list from source target table mapping"""
        db2_metadata_query = "SELECT NAME as COLUMN_NAME,TBNAME as TABLE_NAME,TBCREATOR AS TABLE_SCHEMA,COLNO AS COLUMN_NUMBER,COLTYPE AS COLUMN_TYPE,LENGTH AS COLUMN_LENGTH,KEYSEQ AS KEY_SEQ \
                            FROM SYSIBM.SYSCOLUMNS \
                            WHERE UPPER(TBNAME) IN (SELECT UPPER(NAME) FROM SYSIBM.SYSTABLES WHERE TYPE = 'T') AND \
                            UPPER(TBCREATOR) in ({}) \
                            AND UPPER (TBNAME) in ({}) order by COLNO "

        pda_metadata_query = "SELECT ATTNAME AS COLUMN_NAME,NAME AS TABLE_NAME,SCHEMA AS TABLE_SCHEMA,ATTNUM AS COLUMN_NUMBER,FORMAT_TYPE AS COLUMN_TYPE,ATTCOLLENG AS COLUMN_LENGTH,'0' AS KEY_SEQ \
                            FROM _V_RELATION_COLUMN \
                            WHERE UPPER(TYPE) = 'TABLE' AND \
                            UPPER(SCHEMA) in ({}) \
                            AND UPPER(NAME) in ({}) "

        conf = ReadConfig()
        source_target_table_mapping = conf.read_source_target_table_mapping()
        print(source_target_table_mapping)
        '''Get source and target db metadata'''
        print("Step 1: Get source table list.")
        print("Step 2: Get source tables' column list to file")
        source_schema_list = []
        target_schema_list = []
        source_table_list = []
        target_table_list = []
        for item in source_target_table_mapping:
            source_schema_list.append(item['SRC_OBJ_NM'].split('.')[0])
            source_table_list.append(item['SRC_OBJ_NM'].split('.')[1])
            target_schema_list.append(item['TRGT_TBL_NM'].split('.')[0])
            target_table_list.append(item['TRGT_TBL_NM'].split('.')[1])
        source_schema_list = list(set(source_schema_list))
        target_schema_list = list(set(target_schema_list))
        source_table_list = list(set(source_table_list))
        target_table_list = list(set(target_table_list))
        print("Step 3: Get target table list.")
        '''get source tables' metadata'''
        source_db_driver = conf.Read_db_config(source_db_node)['driver']
        #db_driver = db_node['driver']
        if source_db_driver == '{IBM DB2 ODBC DRIVER}' or source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            source_query = db2_metadata_query.format(
                str(source_schema_list).strip('[').strip(']'),
                str(source_table_list).strip('[').strip(']'))
        else:
            source_query = pda_metadata_query.format(
                str(source_schema_list).strip('[').strip(']'),
                str(source_table_list).strip('[').strip(']'))
        print(source_query)
        if source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            source_metadata = db_connect.exec_sql_with_jdbc(
                source_db_node, source_user, source_pwd, source_query)
        else:
            source_metadata = db_connect.exec_sql_common(
                source_db_node, source_user, source_pwd, source_query)
        '''table to map'''
        source_table_columns_dict = {}
        for item in source_metadata:
            source_table_columns = item['TABLE_SCHEMA'].strip(
            ) + "." + item['TABLE_NAME']
            column_dict = {}
            column_dict['COLUMN_NAME'] = item['COLUMN_NAME']
            column_dict['COLUMN_NUMBER'] = item['COLUMN_NUMBER']
            column_dict['COLUMN_TYPE'] = item['COLUMN_TYPE']
            column_dict['COLUMN_LENGTH'] = item['COLUMN_LENGTH']
            column_dict['KEY_SEQ'] = item['KEY_SEQ']
            if source_table_columns in source_table_columns_dict:
                source_table_columns_dict[source_table_columns].append(
                    column_dict)
            else:
                source_table_columns_dict[source_table_columns] = [column_dict]
        print(source_table_columns_dict)
        '''Store the table mapping to a temp file'''
        file_name = os.path.join(conf.read_temp_dir(), 'source_metadata.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(source_table_columns_dict, f)
        print("Step 4: Get target tables' column list.")
        '''get target tables' metadata'''
        target_db_driver = conf.Read_db_config(target_db_node)['driver']
        print('target db driver:' + target_db_driver)
        if target_db_driver == '{IBM DB2 ODBC DRIVER}' or target_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            target_query = db2_metadata_query.format(
                str(target_schema_list).strip('[').strip(']'),
                str(target_table_list).strip('[').strip(']'))
        else:
            target_query = pda_metadata_query.format(
                str(target_schema_list).strip('[').strip(']'),
                str(target_table_list).strip('[').strip(']'))
        print(target_query)

        if target_db_driver == 'com.ibm.db2.jcc.DB2Driver':
            target_metadata = db_connect.exec_sql_with_jdbc(
                target_db_node, target_user, target_pwd, target_query)
        else:
            target_metadata = db_connect.exec_sql_common(
                target_db_node, target_user, target_pwd, target_query)
        '''table to map'''
        target_table_columns_dict = {}
        for item in target_metadata:
            target_table_columns = item['TABLE_SCHEMA'].strip(
            ) + "." + item['TABLE_NAME']
            column_dict = {}
            column_dict['COLUMN_NAME'] = item['COLUMN_NAME']
            column_dict['COLUMN_NUMBER'] = item['COLUMN_NUMBER']
            column_dict['COLUMN_TYPE'] = item['COLUMN_TYPE'].split('(')[0]
            column_dict['COLUMN_LENGTH'] = item['COLUMN_LENGTH']
            column_dict['KEY_SEQ'] = item['KEY_SEQ']
            if target_table_columns in target_table_columns_dict:
                target_table_columns_dict[target_table_columns].append(
                    column_dict)
            else:
                target_table_columns_dict[target_table_columns] = [column_dict]
        print(target_table_columns_dict)
        '''Store the target metadata a temp file'''
        file_name = os.path.join(conf.read_temp_dir(), 'target_metadata.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(target_table_columns_dict, f)
        '''Build source_target_column_mapping'''
        print("step 5: get source/target tables column mapping")
        source_target_column_mapping = []
        for item in source_target_table_mapping:
            source_table = item['SRC_OBJ_NM']
            target_table = item['TRGT_TBL_NM']
            source_columns = source_table_columns_dict[source_table]
            target_columns = target_table_columns_dict[target_table]
            for src_col in source_columns:
                for tar_col in target_columns:
                    if tar_col['COLUMN_NUMBER'] == src_col['COLUMN_NUMBER']:
                        source_target_column_mapping.append({"SOURCE_TABLE": source_table, "TARGET_TABLE": target_table,\
                                                             "SOURCE_COLUMN": src_col['COLUMN_NAME'],\
                                                             "TARGET_COLUMN": tar_col['COLUMN_NAME'],\
                                                             "SOURCE_COLUMN_NUMBER": src_col['COLUMN_NUMBER'],\
                                                             "TARGET_COLUMN_NUMBER": tar_col['COLUMN_NUMBER']})
        print(source_target_column_mapping)
        '''Store to temp'''
        file_name = os.path.join(conf.read_temp_dir(),
                                 'source_target_column_mapping.tmp')
        print(file_name)
        with open(file_name, 'w') as f:
            json.dump(source_target_column_mapping, f)
        '''for each source table build a key/value mapping of its columns'''
        # NOTE: this assumes the mapping rows are grouped by SOURCE_TABLE
        source_target_column_mapping_dict = {}
        one_table_src_tgt_col_mapping_dict = {}
        for items in source_target_column_mapping:
            if items['SOURCE_TABLE'] not in source_target_column_mapping_dict:
                one_table_src_tgt_col_mapping_dict = {}
            one_table_src_tgt_col_mapping_dict[
                items['SOURCE_COLUMN']] = items['TARGET_COLUMN']
            source_target_column_mapping_dict[
                items['SOURCE_TABLE']] = one_table_src_tgt_col_mapping_dict
        print("source_target_column_mapping_dict" +
              str(source_target_column_mapping_dict))

        print("For each source table get source table sample data")
        for item in source_target_table_mapping:
            source_table = item['SRC_OBJ_NM']
            target_table = item['TRGT_TBL_NM']
            print("Source table name:" + source_table)
            source_key = []
            source_column_list = []
            target_column_list = []
            source_where_condition = conf.Read_where_condition(source_table)
            for row in source_table_columns_dict[source_table]:
                source_column_list.append(row['COLUMN_NAME'])
                if row['KEY_SEQ'] != '0':
                    source_key.append(row['COLUMN_NAME'])

            print('source_column_list:' + str(source_column_list))
            print('source_key:' + str(source_key))
            for row in target_table_columns_dict[target_table]:
                target_column_list.append(row['COLUMN_NAME'])
            print("Target_column_list:" + str(target_column_list))
            source_column_str = str(source_column_list).strip('[').strip(
                ']').replace("'", '')
            target_column_str = str(target_column_list).strip('[').strip(
                ']').replace("'", '')
            print('Source Column str:' + source_column_str)
            print('Target Column str:' + target_column_str)
            source_sample_query_run_flag = False
            target_sample_query_run_flag = False
            if source_where_condition != 'NULL':
                source_sample_query = "select {} from {} {}".format(
                    source_column_str, source_table, source_where_condition)
                print("source_sample_query:" + source_sample_query)
                target_where_condition = self.source_condition_transfer(
                    source_table, source_where_condition)
                target_sample_query = "select {} from {} {}".format(
                    target_column_str, target_table, target_where_condition)
                print("target_sample_query" + target_sample_query)

            elif len(source_key) != 0:
                source_sample_query = "with a as (select RAND()*50 as RANDOM_KEY, {} from {} \
                order by RANDOM_KEY fetch first 10 rows only) select {} from a order by {} asc" \
                    .format(source_column_str, source_table, source_column_str,
                            str(source_key).strip('[').strip(']').replace("'", ''))
                print(source_sample_query)
                # use the connection details passed into this method rather
                # than hardcoded node names and credentials
                if source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
                    source_sample_data = db_connect.exec_sql_with_jdbc(
                        source_db_node, source_user, source_pwd,
                        source_sample_query)
                else:
                    source_sample_data = db_connect.exec_sql_common(
                        source_db_node, source_user, source_pwd,
                        source_sample_query)
                source_sample_query_run_flag = True
                '''format timestamp'''

                # eval() re-parses the reformatted repr string;
                # ast.literal_eval would be a safer choice here
                source_sample_data_formated = eval(
                    self.Date_time_format_transfer(str(source_sample_data)))
                #print(type(source_sample_data_formated),type(source_sample_data_formated[0]),source_sample_data_formated)
                file_name = os.path.join(conf.read_temp_dir(),
                                         source_table + "_sample.tmp")
                with open(file_name, 'w') as f:
                    json.dump(source_sample_data_formated, f)

                target_condition_str = " where "
                target_key_list = []
                for item in source_key:
                    target_key = ''
                    primary_key_value_list = []
                    for row in source_target_column_mapping:
                        if row['SOURCE_COLUMN'] == item and row[
                                'SOURCE_TABLE'] == source_table:
                            target_key = row['TARGET_COLUMN']
                            target_key_list.append(target_key)
                    for row in source_sample_data:
                        primary_key_value_list.append(row[item])
                    if item == source_key[-1]:
                        target_condition_str = target_condition_str + target_key + " in ({})".format(
                            str(primary_key_value_list).strip('[').strip(']'))
                    else:
                        target_condition_str = target_condition_str + target_key + " in ({}) and ".format(
                            str(primary_key_value_list).strip('[').strip(']'))
                target_condition_str += " order by {} asc".format(
                    str(target_key_list).strip('[').strip(']').replace("'", ''))
                print(str(target_condition_str))
                target_sample_query = "select {} from {} {}".format(
                    target_column_str, target_table, target_condition_str)
                print(target_sample_query)
            else:
                source_sample_query = "select {} from {}".format(
                    source_column_str, source_table)
                target_sample_query = "select {} from {}".format(
                    target_column_str, target_table)

            if not source_sample_query_run_flag:
                print("Source table name:" + source_table)
                if source_db_driver == 'com.ibm.db2.jcc.DB2Driver':
                    source_sample_data = db_connect.exec_sql_with_jdbc(
                        source_db_node, source_user, source_pwd,
                        source_sample_query)
                else:
                    source_sample_data = db_connect.exec_sql_common(
                        source_db_node, source_user, source_pwd,
                        source_sample_query)
                '''format timestamp'''

                source_sample_data_formated = eval(
                    self.Date_time_format_transfer(str(source_sample_data)))
                #print(type(json.loads(source_sample_data_formated)),json.loads(source_sample_data_formated))
                file_name = os.path.join(conf.read_temp_dir(),
                                         source_table + "_sample.tmp")
                with open(file_name, 'w') as f:
                    json.dump(source_sample_data_formated, f)

            if not target_sample_query_run_flag:
                print("Target table name:" + target_table)
                if target_db_driver == 'com.ibm.db2.jcc.DB2Driver':
                    target_sample_data = db_connect.exec_sql_with_jdbc(
                        target_db_node, target_user, target_pwd,
                        target_sample_query)
                else:
                    target_sample_data = db_connect.exec_sql_common(
                        target_db_node, target_user, target_pwd,
                        target_sample_query)
                print(target_sample_data)
                file_name = os.path.join(conf.read_temp_dir(),
                                         target_table + "_sample.tmp")
                with open(file_name, 'w') as f:
                    json.dump(target_sample_data, f)
                '''validation'''
                source_diff_list = []
                target_diff_list = []
                for source_row in source_sample_data_formated:
                    compare_flag = False
                    for target_row in target_sample_data:
                        compare_flag = False
                        for k, v in source_target_column_mapping_dict[
                                source_table].items():
                            if target_row[v] == source_row[k]:
                                compare_flag = True
                            else:
                                compare_flag = False
                                break
                        if compare_flag:
                            break
                    if not compare_flag:
                        source_diff_list.append(source_row)

                for target_row in target_sample_data:
                    compare_flag = False
                    for source_row in source_sample_data_formated:
                        compare_flag = False
                        for k, v in source_target_column_mapping_dict[
                                source_table].items():
                            if source_row[k] == target_row[v]:
                                compare_flag = True
                            else:
                                compare_flag = False
                                break
                        if compare_flag:
                            break
                    if not compare_flag:
                        target_diff_list.append(target_row)
                print("source_diff_list:" + str(source_diff_list))
                print("target_diff_list:" + str(target_diff_list))


if __name__ == "__main__":

    conf = ReadConfig()
    sample_data = Sample_data_test()
    sample_data.sample_data_test('siwdb2_jdbc', 'pdaetlg', 'sep09sep',
                                 'siwodspda', 'siwgit', 'SIWJul2019JulSIW')

    #source_target_table_mapping = conf.read_source_target_table_mapping()
    #print(source_target_table_mapping)
    #source_metadata = json.load(open('../tmp/source_metadata.tmp','r'))
    #print(source_metadata)
    #target_metadata = json.load(open('../tmp/target_metadata.tmp','r'))
    #print(target_metadata)
    '''
    source_target_column_mapping = json.load(open('../tmp/source_target_column_mapping.tmp', 'r'))

    #str = "[{'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'BCP Elimination                                                                                     ', 'REV_COST_CAT_CD': 'C0110', 'REV_COST_MINOR_CD': '0110', 'UPDATED_AT_TS': '2007-04-02 06:44:39.696625', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'H/W Maintenance                                                                                     ', 'REV_COST_CAT_CD': 'C0133', 'REV_COST_MINOR_CD': '0133', 'UPDATED_AT_TS': '2007-04-02 06:44:39.696625', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'Rated Services                                                                                      ', 'REV_COST_CAT_CD': 'C0138', 'REV_COST_MINOR_CD': '0138', 'UPDATED_AT_TS': '2007-04-02 06:44:39.696625', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'Shared Infrastructure                                                                               ', 'REV_COST_CAT_CD': 'C0139', 'REV_COST_MINOR_CD': '0139', 'UPDATED_AT_TS': '2007-04-02 06:44:39.696625', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'Over/Under Allocation                                                                               ', 'REV_COST_CAT_CD': 'C0140', 'REV_COST_MINOR_CD': '0140', 'UPDATED_AT_TS': '2007-04-02 06:44:39.696625', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'Provisions                                                                                          ', 'REV_COST_CAT_CD': 'C0141', 'REV_COST_MINOR_CD': '0141', 'UPDATED_AT_TS': '2007-04-02 06:44:39.696625', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'Transition Amortization                                                                             ', 'REV_COST_CAT_CD': 'C0144', 'REV_COST_MINOR_CD': '0144', 'UPDATED_AT_TS': '2011-04-13 20:29:35.555406', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'NON-OPERATING ACQUISITION RELATED CHARGES                                                           ', 'REV_COST_CAT_CD': 'C0294', 'REV_COST_MINOR_CD': '0294', 'UPDATED_AT_TS': '2011-01-10 16:32:50.29779', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'Non-Operating Activity for Pension                                                                  ', 'REV_COST_CAT_CD': 'C0297', 'REV_COST_MINOR_CD': '0297', 'UPDATED_AT_TS': '2011-01-10 16:38:20.614118', 'UPDATED_BY_CNUM': '          '}, {'REV_COST_TYP_CD': 'C', 'STATUS': 'O', 'REV_COST_CAT_DESC': 'Expense Recovery Cost                                                                               ', 'REV_COST_CAT_CD': 'C0600', 'REV_COST_MINOR_CD': '0600', 'UPDATED_AT_TS': '2007-04-02 06:44:39.696625', 'UPDATED_BY_CNUM': '          '}]"
    #result = sample_data.Date_time_format_transfer(str)

def test_pre_action(ds_node, ds_user, ds_pwd):
    # NOTE: this example is truncated; the signature is reconstructed from the
    # __main__ call below, and driver_sequence, driver_type, input_parameter and
    # job_stream_count are presumably read from config in the omitted part.
    ...

    if driver_type == 'DataStage':
        '''when the driver is DataStage, assign the necessary parameters to the driver job'''
        job_stream_params = ['' for i in range(job_stream_count)]
        job_stream_list = get_dependency_job_list(driver_sequence)
        for i in range(len(job_stream_list)):
            param_index = i % job_stream_count
            job_stream_params[param_index] += job_stream_list[i] + ','
        ''' generate other parameters (done once, outside the loop above) '''
        other_params = dict()
        if input_parameter != '':
            other_params_list = input_parameter.split(',')
            for param in other_params_list:
                key, value = param.split('=', 1)
                other_params[key] = value
            print(other_params)
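        # e.g. input_parameter = 'RUN_DT=2019-09-09,ENV=dev' (hypothetical names)
        # yields other_params == {'RUN_DT': '2019-09-09', 'ENV': 'dev'}, and with
        # job_stream_count = 2 the round-robin fill above packs jobs [j1, j2, j3]
        # into job_stream_params == ['j1,j3,', 'j2,'] (note the trailing commas).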
        ''' send the job_stream_params to the driver sequence to run, input other parameters if necessary '''
        DS_Operation.Run_ds_job_on_windows(ds_node, ds_user, ds_pwd,
                                           driver_sequence, job_stream_params,
                                           **other_params)

    # if the driver is Shell, trigger the shell script with the necessary parameters
    elif driver_type == 'Shell':
        pass
    else:
        pass


if __name__ == "__main__":
    conf = ReadConfig()
    sequence_nm = conf.Read_Driver_Sequence()
    print(get_dependency_job_list(sequence_nm))
    test_pre_action('dev', 'dsdev', 'Jan2019Jan')
Beispiel #21
0
import json

import xlwt  # third-party library used to write the .xls reports below

from read_config import ReadConfig  # project-local config reader; import path assumed


class Generate_report:
    conf = ReadConfig()
    job_positive_status_json = conf.read_stream_positive_status_report_file()
    iwrefresh_status_json = conf.read_iw_refresh_status_report_file()
    asca_status_json_file = conf.read_asca_control_json_file()
    job_list = conf.Read_job_list()
    job_stream_positive_test_report = conf.read_job_stream_positive_test_report()
    iw_refresh_positive_test_report = conf.read_iw_refresh_positive_test_report()
    asca_control_test_report = conf.read_asca_control_test_report()
    row_count_test_json_file = conf.read_row_count_json_file()
    row_count_test_report = conf.read_row_count_test_report()

    def Append_job_status_to_report(self, test_type, job_status_dict):
        if test_type == 'jobstream_positive':
            file_name = self.job_positive_status_json
        else:
            file_name = ''
        with open(file_name, 'r+') as f:
            file_lines = f.read()
            f.seek(0, 0)
            if len(file_lines) != 0:
                # merge the new statuses into the existing report JSON in place
                file_json = json.loads(file_lines)
                file_json.update(job_status_dict)
                f.write(json.dumps(file_json))
            else:
                f.write(json.dumps(job_status_dict))
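        # e.g. appending {'job_b': {...}} to a file already holding
        # {'job_a': {...}} leaves {'job_a': {...}, 'job_b': {...}};
        # a re-run job simply overwrites its previous status entry.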

    def write_asca_status_to_json(self, asca_status_json_string):
        file_name = self.asca_status_json_file
        #print(asca_status_json_string)
        with open(file_name, 'w') as f:
            json.dump(asca_status_json_string, f)

    def write_iwefresh_status_to_report(self, iw_refresh_report_json):
        file_name = self.iwrefresh_status_json
        with open(file_name, 'w') as f:
            json.dump(iw_refresh_report_json, f)

    def write_row_count_status_to_json(self, row_count_status_json_string):
        file_name = self.row_count_test_json_file
        with open(file_name, 'w') as f:
            json.dump(row_count_status_json_string, f)

    def generate_jobstream_positive_report(self):
        file_name = self.job_positive_status_json
        with open(file_name, 'r') as f:
            job_positive_status_dict = json.load(fp=f)
        wb = xlwt.Workbook()
        ws = wb.add_sheet('job_stream_positive')
        ws.write(0, 0, 'JOB_NAME')
        ws.write(0, 1, 'JOB_TYPE')
        ws.write(0, 2, 'JOB_STATUS')
        ws.write(0, 3, 'START_TIME')
        ws.write(0, 4, 'END_TIME')
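        # xlwt's ws.write(row, col, value) is 0-indexed: row 0 holds the header
        # written above, so the data rows below start at index 1.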
        rows = []
        for k in job_positive_status_dict:
            v = job_positive_status_dict[k]
            row = []
            job_name = k
            job_type = ''
            for job in self.job_list:
                if job['JOB_NAME'] == job_name:
                    job_type = job['JOB_TYPE']
            job_status = v['Job Status']
            start_time = v['Job Start Time']
            end_time = v['Last Run Time']
            row.append(job_name)
            row.append(job_type)
            row.append(job_status)
            row.append(start_time)
            row.append(end_time)
            rows.append(row)
        for i in range(1, len(rows) + 1):
            for j in range(5):
                ws.write(i, j, rows[i - 1][j])
        saved_file_name = self.job_stream_positive_test_report
        wb.save(saved_file_name)

    def generate_iwrefresh_positive_report(self):
        file_name = self.iwrefresh_status_json
        with open(file_name, 'r') as f:
            iwrefresh_positive_report_dict = json.load(fp=f)
        wb = xlwt.Workbook()
        ws = wb.add_sheet('iwrefresh_positive')
        ws.write(0, 0, 'JOB_NM')
        ws.write(0, 1, 'JOB_START_TIME')
        ws.write(0, 2, 'JOB_END_TIME')
        ws.write(0, 3, 'DATA_GROUP_NM')
        ws.write(0, 4, 'IWREFRESH_START_TIME')
        ws.write(0, 5, 'IWREFRESH_END_TIME')
        ws.write(0, 6, 'IWREFRESH_STATUS')
        ws.write(0, 7, 'TEST_STATUS')
        rows = []
        for item in iwrefresh_positive_report_dict:
            row = []
            row.append(item['JOB_NM'])
            row.append(item['JOB_START_TIME'])
            row.append(item['JOB_END_TIME'])
            row.append(item['DATA_GROUP_NM'])
            row.append(item['IWREFRESH_START_TIME'])
            row.append(item['IWREFRESH_END_TIME'])
            row.append(item['IWREFRESH_STATUS_'])
            row.append('PASS' if item['IWREFRESH_STATUS_'] ==
                       'COMPLETE' else 'FAIL')
            rows.append(row)
        for i in range(1, len(rows) + 1):
            for j in range(8):
                ws.write(i, j, rows[i - 1][j])
        saved_file_name = self.iw_refresh_positive_test_report
        wb.save(saved_file_name)

    def generate_asca_control_test_report(self):
        file_name = self.asca_status_json_file
        with open(file_name, 'r') as f:
            asca_report_dict = json.load(fp=f)
        wb = xlwt.Workbook()
        ws = wb.add_sheet('asca_control')
        ws.write(0, 0, 'ASCA_CNTL_PT_ID')
        ws.write(0, 1, 'JOB_NAME')
        ws.write(0, 2, 'JOB_STATUS')
        ws.write(0, 3, 'JOB_LAST_RUN_TIME')
        ws.write(0, 4, 'SOURCE_ROW_COUNT')
        ws.write(0, 5, 'TARGET_ROW_COUNT')
        ws.write(0, 6, 'ASCA_CONTROL_STATUS')
        ws.write(0, 7, 'ASCA_CONTROL_TMS')
        ws.write(0, 8, 'ASCA_TEST_RESULT')
        rows = []
        for item in asca_report_dict:
            row = []
            row.append(item['ASCA_CNTL_PT_ID'])
            row.append(item['JOB_NAME'])
            row.append(item['JOB_STATUS'])
            row.append(item['JOB_LAST_RUN_TIME'])
            row.append(item['SOURCE_ROW_COUNT'])
            row.append(item['TARGET_ROW_COUNT'])
            row.append(item['ASCA_CONTROL_STATUS'])
            row.append(item['ASCA_CONTROL_TMS'])
            row.append(item['ASCA_TEST_RESULT'])
            rows.append(row)
        for i in range(1, len(rows) + 1):
            for j in range(9):
                ws.write(i, j, rows[i - 1][j])
        saved_file_name = self.asca_control_test_report
        wb.save(saved_file_name)

    def generate_row_count_test_report(self):
        file_name = self.row_count_test_json_file
        with open(file_name, 'r') as f:
            row_count_report_dict = json.load(fp=f)
        wb = xlwt.Workbook()
        ws = wb.add_sheet('row_count_report')
        ws.write(0, 0, 'SOURCE_TABLE')
        ws.write(0, 1, 'TARGET_TABLE')
        ws.write(0, 2, 'SOURCE_ROWCOUNT')
        ws.write(0, 3, 'TARGET_ROWCOUNT')
        ws.write(0, 4, 'TEST_RESULT')
        rows = []
        for item in row_count_report_dict:
            row = []
            row.append(item['SOURCE_TABLE'])
            row.append(item['TARGET_TABLE'])
            row.append(item['SOURCE_ROWCOUNT'])
            row.append(item['TARGET_ROWCOUNT'])
            row.append(item['TEST_RESULT'])
            rows.append(row)
        for i in range(1, len(rows) + 1):
            for j in range(5):
                ws.write(i, j, rows[i - 1][j])
        saved_file_name = self.row_count_test_report
        wb.save(saved_file_name)
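
# Usage sketch (not part of the original example): one plausible end-to-end
# flow for Generate_report. The job name and status payload below are made up;
# the real keys come from the dsjob status parsing elsewhere in this project.
if __name__ == "__main__":
    reporter = Generate_report()
    reporter.Append_job_status_to_report(
        'jobstream_positive',
        {'SEQ_LOAD_EXAMPLE': {'Job Status': 'RUN OK (1)',
                              'Job Start Time': '2019-09-09 01:00:00',
                              'Last Run Time': '2019-09-09 01:12:34'}})
    reporter.generate_jobstream_positive_report()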
Beispiel #22
0
    def sample_data_test(self, id, pwd):
        '''read conf'''
        conf = ReadConfig()
        table_list = conf.Read_table_list()
        source_db_node = conf.Read_source_db_node()
        target_db_node = conf.Read_target_db_node()
        sample_data_report = []
        failed_sample_report = []
        fail_cell_count = 0
        for table in table_list:
            '''get the source target column mapping'''
            source_target_column_mapping = self.generate_source_target_column_map(id, pwd,table['SOURCE_SCHEMA'],\
                table['SOURCE_TABLE'], table['TARGET_SCHEMA'], table['TARGET_TABLE'])
            source_table_nm = table['SOURCE_SCHEMA']+'.'+table['SOURCE_TABLE']
            target_table_nm = table['TARGET_SCHEMA'] + '.' + table['TARGET_TABLE']
            sample_source_condition = conf.Read_where_condition(table['SOURCE_TABLE'])
            sample_target_condition = conf.Read_where_condition(table['TARGET_TABLE'])
            source_sql = "select * from {} where {}".format(source_table_nm,sample_source_condition)
            target_sql = "select * from {} where {}".format(target_table_nm,sample_target_condition)
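            # e.g. with a configured condition of "UPDATED_AT_TS >= '2019-01-01'"
            # (hypothetical), source_sql becomes:
            #   select * from SCHEMA.TABLE where UPDATED_AT_TS >= '2019-01-01'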
            rs_source = db_connect.exec_sql_common(source_db_node, id, pwd, source_sql)
            rs_target = db_connect.exec_sql_common(target_db_node, id, pwd, target_sql)
            source_row_count = len(rs_source)
            target_row_count = len(rs_target)
            source_column_count = len(rs_source[0])
            target_column_count = len(rs_target[0])
            print("source table: %s" % source_table_nm)
            print("target table: %s" % target_table_nm)

            '''step 1 compare the row count between source and target'''
            if source_row_count == target_row_count:
                print("The sample sql returns the same row count")
            else:
                print("The sample sql returns a different row count, the test validation failed")
                raise TestException.SampleDataError()

            '''step 2 compare the column count between source and target'''
            # the check expects the source to carry 3 more columns than the target
            if source_column_count == target_column_count + 3:
                print("The sample sql returns the expected column counts")
            else:
                print("The sample sql returns an unexpected column count, the test validation failed")
                raise TestException.SampleDataError()

            '''step 3 loop to compare the result rows from source and target'''
            for i in range(source_row_count):
                for k, v in source_target_column_mapping.items():
                    sample_compare_dict = dict()
                    source_value = rs_source[i][k]
                    target_value = rs_target[i][v]
                    sample_compare_dict['SOURCE_TABLE_NM'] = source_table_nm
                    sample_compare_dict['SOURCE_COLUMN_NM'] = k
                    sample_compare_dict['SOURCE_COLUMN_VALUE'] = source_value
                    sample_compare_dict['TARGET_TABLE_NM'] = target_table_nm
                    sample_compare_dict['TARGET_COLUMN_NM'] = v
                    sample_compare_dict['TARGET_COLUMN_VALUE'] = target_value
                    sample_compare_dict['STATUS'] = ('PASS' if source_value == target_value else 'FAIL')
                    sample_data_report.append(sample_compare_dict)
                    if sample_compare_dict['STATUS'] == 'FAIL':
                        failed_sample_report.append(sample_compare_dict)
        '''write failed records to the failed report file'''
        with open("tmp/failed_sample_data_report.json", 'w', encoding='UTF-8') as f:
            json.dump(failed_sample_report, f)
        '''write all records to the report file'''
        with open("tmp/sample_data_report.json", 'w', encoding='UTF-8') as f:
            json.dump(sample_data_report, f)

        if len(failed_sample_report) > 0:
            print("There are some cell values not equal, the test failed,check the failed report file for detail")
            raise TestException.SampleDataError()
        else:
            print("The sample data test run passed")
        return 'PASS'
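
# Minimal sketch (assumption, not from the original source): step 3 above as a
# standalone helper, so the cell-by-cell comparison can be exercised without a
# live database. Rows are dict-like (column name -> value), matching how
# rs_source[i][k] is indexed in sample_data_test.
def compare_rows(rs_source, rs_target, column_map):
    failures = []
    for i, src_row in enumerate(rs_source):
        for src_col, tgt_col in column_map.items():
            if src_row[src_col] != rs_target[i][tgt_col]:
                failures.append({'ROW': i,
                                 'SOURCE_COLUMN_NM': src_col,
                                 'SOURCE_COLUMN_VALUE': src_row[src_col],
                                 'TARGET_COLUMN_NM': tgt_col,
                                 'TARGET_COLUMN_VALUE': rs_target[i][tgt_col]})
    return failures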