Example #1
 def _create_table(self):
     """This step will create the flat and itemized tables"""
     self.tablenames.append(self.tablename)
     drop_table_if_exists(self.tablename, self.db)
     self.logger.info('self.tablename={0}'.format(self.tablenames))
     self.logger.info('qry = {0}'.format(self.table.definition))
     self.db.executeNoResults(self.table.definition)
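Every example on this page drops a table with drop_table_if_exists before recreating it. The helper itself is never shown, and the snippets call it in two ways (positional drop_table_if_exists(table, db) and keyword dbutilities.drop_table_if_exists(db_context=..., table=...)), so the following is only a minimal sketch of what such a helper presumably does against SQL Server; the real dbutilities implementation may differ.

def drop_table_if_exists(table, db_context):
    # Sketch only: assumes a SQL Server backend (the sp_RENAME and bcp calls in
    # later examples imply one) and the executeNoResults() method that every
    # example calls on its db context. The table name is interpolated directly,
    # which is acceptable for the trusted, hard-coded names used on this page.
    query = "IF OBJECT_ID('{0}', 'U') IS NOT NULL DROP TABLE {0}".format(table)
    db_context.executeNoResults(query)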
Example #2
 def test_valid_mergespec_file_output_format(self):
     self._log.info(
         'test_valid_mergespec_file_output_format() - testing...')
     valid_ds_input_file = os.path.join(self._run_context.tests_safe_dir,
                                        DS_INPUT_FILE_VALID)
     input_tables = {FLAT_TABLE_KEY_NAME: VALID_DS_INPUT_TABLE_NAME}
     dbutilities.drop_table_if_exists(
         db_context=self._run_context.getDBContext(),
         table=VALID_DS_INPUT_TABLE_NAME)
     input_table_reader = SafeExcelReader(
         run_context=self._run_context,
         filename=valid_ds_input_file,
         sheet_name=DS_INPUT_FILE_SHEET_VALID,
         db_context=self._run_context.getDBContext(),
         output_table=VALID_DS_INPUT_TABLE_NAME)
     input_table_reader.createTable()
     mergespec_file = 'C:/{}'.format(MERGESPEC_OUTPUT_FILE)
     if os.path.exists(mergespec_file):
         os.remove(mergespec_file)
     cmpmrg = ComplementaryMerge(run_context=self._run_context)
     cmpmrg.create_mergespec_file(input_table_names=input_tables,
                                  new_mergespec_file=mergespec_file)
     self._log.info('test_valid_mergespec_file_output_format() - tested.')
     filename1 = mergespec_file
     filename2 = os.path.join(self._run_context.tests_safe_dir,
                              COMPARE_FILE_MERGESPEC_OUTPUT)
     assert filecmp.cmp(
         filename1,
         filename2), 'Python test output does not match expected output.'
Example #3
 def _school_intervention(self):
     print " Create SCHOOL_INTERVENTION_ASCII"
     self.table_name = 'SCHOOL_INTERVENTION_ASCII'
     drop_table_if_exists( 'SCHOOL_INTERVENTION_ASCII', self.db)
     self.db.executeNoResults(sqls['SCHOOL_INTERVENTION'])
     self.db.executeNoResults("ALTER TABLE SCHOOL_INTERVENTION_ASCII DROP COLUMN DCRXID")
     self.db.executeNoResults("sp_RENAME 'SCHOOL_INTERVENTION_ASCII.DCRXID_RECODED','DCRXID','COLUMN'")
Example #4
 def _district(self):
     print " Create DISTRICT_ASCII"
     self.table_name = 'DISTRICT_ASCII'
     drop_table_if_exists( 'DISTRICT_ASCII', self.db)
     self.db.executeNoResults(sqls['DISTRICT'])
     self.db.executeNoResults("ALTER TABLE DISTRICT_ASCII DROP COLUMN GRADE, RGRADE")
     self.db.executeNoResults("sp_RENAME 'DISTRICT_ASCII.GRADE_RECODED','GRADE','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'DISTRICT_ASCII.GRADE_R_RECODED','GRADE_R','COLUMN'")
Example #5
 def _school(self):
     print " Create SCHOOL_ASCII"
     self.table_name = 'SCHOOL_ASCII'
     drop_table_if_exists( 'SCHOOL_ASCII', self.db)
     self.db.executeNoResults(sqls['SCHOOL'])
     self.db.executeNoResults("ALTER TABLE SCHOOL_ASCII DROP COLUMN GRADE, RGRADE")
     self.db.executeNoResults("sp_RENAME 'SCHOOL_ASCII.GRADE_RECODED','GRADE','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'SCHOOL_ASCII.GRADE_R_RECODED','GRADE_R','COLUMN'")
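Examples #3, #4 and #5 (and #10 below) repeat the same post-processing pattern: drop the original column, then promote its *_RECODED twin back to the original name with sp_RENAME. A hedged sketch of that pattern as a single helper (hypothetical, not part of the codebase excerpted here, and it assumes the recoded column is always named <column>_RECODED):

def promote_recoded_columns(db, table, columns):
    # Hypothetical helper: for each column, drop the raw value and rename
    # <column>_RECODED back to <column>, mirroring the ALTER TABLE / sp_RENAME
    # pairs in the *_ASCII examples above.
    for column in columns:
        db.executeNoResults(
            "ALTER TABLE {0} DROP COLUMN {1}".format(table, column))
        db.executeNoResults(
            "sp_RENAME '{0}.{1}_RECODED', '{1}', 'COLUMN'".format(table, column))

# e.g. promote_recoded_columns(self.db, 'SCHOOL_ASCII', ['GRADE']) covers the
# GRADE handling in _school(); the RGRADE/GRADE_R columns do not follow the
# simple naming convention and would still need their own statements.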
Example #6
 def _create_table(self):
     """This step will create the flat and itemized tables"""
     self.tablenames.append(self.tablename)
     drop_table_if_exists( self.tablename, self.db)
     if self.debug:
         print 'self.tablename=', self.tablenames
         print 'qry=',self.table.definition
     self.db.executeNoResults(self.table.definition)
Example #7
def populate_school_intervention(dbcontext):
    print " Create SCHOOL_INTERVENTION_ASCII"
    drop_table_if_exists('SCHOOL_INTERVENTION_ASCII', dbcontext)
    dbcontext.executeNoResults(sqls['SCHOOL_INTERVENTION'])
    dbcontext.executeNoResults(
        "ALTER TABLE SCHOOL_INTERVENTION_ASCII DROP COLUMN DCRXID")
    dbcontext.executeNoResults(
        "sp_RENAME 'SCHOOL_INTERVENTION_ASCII.DCRXID_RECODED','DCRXID','COLUMN'"
    )
Example #8
def populate_school(dbcontext):
    print " Create SCHOOL_ASCII"
    drop_table_if_exists('SCHOOL_ASCII', dbcontext)
    dbcontext.executeNoResults(sqls['SCHOOL'])
    dbcontext.executeNoResults(
        "ALTER TABLE SCHOOL_ASCII DROP COLUMN GRADE, RGRADE")
    dbcontext.executeNoResults(
        "sp_RENAME 'SCHOOL_ASCII.GRADE_RECODED','GRADE','COLUMN'")
    dbcontext.executeNoResults(
        "sp_RENAME 'SCHOOL_ASCII.GRADE_R_RECODED','RGRADE','COLUMN'")
Example #9
def main():
    """
    This runs a test of the complementary_merge wrapper/glue code.
    """
    run_context = SuiteContext('OGT_test{}'.format(RUN_CONTEXT_NUMBER))
    log = run_context.get_logger('ComplementaryMerge')
    db_context = run_context.getDBContext()

    # Summer has SAS variables.
    intermediate_path = 'OGT Fall 2012' if SUMMER_OR_FALL == 'F' else 'OGT Spring 2012'
    pathname = os.path.join(CVSROOT, 'CSSC Score Reporting', intermediate_path,
                            'Code/Development/Intake')
    bookmap_location_file_name = os.path.join(pathname,
                                              BOOKMAP_LOCATION_FILE_NAME)
    log.debug("main - bookmap_location_file_name[%s]" %
              bookmap_location_file_name)
    print("bookmap_location_file_name[%s]" % bookmap_location_file_name)
    mergespec_file_name = os.path.join(run_context.tests_safe_dir,
                                       MERGESPEC_FILE_NAME)

    input_table_names = {
        FLAT_TABLE_KEY_NAME: 'rc2FINAL',
        'C': 'mc_table_C',
        'M': 'mc_table_M',
        'R': 'mc_table_R',
        'S': 'mc_table_S',
        'W': 'mc_table_W'
    }
    output_table_names = {
        FLAT_TABLE_KEY_NAME: 'rc2FINAL_cmrg',
        'C': 'mc_table_C_cmrg',
        'M': 'mc_table_M_cmrg',
        'R': 'mc_table_R_cmrg',
        'S': 'mc_table_S_cmrg',
        'W': 'mc_table_W_cmrg'
    }

    for key in output_table_names:
        dbutilities.drop_table_if_exists(db_context=db_context,
                                         table=output_table_names[key])

    try:
        complementary_merge(
            run_context=run_context,
            bookmap_location_file_name=bookmap_location_file_name,
            bookmap_sheet=BOOKMAP_SHEET,
            mergespec_file_name=mergespec_file_name,
            input_table_names=input_table_names,
            output_table_names=output_table_names)
        #create_mergespec_file( run_context=run_context, input_table_names=input_table_names,
        #    new_mergespec_file='C:/new_mergespec_file.csv' )
    except Exception as error_msg:
        log.exception('\n\n')
        raise
Example #10
 def _student(self):
     print " Create STUDENT_ASCII"
     self.table_name = 'STUDENT_ASCII'
     drop_table_if_exists( 'STUDENT_ASCII', self.db)
     self.db.executeNoResults(sqls['STUDENT'])
     self.db.executeNoResults("ALTER TABLE STUDENT_ASCII DROP COLUMN GRADE, RGRADE, UFRX_BREACH, UFSX_BREACH, UFWX_BREACH, UFCX_BREACH, UFMX_BREACH")
     self.db.executeNoResults("sp_RENAME 'STUDENT_ASCII.GRADE_RECODED','GRADE','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'STUDENT_ASCII.GRADE_R_RECODED','GRADE_R','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'STUDENT_ASCII.UFRX_BREACH_RECODED','UFRX_BREACH','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'STUDENT_ASCII.UFMX_BREACH_RECODED','UFMX_BREACH','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'STUDENT_ASCII.UFWX_BREACH_RECODED','UFWX_BREACH','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'STUDENT_ASCII.UFSX_BREACH_RECODED','UFSX_BREACH','COLUMN'")
     self.db.executeNoResults("sp_RENAME 'STUDENT_ASCII.UFCX_BREACH_RECODED','UFCX_BREACH','COLUMN'")
Example #11
    def test_merge_nonfuzzy_norecpriority(self):
        self._log.info('test_merge_nonfuzzy_norecpriority - testing...')
        mergespec_file = os.path.join(
            self._run_context.tests_safe_dir,
            MERGESPEC_FILE_VALID_NONFUZZY_NORECPRIORITY)
        valid_ds_input_file = os.path.join(self._run_context.tests_safe_dir,
                                           DS_INPUT_FILE_VALID)

        input_tables = {FLAT_TABLE_KEY_NAME: VALID_DS_INPUT_TABLE_NAME}
        output_tables = {
            FLAT_TABLE_KEY_NAME:
            VALID_DS_OUTPUT_TABLE_NAME_NONFUZZY_NORECPRIORITY
        }

        dbutilities.drop_table_if_exists(
            db_context=self._run_context.getDBContext(),
            table=VALID_DS_INPUT_TABLE_NAME)
        dbutilities.drop_table_if_exists(
            db_context=self._run_context.getDBContext(),
            table=VALID_DS_OUTPUT_TABLE_NAME_NONFUZZY_NORECPRIORITY)

        input_table_reader = SafeExcelReader(
            run_context=self._run_context,
            filename=valid_ds_input_file,
            sheet_name=DS_INPUT_FILE_SHEET_VALID,
            db_context=self._run_context.getDBContext(),
            output_table=VALID_DS_INPUT_TABLE_NAME)
        input_table_reader.createTable()

        cmpmrg = ComplementaryMerge(
            run_context=self._run_context,
            flat_table_identity_field_name='[import_order]')
        cmpmrg.complementary_merge(mergespec_file=mergespec_file,
                                   input_table_names=input_tables,
                                   output_table_names=output_tables,
                                   force_one_only=False)

        columns_str = (
            'Barcode, ID, LastName, FirstName, Score1, Race, Old, Score2, Score3, Score4, variable_priority,'
            ' record_priority, Attempt1, Attempt2, Attempt3, Attempt4, iep')
        filename1 = self._dump_table(
            VALID_DS_OUTPUT_TABLE_NAME_NONFUZZY_NORECPRIORITY,
            columns_str=columns_str,
            sort_column_nbrs=[1, 6, 0])
        filename2 = os.path.join(self._run_context.tests_safe_dir,
                                 COMPARE_FILE_NONFUZZY_NORECPRIORITY)
        assert filecmp.cmp(
            filename1,
            filename2), 'Python test output does not match SAS test output.'

        self._log.info('test_merge_nonfuzzy_norecpriority - tested.')
Example #12
 def __init__(self, runcontext, dbcontext, merge_table_1 = " ", merge_table_2 = "", output_table = " ", mergeids = [], drop_columns = [], keep_columns = [] ):
     self.rc = runcontext
     self.db = dbcontext
     self.mergeids = [each.upper() for each in mergeids]
     self.merge_table_1 = merge_table_1.upper()
     self.merge_table_2 = merge_table_2.upper()
     self.drop_columns = [each.upper() for each in drop_columns]
     self.output_table = output_table.upper()
     self.common_columns = []
     self.keep_columns = [each.upper() for each in keep_columns]
     self.keep_columns_string = ','.join(self.keep_columns)
     self.table1_columns = []
     self.table2_columns = []
     self.table1_columns_nulls = []
     self.table2_columns_nulls = []
     self.mergeid_ON = []
     self.mergeid_ON_fields = ''
     self.mergeid_WHERE_TB1 = []
     self.mergeid_WHERE_TB1_fields = ''
     self.mergeid_WHERE_TB2 = []
     self.mergeid_WHERE_TB2_fields = ''
     self.mergeid_fields_TB1 = ''
     self.mergeid_fields_TB2 = ''
     self.common_columns_fields_TB1 = ''
     self.common_columns_fields_TB2 = ''
     self.table1_columns_fields = ''
     self.table2_columns_fields = ''
     self.table_columns_fields = ''
     self.table1_columns_fields_nulls = ''
     self.table2_columns_fields_nulls = ''
     self.output_table = output_table
     drop_table_if_exists( 'SAS_MERGE_1', self.db)
     drop_table_if_exists( 'SAS_MERGE_2', self.db)
     drop_table_if_exists( 'SAS_MERGE_3', self.db)
     drop_table_if_exists(self.output_table, self.db)
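The constructor above belongs to a SAS-style merge helper whose class name does not appear in this snippet. A hedged usage sketch, with a hypothetical class name (Merge) and made-up table and column names, only to show how the keyword arguments line up with the attributes initialised above:

# Hypothetical usage: `Merge`, the table names and the column names below are
# placeholders; only the __init__ shown above is visible in this snippet.
merger = Merge(runcontext=run_context,
               dbcontext=db_context,
               merge_table_1='student_flat',
               merge_table_2='student_scores',
               output_table='student_merged',
               mergeids=['district_id', 'student_id'],
               drop_columns=['batch_ts'],
               keep_columns=['grade'])
# The constructor already drops SAS_MERGE_1/2/3 and the output table, so any
# later merge step starts from a clean slate.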
Example #13
    def test_mergespec_file_invalid_format(self):
        self._log.info('test_mergespec_file_invalid_format() - testing...')
        valid_ds_input_file = os.path.join(self._run_context.tests_safe_dir,
                                           DS_INPUT_FILE_VALID)
        dbutilities.drop_table_if_exists(
            db_context=self._run_context.getDBContext(),
            table=VALID_DS_INPUT_TABLE_NAME)
        dbutilities.drop_table_if_exists(
            db_context=self._run_context.getDBContext(),
            table=VALID_DS_OUTPUT_TABLE_NAME)
        input_table_reader = SafeExcelReader(
            run_context=self._run_context,
            filename=valid_ds_input_file,
            sheet_name=DS_INPUT_FILE_SHEET_VALID,
            db_context=self._run_context.getDBContext(),
            output_table=VALID_DS_INPUT_TABLE_NAME)
        input_table_reader.createTable()
        mergespec_file = os.path.join(self._run_context.tests_safe_dir,
                                      MERGESPEC_FILE_MISSING_COLUMN)
        input_table_names = {FLAT_TABLE_KEY_NAME: VALID_DS_INPUT_TABLE_NAME}
        output_table_names = {FLAT_TABLE_KEY_NAME: VALID_DS_OUTPUT_TABLE_NAME}
        cmpmrg1 = ComplementaryMerge(run_context=self._run_context)
        with self.assertRaises(EOFError):
            cmpmrg1.complementary_merge(mergespec_file=mergespec_file,
                                        input_table_names=input_table_names,
                                        output_table_names=output_table_names,
                                        force_one_only=True)

        mergespec_file = os.path.join(self._run_context.tests_safe_dir,
                                      MERGESPEC_FILE_INVALID_COLUMN)
        cmpmrg2 = ComplementaryMerge(run_context=self._run_context)
        with self.assertRaises(ValueError):
            cmpmrg2.complementary_merge(mergespec_file=mergespec_file,
                                        input_table_names=input_table_names,
                                        output_table_names=output_table_names,
                                        force_one_only=True)
        self._log.info('test_mergespec_file_invalid_format() - tested.')

        mergespec_file = os.path.join(self._run_context.tests_safe_dir,
                                      MERGESPEC_FILE_NO_VARIABLE_PRIORITY)
        cmpmrg2 = ComplementaryMerge(run_context=self._run_context)
        with self.assertRaises(ValueError):
            cmpmrg2.complementary_merge(mergespec_file=mergespec_file,
                                        input_table_names=input_table_names,
                                        output_table_names=output_table_names,
                                        force_one_only=True)
        self._log.info('test_mergespec_file_invalid_format() - tested.')
Example #14
    def process(self):
        print 'HandScore_check_module_started'
        try:
            for eachlist in subject_col_mapping.values():
                drop_table_if_exists(eachlist[5], self.dbcontext)
                drop_table_if_exists(eachlist[6], self.dbcontext)
                drop_table_if_exists(eachlist[5] + '_intr', self.dbcontext)
                drop_table_if_exists(eachlist[6] + '_intr', self.dbcontext)


#             self._process_sa_items()
            self._process_er_items()
        except Exception as error:
            print 'Error=', error
Example #15
 def test_ds_output_already_exists(self):
     self._log.info('test_ds_output_already_exists() - testing...')
     mergespec_file = os.path.join(self._run_context.tests_safe_dir,
                                   MERGESPEC_FILE_VALID)
     valid_ds_input_file = os.path.join(self._run_context.tests_safe_dir,
                                        DS_INPUT_FILE_VALID)
     dbutilities.drop_table_if_exists(
         db_context=self._run_context.getDBContext(),
         table=VALID_DS_INPUT_TABLE_NAME)
     input_table_reader = SafeExcelReader(
         run_context=self._run_context,
         filename=valid_ds_input_file,
         sheet_name=DS_INPUT_FILE_SHEET_VALID,
         db_context=self._run_context.getDBContext(),
         output_table=VALID_DS_INPUT_TABLE_NAME)
     input_table_reader.createTable()
     input_table_names = {FLAT_TABLE_KEY_NAME: VALID_DS_INPUT_TABLE_NAME}
     output_table_names = {FLAT_TABLE_KEY_NAME: VALID_DS_INPUT_TABLE_NAME}
     cmpmrg = ComplementaryMerge(run_context=self._run_context)
     with self.assertRaises(IOError):
         cmpmrg.complementary_merge(mergespec_file=mergespec_file,
                                    input_table_names=input_table_names,
                                    output_table_names=output_table_names)
     self._log.info('test_ds_output_already_exists() - tested.')
Example #16
    def test_21(self):
        answer_dir = os.path.join(self.answer_dir, 'test_21')
        if not os.path.exists(answer_dir):
            os.makedirs(answer_dir)
        specimen_dir = os.path.join(self.specimen_dir, 'ttest_test_21')
        result = True

        with self.read_g3() as g3, \
             self.read_oat_agg_sheet() as agg_sheet, \
             dbutilities.get_temp_table( self.db_context ) as tmp, \
             dbutilities.get_temp_table( self.db_context ) as tmp_agg :

            # As near as I can tell, the SAS test only runs for the tenth row of the agg sheet.
            # self.db_context.executeNoResults("DELETE FROM {agg_sheet} WHERE [import_order] != 10".format( agg_sheet=agg_sheet ))

            # We are just using this TTest instance to read the aggregation sheet. The actual ttest will use
            # another instance based on a truncated aggregation sheet.
            agg_sheet_reader = TTest(g3, self.db_context, agg_sheet, None,
                                     False)
            agg_sheet_reader.readAggData()

            assert dbutilities.table_exists(g3)

            targetParentRow = []
            for target_level in agg_sheet_reader.target_levels:
                for parent_level in target_level.contents:
                    for row in parent_level.contents:
                        targetParentRow.append(
                            (target_level, parent_level, row))

            targetParentRow.sort(key=lambda row: row[2].import_order)

            for target_level, parent_level, row in targetParentRow:
                where_t = target_level.get_where_expression()
                target_id = target_level.id
                where_p = parent_level.get_where_expression()
                parent_id = parent_level.id
                i = row.import_order

                # Reduce the data to the desired sample
                dbutilities.drop_table_if_exists(tmp)
                query = """
                SELECT {vars},
                        COUNT( {input_var} ) OVER( PARTITION BY {parent_id}, {target_id} ) AS n_target,
                        0 AS n_parent
                INTO {tmp}
                FROM {g3}
                WHERE {where_t}
                """.format(parent_id=parent_id,
                           target_id=target_id,
                           input_var=row.inputvar,
                           where_t=where_t,
                           tmp=tmp,
                           g3=g3,
                           vars=Joiner(g3))
                self.db_context.executeNoResults(query)
                query = """
                UPDATE {tmp} SET n_parent = A.B FROM (
                    SELECT n_parent, COUNT( {input_var} ) OVER( PARTITION BY {parent_id} ) AS B
                    FROM {tmp}
                    WHERE {where_p}
                ) AS A
                """.format(parent_id=parent_id,
                           input_var=row.inputvar,
                           where_p=where_p,
                           tmp=tmp)
                print query
                self.db_context.executeNoResults(query)
                query = "DELETE FROM {tmp} WHERE ( n_parent != 2 ) OR ( n_target != 1 )".format(
                    tmp=tmp)
                self.db_context.executeNoResults(query)
                n_obs = dbutilities.n_obs(tmp)
                if n_obs > 0:

                    # Reduce the aggregation sheet to the current row
                    query = "SELECT * INTO {tmp_agg} FROM {agg_sheet} WHERE [import_order]={i}".format(
                        tmp_agg=tmp_agg, agg_sheet=agg_sheet, i=i)
                    self.db_context.executeNoResults(query)

                    # Do the ttest
                    ttester = TTest(tmp, self.db_context, tmp_agg, None, False)
                    ttester.readAggData()
                    ttester.execute()

                    # Check the answer
                    answer_file = os.path.join(
                        answer_dir, 'row_{0}_comparison.log'.format(i))
                    specimen_file = os.path.join(
                        specimen_dir, 'test_21_ttest_{0}.xls'.format(i))
                    result_i = self.compare_output(specimen_file, target_level,
                                                   answer_file)
                    result = result and result_i
                    print "{1} ttest test_21 for {0}".format(
                        i, 'PASSED' if result_i else 'FAILED')

                    self.assertTrue(result, "TTest Test 21 FAILED")
                    return
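The windowed COUNT(...) OVER (PARTITION BY ...) queries in test_21 are what reduce the data to rows whose target group holds exactly one observation and whose parent group holds exactly two. A self-contained illustration of that idiom on a toy table (hypothetical data, SQLite for portability; the real test runs against SQL Server and applies different WHERE filters to the two counts):

import sqlite3

# Toy data; needs SQLite >= 3.25 for window functions.
conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE scores (parent_id TEXT, target_id TEXT, score REAL);
    INSERT INTO scores VALUES
        ('D1', 'S1', 10.0),
        ('D1', 'S2', 12.0),
        ('D2', 'S3', 11.0);
""")
rows = conn.execute("""
    SELECT parent_id, target_id, score FROM (
        SELECT *,
               COUNT(score) OVER (PARTITION BY parent_id, target_id) AS n_target,
               COUNT(score) OVER (PARTITION BY parent_id)            AS n_parent
        FROM scores
    )
    WHERE n_parent = 2 AND n_target = 1
""").fetchall()
# Both 'D1' rows survive (parent count 2, target count 1 each); the lone 'D2'
# row is dropped because its parent group has only one observation.
print(rows)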
Example #17
 def _districtnames(self):
     print " Create DISTRICTNAMES_ASCII"
     self._alldistricts()
     self.table_name = 'DISTRICTNAMES_ASCII'
     drop_table_if_exists( 'DISTRICTNAMES_ASCII', self.db)
     self.db.executeNoResults(sqls['DISTRICTNAMES'])
Example #18
def populate_districtnames(dbcontext):
    print "Create DISTRICTNAMES_ASCII"
    drop_table_if_exists('DISTRICTNAMES_ASCII', dbcontext)
    dbcontext.executeNoResults(sqls['DISTRICTNAMES'])
Example #19
 def _alldistricts(self):
     """This is a temporary table referred to while creating the alldistricts table."""
     print " Create ALLDISTRICTS_ASCII"
     self.table_name = 'ALLDISTRICTS_ASCII'
     drop_table_if_exists( 'ALLDISTRICTS_ASCII', self.db)
     self.db.executeNoResults(sqls['ALLDISTRICTS'])
Example #20
 def _school_intervention_jd(self):
     print " Create SCHOOL_INTERVENTION_JD_ASCII"
     self.table_name = 'SCHOOL_INTERVENTION_JD_ASCII'
     drop_table_if_exists( 'SCHOOL_INTERVENTION_JD_ASCII', self.db)
     self.db.executeNoResults(sqls['SCHOOL_INTERVENTION_JD'])
Example #21
 def __init__(self,
              runcontext,
              dbcontext,
              layoutfile='',
              inputfile='',
              patterns=[],
              OEpatterns=[],
              flat_tablename='',
              debug=False,
              bulk_insert=True,
              errorfile='',
              outputsdir='c:/SAS/OGT/Joboutput/',
              table_names='TABLE_NAMES',
              output_table='Layout_Temp',
              lock=None,
              batchfile='bcpfile.bat',
              row_count=1):
     """This is the initialization step and it needs the following inputs
     to run:
     runcontext = creates the logger methods
     dbcontext  = creates the db connection objects
     layoutfile = layout file that will be used by this process
     inputfile  = input file that will be used by this process
     patterns = patterns used to identify the MC item fields
     OEpatterns = patterns used to identify the OE item fields
     flat_tablename = the flat table name that will be created
     debug = False (default); when True, all debug messages are printed to the console
     bulk_insert = indicator specifying whether to perform bulk inserts
     errorfile = this file will contain all the errors encountered while processing
     outputsdir = all outputs will be written to this directory; if the directory does not exist it will be created.
     """
     self.lock = lock
     self.output_table = output_table
     self.RC = runcontext
     self.db = dbcontext
     self.logger = self.RC.get_logger("preqc")
     self.layoutfile = layoutfile
     self.inputfile = inputfile
     self.errorfile = errorfile
     self.outputrec = {}
     self.layoutdict = []
     self.maxmindict = []
     self.recodingsdict = {}
     self.reset_counter = 1
     self.patterns = patterns
     self.OEpatterns = OEpatterns
     self.OE_items_tables_collist = []
     self.OE_items_pos_tables_collist = []
     self.mc_items_tables_collist = []
     self.mc_items_table_names = []
     self.mc_items_table_field_names = {}
     self.item_tables_columns = []
     self.all_columns = []
     self.tablename = ''
     self.item_table = False
     self.debug = debug
     # missing_columns should be an OrderedDict, as we need to add the MISSING columns
     self.missing_columns = OrderedDict()
     self.tablenames = []
     self.column_names = []
     self.flat_tablename = flat_tablename
     self.bulkinsert = bulk_insert
     self.bulk_insert_list = []
     self.row_count = row_count
     self.t1 = datetime.datetime.now()
     self.t2 = datetime.datetime.now()
     if not os.path.exists(outputsdir):
         os.makedirs(outputsdir)
     self.f = open('c:/OGTValidation.txt', "w")
     self.error_file = open(self.errorfile, 'w')
     self.insert_rows = []
     self.file_lists = []
     self.field_ind = 0
     self.outputs_dir = outputsdir
     self.pre_flat_qc_table_file = open(
         self.outputs_dir + self.flat_tablename + '.txt', 'w')
     self.batchfilename = batchfile
     self.batchfile = open(self.outputs_dir + batchfile, 'w')
     self.statsfile = open(self.outputs_dir + 'loadstats.txt', 'w')
     open(self.outputs_dir + 'error.txt', 'w')
     batch_file_string = 'bcp ' + self.db.db + '.dbo.' +  self.flat_tablename +  ' in ' + self.outputs_dir +  self.flat_tablename + '.txt' \
             + ' -t "&!!" -c -S ' + self.db.server + ' -T  -e ' + self.outputs_dir + 'error.txt'
     self.batchfile.write(batch_file_string + '\n')
     qry = "CREATE TABLE " + table_names + " (subject_id char(5),tablename nvarchar(50) )"
     drop_table_if_exists(table_names, self.db)
     self.db.executeNoResults(qry)
     i = 0
     for each in self.patterns:
         qry = "Insert into " + table_names + " values ('{0}','{1}')".format(
             each[4], each[3])
         self.db.executeNoResults(qry)
         i += 1
     qry = "Insert into " + table_names + " values ('{0}','{1}')".format(
         'F', self.flat_tablename)
     self.db.executeNoResults(qry)
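The batch line written near the end of the constructor is a plain bcp bulk-copy command assembled by string concatenation. A hedged, equivalent helper (hypothetical name build_bcp_command) that produces the same command with str.format, shown only to make its shape and flags easier to read:

def build_bcp_command(db_name, server, flat_table, outputs_dir):
    # Equivalent to the concatenated batch_file_string above: bulk-load the
    # "&!!"-delimited character file (-t, -c) into the flat table on the given
    # server (-S) over a trusted connection (-T), writing rejected rows to
    # error.txt (-e).
    return ('bcp {db}.dbo.{table} in {outdir}{table}.txt '
            '-t "&!!" -c -S {server} -T -e {outdir}error.txt'
            .format(db=db_name, table=flat_table,
                    outdir=outputs_dir, server=server))

# e.g. build_bcp_command(self.db.db, self.db.server, self.flat_tablename,
#      self.outputs_dir) reproduces the command written to the batch file.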
Example #22
def sql_2(dbcontext):
    print ' dbcontext executing_2 = ', dbcontext
    drop_table_if_exists( 'employee_2_test', dbcontext)
    dbcontext.executeNoResults("select * into employee_2_test from employee1")
Example #23
 def _housekeeping(self):
     qry = "CREATE TABLE FILE_CONTROL_TABLE (id int,FILENAME nvarchar( 100) )"
     drop_table_if_exists('FILE_CONTROL_TABLE', self.db)
     self.db.executeNoResults(qry)
     print 'Housekeeping done'
Example #24
 def _allschools_jd(self):
     print " Create ALLSCHOOLS_JD_ASCII"
     self.table_name = 'ALLSCHOOLS_JD_ASCII'
     drop_table_if_exists( 'ALLSCHOOLS_JD_ASCII', self.db)
     self.db.executeNoResults(sqls['ALLSCHOOLS_JD'])
Example #25
 def _studenthomedistrict(self):
     print " Create STUDENTHOMEDISTRICT_ASCII"
     self.table_name = 'STUDENTHOMEDISTRICT_ASCII'
     drop_table_if_exists( 'STUDENTHOMEDISTRICT_ASCII', self.db)
     self.db.executeNoResults(sqls['STUDENTHOMEDISTRICT'])