def generate_conf(guid_batch, phase_number, load_type, tenant_code, target_schema):
    """
    Return all needed configuration information
    :param guid_batch: the guid for the batch
    :param phase_number: the current number of the phase
    :param load_type: type of load, e.g. assessment
    :param tenant_code: the tenant's two-letter code
    :param target_schema: the schema to load the data into
    :return: A dictionary of the config details
    """
    db_params_tuple = get_db_connection_params(udl2_conf['udl2_db_conn']['url'])

    conf = {
        # add guid_batch from msg
        mk.GUID_BATCH: guid_batch,
        # source schema
        mk.SOURCE_DB_SCHEMA: udl2_conf['udl2_db_conn']['db_schema'],
        # source database setting
        mk.SOURCE_DB_DRIVER: db_params_tuple[0],
        mk.SOURCE_DB_USER: db_params_tuple[1],
        mk.SOURCE_DB_PASSWORD: db_params_tuple[2],
        mk.SOURCE_DB_HOST: db_params_tuple[3],
        mk.SOURCE_DB_PORT: db_params_tuple[4],
        mk.SOURCE_DB_NAME: db_params_tuple[5],
        mk.SOURCE_DB_TABLE: Constants.UDL2_JSON_INTEGRATION_TABLE(load_type),
        mk.TARGET_DB_SCHEMA: target_schema,
        mk.REF_TABLE: Constants.UDL2_REF_MAPPING_TABLE(load_type),
        mk.PHASE: int(phase_number),
        mk.LOAD_TYPE: load_type,
        mk.TENANT_NAME: tenant_code,
        mk.PROD_DB_SCHEMA: udl2_conf['prod_db_conn'][tenant_code]['db_schema']
    }
    return conf
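
The 6-tuple unpacked above comes from a URL-parsing helper that is not shown here; a minimal sketch of its assumed contract, using SQLAlchemy's make_url (hypothetical, not the actual edudl2 implementation):

from sqlalchemy.engine.url import make_url


def get_db_connection_params(db_url):
    # Sketch only: split a SQLAlchemy-style URL such as
    # 'postgresql+psycopg2://user:pw@host:5432/udl2' into the
    # (driver, user, password, host, port, name) tuple unpacked above.
    url = make_url(db_url)
    return (url.drivername, url.username, url.password,
            url.host, url.port, url.database)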
Example #2
def load_stored_proc():
    '''
    Generate and load the stored procedures used for transformations and
    validations into the database.
    '''
    populate_stored_proc(Constants.UDL2_REF_MAPPING_TABLE(Constants.LOAD_TYPE_ASSESSMENT),
                         Constants.UDL2_REF_MAPPING_TABLE(Constants.LOAD_TYPE_STUDENT_REGISTRATION))
Example #3
def load_reference_data():
    '''
    Load the reference data into the reference tables.
    '''
    asmt_ref_table_info = ref_table_data.ref_table_conf
    populate_ref_column_map(asmt_ref_table_info, Constants.UDL2_REF_MAPPING_TABLE(Constants.LOAD_TYPE_ASSESSMENT))

    sr_ref_table_info = sr_ref_table_data.ref_table_conf
    populate_ref_column_map(sr_ref_table_info, Constants.UDL2_REF_MAPPING_TABLE(Constants.LOAD_TYPE_STUDENT_REGISTRATION))
def get_content_validator_conf(guid_batch, load_type):
    udl_db_conn = udl2_conf.get(Constants.UDL2_DB_CONN)
    conf = {
        mk.SOURCE_DB_SCHEMA: udl_db_conn.get(Constants.DB_SCHEMA),
        mk.ASMT_TABLE: Constants.UDL2_JSON_INTEGRATION_TABLE(load_type),
        mk.ASMT_OUTCOME_TABLE: Constants.UDL2_STAGING_TABLE(load_type),
        mk.GUID_BATCH: guid_batch,
        mk.LOAD_TYPE: load_type
    }
    return conf
Example #5
 def test_load_stage_to_int_student_registration(self):
     guid_batch = str(uuid4())
     load_type = Constants.LOAD_TYPE_STUDENT_REGISTRATION
     conf = self.generate_conf_for_moving_from_stg_to_int(guid_batch, load_type)
     self.udl2_conf['guid_batch'] = guid_batch
     self.load_file_to_stage(os.path.join('student_registration_data', 'test_stu_reg_without_headers.csv'),
                             os.path.join('student_registration_data', 'test_stu_reg_header.csv'),
                             load_type, Constants.UDL2_STAGING_TABLE(load_type), guid_batch)
     move_data_from_staging_to_integration(conf)
     postloading_total = self.postloading_count(Constants.UDL2_INTEGRATION_TABLE(load_type))
     self.assertEqual(10, postloading_total)
def init_udl_tenant_sequences(udl2_conf):
    # Create and sync sequence for each tenant on udl database if it doesn't exist
    with get_udl_connection() as udl_conn:
        all_tenants = udl2_conf.get(PRODUCTION_NAMESPACE)
        udl_schema_name = udl2_conf.get(UDL_NAMESPACE).get(Constants.DB_SCHEMA)
        # dict to keep track of tenant sequence values for each tenant defined in the ini
        all_tenant_sequences = {}
        for tenant in all_tenants:
            tenant_seq_name = Constants.TENANT_SEQUENCE_NAME(tenant)
            tenant_schema_name = all_tenants.get(tenant).get(Constants.DB_SCHEMA)
            # unique identifier for each tenant
            key = all_tenants.get(tenant).get(Constants.URL) + ':' + tenant_schema_name
            # check if we have already visited the tenant prod schema
            if key not in all_tenant_sequences:
                with get_prod_connection(tenant) as prod_conn:
                    prod_seq_result = prod_conn.execute(text("select nextval('{schema_name}.{seq_name}')".
                                                             format(schema_name=tenant_schema_name,
                                                                    seq_name=Constants.SEQUENCE_NAME)))
                    all_tenant_sequences[key] = prod_seq_result.fetchone()[0]
            # check if the global tenant sequence exists in the udl database
            if not sequence_exists(udl_conn, tenant_seq_name):
                # create the sequence if it does not exist
                udl_conn.execute(CreateSequence(Sequence(name=tenant_seq_name, increment=1)))
            # update and set the current value for the tenant sequence
            udl_conn.execute(text("select setval('{schema_name}.{seq_name}', {value}, {called})".
                                  format(schema_name=udl_schema_name, seq_name=tenant_seq_name,
                                         value=all_tenant_sequences[key], called=True)))
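
sequence_exists is assumed by the loop above but not shown; a plausible sketch against PostgreSQL's information_schema (hypothetical, not the actual helper):

from sqlalchemy import text


def sequence_exists(conn, seq_name):
    # Hypothetical sketch: look the sequence up by name; the real helper
    # may additionally scope the lookup to the udl schema.
    result = conn.execute(
        text("select 1 from information_schema.sequences "
             "where sequence_name = :name"),
        {'name': seq_name})
    return result.fetchone() is not None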
Example #7
def import_via_fdw(conn, stg_columns, csv_table_columns, transformation_rules,
                   apply_rules, staging_schema, staging_table, csv_schema,
                   csv_table, start_seq, tenant_name):
    '''
    Load data from foreign table to staging table
    '''
    # build the sequence name from the csv table name and the starting
    # sequence value; this sequence populates the src_file_rec_num column
    seq_name = (csv_table + '_' + str(start_seq)).lower()

    global_tenant_seq_name = Constants.TENANT_SEQUENCE_NAME(tenant_name)

    # query 1 -- create query to create sequence
    create_sequence = queries.create_sequence_query(staging_schema, seq_name,
                                                    start_seq)
    # query 2 -- create query to load data from fdw to staging table
    insert_into_staging_table = queries.create_inserting_into_staging_query(
        stg_columns, apply_rules, csv_table_columns, staging_schema,
        staging_table, csv_schema, csv_table, seq_name, global_tenant_seq_name,
        transformation_rules)
    # query 3 -- create query to drop sequence
    drop_sequence = queries.drop_sequence_query(staging_schema, seq_name)

    # execute 3 queries in order
    execute_udl_queries(
        conn, [create_sequence, insert_into_staging_table, drop_sequence],
        'Exception in loading data -- ', 'file_loader', 'import_via_fdw')
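
The queries module that builds these statements is not shown; the two sequence bookends plausibly reduce to plain DDL strings like the following (assumed shapes -- the staging insert between them is far more involved):

def create_sequence_query(staging_schema, seq_name, start_seq):
    # assumed shape: the sequence feeds the src_file_rec_num column
    return 'CREATE SEQUENCE "{schema}"."{seq}" START {start}'.format(
        schema=staging_schema, seq=seq_name, start=start_seq)


def drop_sequence_query(staging_schema, seq_name):
    # assumed shape: drop the per-load sequence once rows are staged
    return 'DROP SEQUENCE "{schema}"."{seq}"'.format(
        schema=staging_schema, seq=seq_name)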
Example #8
def get_source_target_column_values_from_ref_column_mapping(source_table, load_type):
    """
    return all data for source_column from ref_column_mapping based on source_table value passed
    :param: source_table: source_table values to query for
    :return: A set containing all the columns expected in a source csv file
    """
    with get_udl_connection() as conn:
        table_meta = conn.get_table(Constants.UDL2_REF_MAPPING_TABLE(load_type))
        select_object = select([table_meta]).where(table_meta.c.source_table == source_table)
        return [(row['target_column'], row['source_column']) for row in conn.execute(select_object)]
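
For the JSON load path, the pairs returned here are folded into the mk.MAPPINGS dict, treating each source column as a dotted path into the JSON document (see the JSON conf builders below):

# Illustrative data only -- mirrors how the JSON generate_conf_for_loading
# below consumes the (target, source) pairs.
results = [('guid_student', 'student.guid'), ('name_state', 'state.name')]
mappings = dict([(target, source.split('.')) for target, source in results])
# mappings == {'guid_student': ['student', 'guid'], 'name_state': ['state', 'name']}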
def generate_conf_for_loading(file_to_load, start_seq, load_type,
                              header_file_path, guid_batch, tenant_name):
    csv_table = extract_file_name(file_to_load)
    conf = {
        mk.FILE_TO_LOAD: file_to_load,
        mk.ROW_START: start_seq,
        mk.HEADERS: header_file_path,
        mk.CSV_SCHEMA: udl2_conf['udl2_db_conn']['db_schema'],
        mk.CSV_TABLE: csv_table,
        mk.FDW_SERVER: Constants.UDL2_FDW_SERVER,
        mk.TARGET_DB_SCHEMA: udl2_conf['udl2_db_conn']['db_schema'],
        mk.TARGET_DB_TABLE: Constants.UDL2_STAGING_TABLE(load_type),
        mk.APPLY_RULES: True,
        mk.REF_TABLE: Constants.UDL2_REF_MAPPING_TABLE(load_type),
        mk.CSV_LZ_TABLE: Constants.UDL2_CSV_LZ_TABLE,
        mk.GUID_BATCH: guid_batch,
        mk.TENANT_NAME: tenant_name
    }
    return conf
Example #10
def task(msg):
    start_time = datetime.datetime.now()
    logger.info(
        "LOAD_SR_INTEGRATION_TO_TARGET: Migrating data from SR integration tables to target tables."
    )
    guid_batch = msg[mk.GUID_BATCH]
    load_type = msg[mk.LOAD_TYPE]

    source_tables = [
        Constants.UDL2_INTEGRATION_TABLE(load_type),
        Constants.UDL2_JSON_INTEGRATION_TABLE(load_type)
    ]
    target_table = Constants.SR_TARGET_TABLE

    target_schema = msg.get(mk.TARGET_DB_SCHEMA, msg[mk.GUID_BATCH])
    conf = generate_conf(guid_batch, msg[mk.PHASE], load_type,
                         msg[mk.TENANT_NAME], target_schema)
    affected_rows = move_data_from_int_tables_to_target_table(
        conf, task.name, source_tables, target_table)

    end_time = datetime.datetime.now()

    # benchmark
    benchmark = BatchTableBenchmark(guid_batch,
                                    load_type,
                                    task.name,
                                    start_time,
                                    end_time,
                                    task_id=str(task.request.id),
                                    working_schema="",
                                    size_records=affected_rows[0],
                                    tenant=msg[mk.TENANT_NAME])
    benchmark.record_benchmark()

    notification_data = {mk.TOTAL_ROWS_LOADED: affected_rows[0]}
    merge_to_udl2stat_notification(guid_batch, notification_data)
    outgoing_msg = {}
    outgoing_msg.update(msg)
    outgoing_msg.update(notification_data)
    return outgoing_msg
Example #11
 def load_config(self, load_type):
     if load_type == 'assessment':
         self.conf[mk.TARGET_DB_TABLE] = 'stg_sbac_asmt_outcome'
         self.conf[mk.REF_TABLE] = Constants.UDL2_REF_MAPPING_TABLE(
             'assessment')
         self.conf[mk.CSV_TABLE] = 'test_csv_table'
         self.conf[mk.TENANT_NAME] = 'cat'
         self.conf[mk.FILE_TO_LOAD] = self.get_csv_file(
             'test_file_realdata.csv')
         self.conf[mk.HEADERS] = self.get_csv_file('test_file_headers.csv')
     elif load_type == 'studentregistration':
         self.conf[mk.TARGET_DB_TABLE] = 'stg_sbac_stu_reg'
         self.conf[mk.REF_TABLE] = Constants.UDL2_REF_MAPPING_TABLE(
             'studentregistration')
         self.conf[mk.TENANT_NAME] = 'cat'
         self.conf[mk.CSV_TABLE] = 'test_stu_reg_csv_table'
         self.conf[mk.FILE_TO_LOAD] = self.get_csv_file(
             'student_registration_data/test_sample_student_reg.csv')
         self.conf[mk.HEADERS] = self.get_csv_file(
             'student_registration_data/test_stu_reg_header.csv')
Example #12
def get_assessment_type(json_file_dir):
    """
    Get the assessment type for the UDL job from the json file
    @param json_file_dir: A directory that houses the json file
    @return: UDL job assessment type
    @rtype: string
    """
    assessment_types = Constants.ASSESSMENT_TYPES()
    assessment_type = get_value_from_json(json_file_dir,
                                          Constants.ASSESSMENT_TYPE_KEY)
    if assessment_type not in assessment_types:
        raise ValueError('No valid assessment type specified in json file --')
    return assessment_type
    def verify_json_load(self, load_type, conf, columns, guid):
        load_json(conf)

        sr_int_table = self.udl2_conn.get_table(
            Constants.UDL2_JSON_INTEGRATION_TABLE(load_type))
        query = select(['*'], sr_int_table.c.guid_batch == guid)
        result = self.udl2_conn.execute(query).fetchall()
        for row in result:
            self.assertEqual(len(row), len(columns),
                             'Unexpected number of columns')
            for column in columns:
                self.assertTrue(row[column],
                                'Expected column does not have data')
 def generate_config(self, load_type, file, guid):
     results = sfv_util.get_source_target_column_values_from_ref_column_mapping(
         Constants.UDL2_JSON_LZ_TABLE, load_type)
     conf = {
         mk.GUID_BATCH: guid,
         mk.FILE_TO_LOAD: file,
         mk.MAPPINGS:
         dict([(row[0], row[1].split('.')) for row in results]),
         mk.TARGET_DB_TABLE:
         Constants.UDL2_JSON_INTEGRATION_TABLE(load_type),
         mk.TARGET_DB_SCHEMA: udl2_conf['udl2_db_conn']['db_schema'],
         mk.TENANT_NAME: 'cat'
     }
     return conf
Example #15
    def generate_conf_for_moving_from_stg_to_int(self, guid_batch, load_type):
        db_params_tuple = get_db_connection_params(self.udl2_conf['udl2_db_conn']['url'])
        conf = {
            mk.GUID_BATCH: guid_batch,

            # source database setting
            mk.SOURCE_DB_DRIVER: db_params_tuple[0],
            mk.SOURCE_DB_USER: db_params_tuple[1],
            mk.SOURCE_DB_PASSWORD: db_params_tuple[2],
            mk.SOURCE_DB_HOST: db_params_tuple[3],
            mk.SOURCE_DB_PORT: db_params_tuple[4],
            mk.SOURCE_DB_NAME: db_params_tuple[5],
            mk.SOURCE_DB_SCHEMA: self.udl2_conf['udl2_db_conn']['db_schema'],
            mk.SOURCE_DB_TABLE: Constants.UDL2_STAGING_TABLE(load_type),

            # target database setting
            mk.TARGET_DB_SCHEMA: self.udl2_conf['udl2_db_conn']['db_schema'],
            mk.TARGET_DB_TABLE: Constants.UDL2_INTEGRATION_TABLE(load_type),

            mk.REF_TABLE: Constants.UDL2_REF_MAPPING_TABLE(load_type),
            mk.ERROR_DB_SCHEMA: self.udl2_conf['udl2_db_conn']['db_schema'],
            mk.ERR_LIST_TABLE: Constants.UDL2_ERR_LIST_TABLE
        }
        return conf
def generate_conf_for_loading(json_file, guid_batch, load_type, tenant_name):
    '''
    Takes the msg and pulls out the relevant parameters to pass to
    the method that loads the json.
    '''
    results = sfv_util.get_source_target_column_values_from_ref_column_mapping(
        Constants.UDL2_JSON_LZ_TABLE, load_type)
    conf = {
        mk.FILE_TO_LOAD: json_file,
        mk.MAPPINGS: dict([(row[0], row[1].split('.')) for row in results]),
        mk.TARGET_DB_SCHEMA: udl2_conf['udl2_db_conn']['db_schema'],
        mk.TARGET_DB_TABLE: Constants.UDL2_JSON_INTEGRATION_TABLE(load_type),
        mk.GUID_BATCH: guid_batch,
        mk.TENANT_NAME: tenant_name
    }
    return conf
Example #17
 def load_file_to_stage(self, data_file, header_file, load_type, staging_table, guid):
     # the data file contains 30 rows
     conf = {
         mk.FILE_TO_LOAD: os.path.join(self.data_dir, data_file),
         mk.HEADERS: os.path.join(self.data_dir, header_file),
         mk.CSV_TABLE: 'csv_table_for_file_loader',
         mk.CSV_SCHEMA: self.udl2_conf['udl2_db_conn']['db_schema'],
         mk.REF_TABLE: Constants.UDL2_REF_MAPPING_TABLE(load_type),
         mk.CSV_LZ_TABLE: Constants.UDL2_CSV_LZ_TABLE,
         mk.FDW_SERVER: 'udl2_fdw_server',
         mk.TARGET_DB_SCHEMA: self.udl2_conf['udl2_db_conn']['db_schema'],
         mk.TARGET_DB_TABLE: staging_table,
         mk.APPLY_RULES: False,
         mk.ROW_START: 10,
         mk.GUID_BATCH: guid,
         mk.TENANT_NAME: 'cat'
     }
     load_file(conf)
     with get_udl_connection() as conn:
         _table = conn.get_table(staging_table)
         update_stmt = update(_table).values(record_sid=1000 + _table.c.src_file_rec_num - 1).\
             where(_table.c.guid_batch == guid)
         conn.execute(update_stmt)
    def setUp(self):
        config_path = os.environ.get('UDL2_CONF', UDL2_DEFAULT_CONFIG_PATH_FILE)

        conf_tup = read_ini_file(config_path)
        udl2_conf = conf_tup[0]
        initialize_db_udl(udl2_conf)
        self.ref_schema = udl2_conf['udl2_db_conn']['db_schema']
        self.ref_table_name = Constants.UDL2_REF_MAPPING_TABLE(
            Constants.LOAD_TYPE_ASSESSMENT)

        # Testable Rules
        self.rule_names = transform_rules.keys()
        self.rule_conf = transform_rules
        self.rule_list = transformation_code_generator.generate_transformations(
            self.rule_names, rule_conf=self.rule_conf)
        self.testable_rules = []
        for rule in self.rule_list:
            self.testable_rules.append(rule[0])

        test_rows = []
        for rule in self.testable_rules:
            ins_dict = {
                'phase': -999,
                'source_table': 'ftest_table',
                'source_column': 'ftest_column',
                'target_table': 'ftest_table1',
                'target_column': 'ftest_column1',
                'transformation_rule': rule,
            }
            test_rows.append(ins_dict)
        with get_udl_connection() as conn:
            self.ref_table = conn.get_table(self.ref_table_name)
            conn.execute(self.ref_table.insert(test_rows))
Example #19
 def test_all_lambda_constants(self):
     self.assertEqual(len(Constants.LOAD_TYPES()), 2)
     self.assertEqual(Constants.LOAD_TYPES(), [
         Constants.LOAD_TYPE_ASSESSMENT,
         Constants.LOAD_TYPE_STUDENT_REGISTRATION
     ])
     self.assertEqual(len(Constants.ASSESSMENT_TYPES()), 3)
     self.assertEqual(Constants.ASSESSMENT_TYPES(), [
         Constants.ASSESSMENT_TYPE_SUMMATIVE,
         Constants.ASSESSMENT_TYPE_INTERIM_COMPREHENSIVE,
         Constants.ASSESSMENT_TYPE_INTERIM_ASSESSMENT_BLOCKS
     ])
     self.assertEqual(
         Constants.UDL2_STAGING_TABLE(Constants.LOAD_TYPE_ASSESSMENT),
         Constants.STG_ASMT_OUT_TABLE)
     self.assertEqual(
         Constants.UDL2_STAGING_TABLE(
             Constants.LOAD_TYPE_STUDENT_REGISTRATION),
         Constants.STG_SR_TABLE)
     self.assertEqual(
         Constants.UDL2_INTEGRATION_TABLE(Constants.LOAD_TYPE_ASSESSMENT),
         Constants.INT_ASMT_OUT_TABLE)
     self.assertEqual(
         Constants.UDL2_INTEGRATION_TABLE(
             Constants.LOAD_TYPE_STUDENT_REGISTRATION),
         Constants.INT_SR_TABLE)
     self.assertEqual(
         Constants.UDL2_JSON_INTEGRATION_TABLE(
             Constants.LOAD_TYPE_ASSESSMENT), Constants.INT_ASMT_TABLE)
     self.assertEqual(
         Constants.UDL2_JSON_INTEGRATION_TABLE(
             Constants.LOAD_TYPE_STUDENT_REGISTRATION),
         Constants.INT_SR_META_TABLE)
     self.assertEqual(
         Constants.UDL2_REF_MAPPING_TABLE(Constants.LOAD_TYPE_ASSESSMENT),
         Constants.ASMT_REF_TABLE)
     self.assertEqual(
         Constants.UDL2_REF_MAPPING_TABLE(
             Constants.LOAD_TYPE_STUDENT_REGISTRATION),
         Constants.SR_REF_TABLE)
     self.assertEqual(Constants.TENANT_SEQUENCE_NAME('cat'),
                      Constants.SEQUENCE_NAME + '_' + 'cat')
     self.assertEqual(Constants.TENANT_SEQUENCE_NAME(''), None)
     self.assertEqual(Constants.TENANT_SEQUENCE_NAME(None), None)
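
The last three assertions pin down TENANT_SEQUENCE_NAME; a definition consistent with them would be (sketch, not the actual Constants source):

# Sketch: suffix the global sequence name with the tenant code, returning
# None for a falsy tenant, as the assertions above require.
TENANT_SEQUENCE_NAME = staticmethod(
    lambda tenant: Constants.SEQUENCE_NAME + '_' + tenant if tenant else None)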
Example #20
 def test_get_column_mapping_from_stg_to_int(self):
     expected_target_columns = ['name_state', 'code_state', 'guid_district', 'name_district', 'guid_school', 'name_school',
                                'guid_student', 'external_ssid_student', 'name_student_first', 'name_student_middle', 'name_student_last',
                                'birthdate_student', 'sex_student', 'grade_enrolled', 'dmg_eth_hsp', 'dmg_eth_ami', 'dmg_eth_asn',
                                'dmg_eth_blk', 'dmg_eth_pcf', 'dmg_eth_wht', 'dmg_multi_race', 'dmg_prg_iep', 'dmg_prg_lep', 'dmg_prg_504', 'dmg_sts_ecd',
                                'dmg_sts_mig', 'code_language', 'eng_prof_lvl', 'us_school_entry_date', 'lep_entry_date',
                                'lep_exit_date', 't3_program_type', 'prim_disability_type', 'created_date', 'guid_batch']
     expected_source_columns_with_tran_rule = ['substr("A".name_state, 1, 50)', 'substr("A".code_state, 1, 2)', 'substr("A".guid_district, 1, 40)',
                                               'substr("A".name_district, 1, 60)', 'substr("A".guid_school, 1, 40)', 'substr("A".name_school, 1, 60)',
                                               'substr("A".guid_student, 1, 40)', 'substr("A".external_ssid_student, 1, 40)', 'substr("A".name_student_first, 1, 35)',
                                               'substr("A".name_student_middle, 1, 35)', 'substr("A".name_student_last, 1, 35)', 'substr("A".birthdate_student, 1, 10)',
                                               'substr("A".sex_student, 1, 10)', 'substr("A".grade_enrolled, 1, 2)',
                                               'case "A".dmg_eth_hsp when \'\' then null else cast("A".dmg_eth_hsp as bool) end',
                                               'case "A".dmg_eth_ami when \'\' then null else cast("A".dmg_eth_ami as bool) end',
                                               'case "A".dmg_eth_asn when \'\' then null else cast("A".dmg_eth_asn as bool) end',
                                               'case "A".dmg_eth_blk when \'\' then null else cast("A".dmg_eth_blk as bool) end',
                                               'case "A".dmg_eth_pcf when \'\' then null else cast("A".dmg_eth_pcf as bool) end',
                                               'case "A".dmg_eth_wht when \'\' then null else cast("A".dmg_eth_wht as bool) end',
                                               'case "A".dmg_multi_race when \'\' then null else cast("A".dmg_multi_race as bool) end',
                                               'case "A".dmg_prg_iep when \'\' then null else cast("A".dmg_prg_iep as bool) end',
                                               'case "A".dmg_prg_lep when \'\' then null else cast("A".dmg_prg_lep as bool) end',
                                               'case "A".dmg_prg_504 when \'\' then null else cast("A".dmg_prg_504 as bool) end',
                                               'case "A".dmg_sts_ecd when \'\' then null else cast("A".dmg_sts_ecd as bool) end',
                                               'case "A".dmg_sts_mig when \'\' then null else cast("A".dmg_sts_mig as bool) end',
                                               'substr("A".code_language, 1, 3)', 'substr("A".eng_prof_lvl, 1, 20)', 'substr("A".us_school_entry_date, 1, 10)',
                                               'substr("A".lep_entry_date, 1, 10)', 'substr("A".lep_exit_date, 1, 10)', 'substr("A".t3_program_type, 1, 27)',
                                               'substr("A".prim_disability_type, 1, 3)', '"A".created_date', '"A".guid_batch']
     with get_udl_connection() as conn:
         target_columns, source_columns_with_tran_rule = get_column_mapping_from_stg_to_int(conn,
                                                                                            Constants.UDL2_REF_MAPPING_TABLE(Constants.LOAD_TYPE_STUDENT_REGISTRATION),
                                                                                            'stg_sbac_stu_reg', 'int_sbac_stu_reg')
         self.assertEqual(expected_target_columns, target_columns)
         self.assertEqual(expected_source_columns_with_tran_rule, source_columns_with_tran_rule)
Example #21
def load_to_table(data_dict, guid_batch, int_table, tenant_name, udl_schema):
    '''
    Load the data into the given integration table
    @param data_dict: the dictionary containing the data to be loaded
    @param guid_batch: the id for the batch
    @param int_table: the name of the integration table
    @param tenant_name: name of the tenant
    @param udl_schema: udl schema name
    '''
    # Create sqlalchemy connection and get table information from sqlalchemy
    ref_column_mapping_columns = {}
    with get_udl_connection() as conn:
        data_dict[mk.GUID_BATCH] = guid_batch
        data_dict = fix_empty_strings(data_dict)
        ref_table = conn.get_table('ref_column_mapping')
        s_int_table = conn.get_table(int_table)
        column_mapping_query = select(
            [ref_table.c.target_column, ref_table.c.stored_proc_name],
            from_obj=ref_table).where(
                and_(ref_table.c.source_table == 'lz_json',
                     ref_table.c.target_table == int_table))
        results = conn.get_result(column_mapping_query)
        for result in results:
            target_column = result['target_column']
            stored_proc_name = result['stored_proc_name']
            value = data_dict.get(target_column)
            if value:
                if stored_proc_name:
                    if stored_proc_name.startswith('sp_'):
                        ref_column_mapping_columns[
                            target_column] = stored_proc_name + '(' + QuotedString(
                                value if type(value) is str else str(value)
                            ).getquoted().decode('utf-8') + ')'
                    else:
                        format_value = dict()
                        format_value['value'] = QuotedString(
                            value if type(value) is str else str(value)
                        ).getquoted().decode('utf-8')
                        if s_int_table.c[target_column].type.python_type is str:
                            format_value['length'] = s_int_table.c[
                                target_column].type.length
                        ref_column_mapping_columns[
                            target_column] = stored_proc_name.format(
                                **format_value)
                    continue
            ref_column_mapping_columns[target_column] = value

        record_sid = 'nextval(\'{schema_name}.{tenant_sequence_name}\')'.\
            format(schema_name=udl_schema, tenant_sequence_name=Constants.TENANT_SEQUENCE_NAME(tenant_name))
        from_select_column_names = ['record_sid']
        from_select_select_values = [record_sid]
        for column in s_int_table.c:
            value = data_dict.get(column.name)
            if value is not None:
                from_select_column_names.append(column.name)
                from_select_select_values.append(
                    ref_column_mapping_columns.get(
                        column.name,
                        QuotedString(value if type(value) is str else str(
                            value)).getquoted().decode('utf-8')))
        insert_into_int_table = s_int_table.insert().from_select(
            from_select_column_names, select(from_select_select_values))
        # create insert statement and execute
        affected_row = db_util.execute_udl_queries(
            conn, [insert_into_int_table],
            'Exception in loading json data -- ', 'json_loader',
            'load_to_table')

    return affected_row[0]
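
fix_empty_strings is called above but not shown; given the empty-string-to-null casts in the stg-to-int column mappings, a plausible sketch is:

def fix_empty_strings(data_dict):
    # Hypothetical sketch: normalize '' to None so empty JSON values are
    # stored as nulls rather than empty strings in typed columns.
    return dict((k, None if v == '' else v) for k, v in data_dict.items())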
Example #22
# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully
# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,
# display, distribute, perform and create derivative works of the software
# included in the Reporting Platform, including the source code to such software.
# This license includes the right to grant sublicenses by such consortium members
# to third party vendors solely for the purpose of performing services on behalf
# of such consortium member educational agencies.

import logging
from edudl2.json_util.json_util import get_value_from_json
from edudl2.udl2.constants import Constants

__author__ = 'tshewchuk'

logger = logging.getLogger(__name__)
load_types = Constants.LOAD_TYPES()


def get_load_type(json_file_dir):
    """
    Get the load type for this UDL job from the json file
    @param json_file_dir: A directory that houses the json file
    @return: UDL job load type
    @rtype: string
    """

    load_type = get_value_from_json(json_file_dir,
                                    Constants.LOAD_TYPE_KEY).lower()

    if load_type not in load_types:
        raise ValueError('No valid load type specified in json file --')
    return load_type
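
get_value_from_json comes from edudl2.json_util; its assumed behavior, sketched for context (hypothetical, not the real implementation):

import json
import os


def get_value_from_json_sketch(json_file_dir, key):
    # Hypothetical stand-in: locate the json file in the directory and
    # return the value stored under the given key ('' when absent).
    for name in os.listdir(json_file_dir):
        if name.endswith('.json'):
            with open(os.path.join(json_file_dir, name)) as f:
                return json.load(f).get(key, '')
    return ''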