Ejemplo n.º 1
0
    # Debug dump before aborting: report the alias-dictionary size and its
    # full contents so the bad state can be inspected, then exit non-zero.
    print len(talias_dict)
    print talias_dict
    sys.exit(1)

# Abort early if any field name occurs more than once in the dictionary;
# otherwise build a lower-cased name -> field-spec lookup table.
fnames = [f['fname'] for f in fdict]
if len(set(fnames)) < len(fnames):
    print "ERROR: Duplicate field names found in dictionary"
    sys.exit(1)
else:
    # Keyed on the lower-cased name so later lookups are case-insensitive.
    fdict_lookup = dict((f['fname'].lower(), f) for f in fdict)

# Get fields names from existing table
# Names are normalised to lower case to match the fdict_lookup keys.
field_list = myspot.sql_get_cols(source_db, table_in)
field_list = [f.lower() for f in field_list]
print field_list

# Accumulators for the source->target column mapping built below.
source_fields, target_fields = [], []
# NOTE(review): the mapping loop is commented out, so source_fields and
# target_fields are currently left empty -- confirm this is intentional.
# for field in field_list:
#     if field not in talias_dict:
#         print "WARNING: %s not in field dictionary - will not be imported" % field
#         continue
#     fname = talias_dict[field]
#     fspec = fdict_lookup[fname]

#     source_fields.append(field)
#     target_fields.append(fname)

#     print field, fname
Ejemplo n.º 2
0
# Derive the three table names used by this run: the target table itself,
# its "_original" copy, and the indexed copy.
t_tab = args.target_table
o_tab = args.target_table + args.suffix_original
c_tab = args.target_table + args.suffix_index
print "\nOK: Running index_table.py for %s\n" % o_tab

cursor, connection = sql_connect(args.target_db, connection=True)

# Load the project YAML dictionaries: table specs, field specs, corrections.
tdict = get_yaml_dict('table', local_dict=True)
fdict = get_yaml_dict('field', local_dict=True)
cdict = get_yaml_dict('corrections', local_dict=True)

tspec = get_tspec(tdict, t_tab)
pkeys = tspec['pkey']  # primary-key field names declared for the target table

# if there are excel sheet, row references then grab these as well
field_names = sql_get_cols(args.target_db, o_tab)
if 'modifiedat' in field_names:
    # 'modifiedat' is bookkeeping, not data -- drop it from the working list
    field_names.remove('modifiedat')

# Tables imported from Excel carry sheet/row provenance columns.
excel_source = False
if 'excel_sheet' in field_names and 'excel_row' in field_names:
    excel_source = True
#     stmt = """ SELECT excel_sheet, excel_row FROM %s """ % (o_tab)
#     cursor.execute(stmt)
#     row_keys = cursor.fetchall()
#     row_keys = ["sheet %s row %d" % (i[0], i[1]) for i in row_keys]
# else:
# assume sql_source

# CHANGED: 2012-09-07 - adds a unique row index to all tables
# NOTE(review): the table name is interpolated straight into the SQL; o_tab
# derives from command-line args, so confirm it is validated upstream.
stmt = "ALTER TABLE %s DROP COLUMN tab_serial" % o_tab
Ejemplo n.º 3
0
# Derive the three table names used by this run: the target table itself,
# its "_original" copy, and the indexed copy.
t_tab = args.target_table
o_tab = args.target_table + args.suffix_original
c_tab = args.target_table + args.suffix_index
print "\nOK: Running index_table.py for %s\n" % o_tab

cursor, connection = sql_connect(args.target_db, connection=True)

# Load the project YAML dictionaries: table specs, field specs, corrections.
tdict = get_yaml_dict('table', local_dict=True)
fdict = get_yaml_dict('field', local_dict=True)
cdict = get_yaml_dict('corrections', local_dict=True)

tspec = get_tspec(tdict, t_tab)
pkeys = tspec['pkey']  # primary-key field names declared for the target table

# if there are excel sheet, row references then grab these as well
field_names = sql_get_cols(args.target_db, o_tab)
if 'modifiedat' in field_names:
    # 'modifiedat' is bookkeeping, not data -- drop it from the working list
    field_names.remove('modifiedat')

# Tables imported from Excel carry sheet/row provenance columns.
excel_source = False
if 'excel_sheet' in field_names and 'excel_row' in field_names:
    excel_source = True
#     stmt = """ SELECT excel_sheet, excel_row FROM %s """ % (o_tab)
#     cursor.execute(stmt)
#     row_keys = cursor.fetchall()
#     row_keys = ["sheet %s row %d" % (i[0], i[1]) for i in row_keys]
# else:
    # assume sql_source

# CHANGED: 2012-09-07 - adds a unique row index to all tables
# NOTE(review): the table name is interpolated straight into the SQL; o_tab
# derives from command-line args, so confirm it is validated upstream.
stmt = "ALTER TABLE %s DROP COLUMN tab_serial" % o_tab
Ejemplo n.º 4
0
    # Debug dump before aborting: report the alias-dictionary size and its
    # full contents so the bad state can be inspected, then exit non-zero.
    print len(talias_dict)
    print talias_dict
    sys.exit(1)

# Abort early if any field name occurs more than once in the dictionary;
# otherwise build a lower-cased name -> field-spec lookup table.
fnames = [f['fname'] for f in fdict]
if len(set(fnames)) < len(fnames):
    print "ERROR: Duplicate field names found in dictionary"
    sys.exit(1)
else:
    # Keyed on the lower-cased name so later lookups are case-insensitive.
    fdict_lookup = dict((f['fname'].lower(), f) for f in fdict)

# Get fields names from existing table
# Names are normalised to lower case to match the fdict_lookup keys.
field_list = myspot.sql_get_cols(source_db, table_in)
field_list = [f.lower() for f in field_list]
print field_list

# Accumulators for the source->target column mapping built below.
source_fields, target_fields = [], []
# NOTE(review): the mapping loop is commented out, so source_fields and
# target_fields are currently left empty -- confirm this is intentional.
# for field in field_list:
#     if field not in talias_dict:
#         print "WARNING: %s not in field dictionary - will not be imported" % field
#         continue
#     fname = talias_dict[field]
#     fspec = fdict_lookup[fname]

#     source_fields.append(field)
#     target_fields.append(fname)

#     print field, fname
Ejemplo n.º 5
0
            # Write this sheet's tab label and remember the last row index
            # used so the next table's block starts below it.
            sh_dict.write(i + tab_offset, 3, tab_label)
            next_offset = i + tab_offset

        # Next table's rows begin immediately after the previous block.
        tab_offset = next_offset + 1

    dvr_book.save('../sitecomms/outgoing/' + dvr_filename)

    #  ========================
    #  = Add key errors sheet =
    #  ========================
    # Columns reported per key error, housekeeping columns to leave out, and
    # identifiable fields that must be redacted before the file is sent out.
    kerror_headings_original = ['validation_msg', 'key_fields', 'key_values', 'missing_fields']
    columns_to_skip = ['modifiedat', 'sourceFileTimeStamp']
    columns_to_redact = ['dob', 'namef', 'namel', 'idnhs', 'idpcode']
    cursor = sql_connect('spotid')
    for tab, tab_name in source_tables.items():
        kerror_fields = sql_get_cols('spotid', 'keys_dvr')
        # Pull this site's key errors for the current table from keys_dvr.
        # NOTE(review): sitecode and the table name are interpolated straight
        # into the SQL string -- confirm both are trusted, or parameterize.
        stmt = """SELECT %s FROM keys_dvr
                    WHERE locate('%s', key_values)
                    AND sql_table = '%s' """ % (
                ', '.join(kerror_fields), sitecode, tab + '_import')
        cursor.execute(stmt)
        kerror_rows = cursor.fetchall()
        # skip on if no key errors
        if not kerror_rows:
            continue

        # create dictionary of kerror vals (one field-name->value dict per row)
        krows = []
        for kerror_row in kerror_rows:
            kdict = dict(zip(kerror_fields, kerror_row))
            krows.append(kdict)
Ejemplo n.º 6
0
        # Next table's rows begin immediately after the previous block.
        tab_offset = next_offset + 1

    dvr_book.save('../sitecomms/outgoing/' + dvr_filename)

    #  ========================
    #  = Add key errors sheet =
    #  ========================
    # Columns reported per key error, housekeeping columns to leave out, and
    # identifiable fields that must be redacted before the file is sent out.
    kerror_headings_original = [
        'validation_msg', 'key_fields', 'key_values', 'missing_fields'
    ]
    columns_to_skip = ['modifiedat', 'sourceFileTimeStamp']
    columns_to_redact = ['dob', 'namef', 'namel', 'idnhs', 'idpcode']
    cursor = sql_connect('spotid')
    for tab, tab_name in source_tables.items():
        kerror_fields = sql_get_cols('spotid', 'keys_dvr')
        # Pull this site's key errors for the current table from keys_dvr.
        # NOTE(review): sitecode and the table name are interpolated straight
        # into the SQL string -- confirm both are trusted, or parameterize.
        stmt = """SELECT %s FROM keys_dvr
                    WHERE locate('%s', key_values)
                    AND sql_table = '%s' """ % (', '.join(kerror_fields),
                                                sitecode, tab + '_import')
        cursor.execute(stmt)
        kerror_rows = cursor.fetchall()
        # skip on if no key errors
        if not kerror_rows:
            continue

        # create dictionary of kerror vals (one field-name->value dict per row)
        krows = []
        for kerror_row in kerror_rows:
            kdict = dict(zip(kerror_fields, kerror_row))
            krows.append(kdict)