def _csv_to_sqlite3(args):
    """Import a CSV file into an SQLite database according to a mapping config.

    Reads ``args.input`` (CSV), applies the mapping configuration from
    ``args.mapping``, and writes the result into ``args.output`` (SQLite),
    finishing with a printed summary report.
    """
    csv_file = args.input
    config_file = args.mapping
    database_file = args.output

    # Fall back to the CSV file name (minus extension) for the table name.
    # ntpath.basename handles Windows-style separators even on POSIX.
    fallback_table = os.path.splitext(ntpath.basename(csv_file))[0]

    # Read the mapping configuration.
    (table_name,
     custom_transformations,
     mappings,
     mapping_action) = load_and_process_mapping_config(
        config_file, fallback_table, args.default_mapping_action)

    table_name = clean_name(table_name)

    # Custom transformation hooks are optional in the config.
    if custom_transformations:
        load_custom_transformations(config_file, custom_transformations)

    rows = csv_read_file(csv_file)

    column_titles = []
    if args.csv_has_title_columns:
        # First row holds the column titles; strip it from the data rows.
        column_titles, rows = rows[0], rows[1:]

    dbutils.create_and_connect(database_file)

    set_mapping_defaults(rows, mappings, column_titles, mapping_action)

    rows = csv_transform(rows, mappings)

    # Create the main table and remember how many rows it already had,
    # so the report can show only the rows added by this run.
    dbutils.create_table(table_name, mappings)
    rows_pre_count = dbutils.count(table_name)

    # Build the reference (foreign-key) tables and substitute the raw
    # CSV values with the generated foreign-key IDs.
    fk_mappings, _ = read_key_mappings(rows, mappings)
    fk_patch_data = fk_mappings_to_database(fk_mappings)
    patch_csv_data(fk_patch_data, rows)

    # Load the patched rows into the main table.
    import_csv(rows, table_name, mappings)

    print_report(args, rows_pre_count, table_name, fk_mappings)
def _csv_to_sqlite3(args):
    """CLI worker: load ``args.input`` (CSV) into ``args.output`` (SQLite).

    The column mapping comes from ``args.mapping``; a summary report is
    printed once the import completes.
    """
    # Default table name is the CSV file name without its extension.
    # ntpath.basename also strips Windows-style path separators.
    default_table = os.path.splitext(ntpath.basename(args.input))[0]

    table_name, transforms, mappings, action = load_and_process_mapping_config(
        args.mapping, default_table, args.default_mapping_action)
    table_name = clean_name(table_name)

    # Optional user-supplied transformation hooks.
    if transforms:
        load_custom_transformations(args.mapping, transforms)

    data = csv_read_file(args.input)

    header_row = []
    if args.csv_has_title_columns:
        # Split the title row off from the data rows.
        header_row, data = data[0], data[1:]

    dbutils.create_and_connect(args.output)

    set_mapping_defaults(data, mappings, header_row, action)

    data = csv_transform(data, mappings)

    # Snapshot the row count right after table creation so the report can
    # show only the delta produced by this import.
    dbutils.create_table(table_name, mappings)
    pre_count = dbutils.count(table_name)

    # Materialize foreign-key reference tables, then rewrite the CSV
    # values in place with the corresponding foreign-key IDs.
    fk_mappings, _ = read_key_mappings(data, mappings)
    patch_csv_data(fk_mappings_to_database(fk_mappings), data)

    import_csv(data, table_name, mappings)

    print_report(args, pre_count, table_name, fk_mappings)
def print_report(my_args, rows_pre_count, table_name, fk_mappings):
    """Print an import summary: rows added, options used, and table layout."""
    # TODO: distinguish whether the database was created or appended to
    added = dbutils.count(table_name) - rows_pre_count
    db_name = ntpath.basename(my_args.output)
    print('Added {} records to database {}'.format(added, db_name))

    title_flag = '' if my_args.csv_has_title_columns else 'NO '
    print('Options: ({}, {}CSV TITLE COLUMNS)'.format(
        my_args.default_mapping_action.upper(), title_flag))

    # Sketch the resulting schema: main table plus its reference tables.
    print('Database map:')
    print(' {} (MAIN TABLE)'.format(table_name))
    for fk_mapping in fk_mappings:
        print('  └─ {} (REFERENCE TABLE)'.format(fk_mapping['column_name']))
    print('')
def print_report(my_args, rows_pre_count, table_name, fk_mappings):
    """Report how many rows the import added and sketch the resulting schema."""
    # TODO: Created or Appended
    record_delta = dbutils.count(table_name) - rows_pre_count
    print('Added {} records to database {}'.format(
        record_delta, ntpath.basename(my_args.output)))

    if my_args.csv_has_title_columns:
        prefix = ''
    else:
        prefix = 'NO '
    print('Options: ({}, {}CSV TITLE COLUMNS)'.format(
        my_args.default_mapping_action.upper(), prefix))

    # Main table first, then each foreign-key reference table beneath it.
    print('Database map:')
    print(' {} (MAIN TABLE)'.format(table_name))
    for mapping in fk_mappings:
        print('  └─ {} (REFERENCE TABLE)'.format(mapping['column_name']))
    print('')
Example #5
0
    def test_csv_to_sqlite3(self):
        """Importing the fixture CSV should leave two rows in 'taxi'."""
        libcsv2sqlite.csv_to_sqlite3(self.args)

        # NOTE(review): original comment said "no pk -> 3 results" but
        # asserts 2 — presumably a pk dedupes one row; verify fixture.
        row_count = dbutils.count('taxi')
        self.assertEqual(row_count, 2)
Example #6
0
    def test_csv_to_sqlite3(self):
        """The import should create a 'person' table with four rows."""
        libcsv2sqlite.csv_to_sqlite3(self.args)

        # Table must exist and contain every fixture record.
        self.assertTrue(dbutils.table_exists('person'))
        row_count = dbutils.count('person')
        self.assertEqual(row_count, 4)
Example #7
0
 def test_csv_to_sqlite3(self):
     """The 'taxi' table should hold exactly two rows."""
     row_count = dbutils.count('taxi')
     self.assertEqual(row_count, 2)
Example #8
0
    def test_csv_to_sqlite3(self):
        """After the import, 'taxi' should contain two rows."""
        libcsv2sqlite.csv_to_sqlite3(self.args)

        # NOTE(review): original comment claimed 3 results without a pk,
        # yet the assertion expects 2 — confirm against the fixture data.
        self.assertEqual(dbutils.count('taxi'), 2)
Example #9
0
    def test_csv_to_sqlite3(self):
        """Importing should produce a 'person' table holding four records."""
        libcsv2sqlite.csv_to_sqlite3(self.args)

        table_present = dbutils.table_exists('person')
        self.assertTrue(table_present)
        self.assertEqual(dbutils.count('person'), 4)
Example #10
0
 def test_csv_to_sqlite3(self):
     """Expect exactly two rows in the 'taxi' table."""
     taxi_rows = dbutils.count('taxi')
     self.assertEqual(taxi_rows, 2)