Example No. 1
def export_db_schema(JDBC_URL, bin_dir, class_path, MAX_JAVA_HEAP, DB_USER,
                     DB_PASSWORD, DB_NAME, DB_SCHEMA, subsystem_dir,
                     INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, DDL_GEN):
    url, driver_jar, driver_class = get_db_details(JDBC_URL, bin_dir)
    if driver_jar and driver_class:
        # Start Java virtual machine if not started already:
        class_paths = class_path + ':' + driver_jar
        init_jvm(class_paths, MAX_JAVA_HEAP)

        try:
            jdbc = Jdbc(url, DB_USER, DB_PASSWORD, DB_NAME, DB_SCHEMA,
                        driver_jar, driver_class, True, True)
            if jdbc:
                # Get database metadata:
                db_tables, table_columns = get_db_meta(jdbc)
                export_schema(class_path, MAX_JAVA_HEAP, subsystem_dir, jdbc,
                              db_tables)
                export_tables, overwrite_tables = table_check(
                    INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, db_tables,
                    subsystem_dir)

                if export_tables:
                    # Copy schema data:
                    copy_db_schema(subsystem_dir, jdbc, class_path,
                                   MAX_JAVA_HEAP, export_tables, bin_dir,
                                   table_columns, overwrite_tables, DDL_GEN)
                else:
                    print_and_exit('No table data to export. Exiting.')

        except Exception as e:
            print_and_exit(e)

    else:
        print_and_exit('Not a supported jdbc url. Exiting')
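A minimal usage sketch for export_db_schema, assuming the surrounding helpers (get_db_details, init_jvm, Jdbc, and so on) are importable. Every connection value, path, and filter below is a hypothetical placeholder, and the '-Xmx4g' heap format is an assumption; 'SQLWB' and 'PWCode' are the DDL_GEN values the later examples check for.

# Hypothetical invocation; all values are placeholders, not real settings.
export_db_schema(
    'jdbc:h2:/tmp/exampledb',          # JDBC_URL (placeholder)
    '/opt/pwcode/bin',                 # bin_dir (placeholder)
    '/opt/pwcode/vendor/jars/sqlworkbench.jar',  # class_path (placeholder)
    '-Xmx4g',                          # MAX_JAVA_HEAP (assumed format)
    '', '',                            # DB_USER, DB_PASSWORD
    'exampledb', 'PUBLIC',             # DB_NAME, DB_SCHEMA
    '/tmp/subsystem',                  # subsystem_dir (placeholder)
    [], [], [],                        # INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES
    'SQLWB')                           # DDL_GEN: 'SQLWB' or 'PWCode'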
Example No. 2
def test_db_connect(conn,
                    bin_dir,
                    class_path,
                    java_path,
                    MAX_JAVA_HEAP,
                    INCL_TABLES,
                    SKIP_TABLES,
                    OVERWRITE_TABLES,
                    new_schema=False):

    url, driver_jar, driver_class = get_db_details(conn.jdbc_url, bin_dir)
    if driver_jar and driver_class:
        # Start Java virtual machine if not started already:
        class_paths = class_path + get_java_path_sep() + driver_jar

        init_jvm(class_paths, MAX_JAVA_HEAP)

        try:
            jdbc = Jdbc(url, conn.db_user, conn.db_password, conn.db_name,
                        conn.schema_name, driver_jar, driver_class, True, True)
            # TODO: Add a check that the jdbc url itself is correct, not just whether db_name and schema return tables
            if jdbc:
                if new_schema:
                    run_ddl(
                        jdbc, 'CREATE SCHEMA IF NOT EXISTS "' +
                        conn.schema_name.upper() + '"')
                    # TODO: Is 'upper' OK for all supported database types?
                else:
                    # Get database metadata:
                    db_tables, table_columns = get_db_meta(jdbc)
                    if not db_tables:
                        return ("Database '" + conn.db_name + "', schema '" +
                                conn.schema_name + "' returns no tables.")

                    export_tables, overwrite_tables = table_check(
                        INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, db_tables)
                    if not export_tables:
                        return 'No table data to export. Exiting.'
                return 'ok'

        except Exception as e:
            return e

    else:
        return 'Not a supported jdbc url. Exiting'
Example No. 3
def export_db_schema(JDBC_URL, bin_dir, class_path, java_path, MAX_JAVA_HEAP,
                     DB_USER, DB_PASSWORD, DB_NAME, DB_SCHEMA, subsystem_dir,
                     INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, DDL_GEN,
                     schema_names):
    url, driver_jar, driver_class = get_db_details(JDBC_URL, bin_dir)
    if driver_jar and driver_class:
        # Start Java virtual machine if not started already:
        class_paths = class_path + get_java_path_sep() + driver_jar
        init_jvm(class_paths, MAX_JAVA_HEAP)
        try:
            jdbc = Jdbc(url, DB_USER, DB_PASSWORD, DB_NAME, DB_SCHEMA,
                        driver_jar, driver_class, True, True)
            if jdbc:
                # Get database metadata:
                db_tables, table_columns = get_db_meta(
                    jdbc)  # WAIT: Fix so this isn't fetched twice (also in the test)
                export_schema(class_paths, MAX_JAVA_HEAP, subsystem_dir, jdbc,
                              schema_names)
                add_row_count_to_schema_file(subsystem_dir, db_tables,
                                             DB_SCHEMA)
                export_tables, overwrite_tables = table_check(
                    INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, db_tables)

                if export_tables:
                    # Copy schema data:
                    copy_db_schema(subsystem_dir, jdbc, class_path,
                                   MAX_JAVA_HEAP, export_tables, bin_dir,
                                   table_columns, overwrite_tables, DDL_GEN)
                    return 'ok'

                print('No table data to export. Exiting.')
                return

        except Exception as e:
            print(e)
            return

    else:
        print('Not a supported jdbc url. Exiting')
        return
Example No. 4
def test_db_connect(JDBC_URL, bin_dir, class_path, java_path, MAX_JAVA_HEAP,
                    DB_USER, DB_PASSWORD, DB_NAME, DB_SCHEMA, INCL_TABLES,
                    SKIP_TABLES, OVERWRITE_TABLES):

    url, driver_jar, driver_class = get_db_details(JDBC_URL, bin_dir)

    if driver_jar and driver_class:
        # Start Java virtual machine if not started already:
        class_paths = class_path + get_java_path_sep() + driver_jar
        if driver_class != 'org.h2.Driver':
            class_paths = class_paths + get_java_path_sep() + os.path.join(
                bin_dir, 'vendor', 'jars', 'h2.jar')

        init_jvm(class_paths, MAX_JAVA_HEAP)

        try:
            jdbc = Jdbc(url, DB_USER, DB_PASSWORD, DB_NAME, DB_SCHEMA,
                        driver_jar, driver_class, True, True)
            # TODO: Add a check that the jdbc url itself is correct, not just whether db_name and schema return tables
            if jdbc:
                # Get database metadata:
                db_tables, table_columns = get_db_meta(
                    jdbc)  # WAIT: Change so columns aren't also fetched here

                if not db_tables:
                    return ("Database '" + DB_NAME + "', schema '" +
                            DB_SCHEMA + "' returns no tables.")

                export_tables, overwrite_tables = table_check(
                    INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, db_tables)
                if not export_tables:
                    return 'No table data to export. Exiting.'
                return 'ok'

        except Exception as e:
            return e

    else:
        return 'Not a supported jdbc url. Exiting'
Example No. 5
def export_db_schema(source, target, bin_dir, class_path, java_path,
                     MAX_JAVA_HEAP, subsystem_dir, INCL_TABLES, SKIP_TABLES,
                     OVERWRITE_TABLES, DDL_GEN):
    url, driver_jar, driver_class = get_db_details(source.jdbc_url, bin_dir)
    if driver_jar and driver_class:
        # Start Java virtual machine if not started already:
        class_paths = class_path + get_java_path_sep() + driver_jar
        init_jvm(class_paths, MAX_JAVA_HEAP)
        try:
            jdbc = Jdbc(url, source.db_user, source.db_password,
                        source.db_name, source.schema_name, driver_jar,
                        driver_class, True, True)
            if jdbc:
                # Get database metadata:
                db_tables, table_columns = get_db_meta(jdbc)
                export_schema(class_paths, MAX_JAVA_HEAP, java_path,
                              subsystem_dir, jdbc, db_tables)
                export_tables, overwrite_tables = table_check(
                    INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, db_tables)

                if export_tables:
                    # Copy schema data:
                    copy_db_schema(subsystem_dir, jdbc, class_path, java_path,
                                   MAX_JAVA_HEAP, export_tables, bin_dir,
                                   table_columns, overwrite_tables, DDL_GEN,
                                   target)
                    return 'ok'

                print('No table data to export. Exiting.')
                return

        except Exception as e:
            print(e)
            return

    else:
        print('Not a supported jdbc url. Exiting')
        return
Example No. 6
def gen_sync_table(table, columns, target_url, driver_jar, driver_class,
                   source_query):
    print("Syncing table '" + table + "'...")
    source_query = source_query + ' WHERE ('
    target_query = 'SELECT '

    for col in columns:
        source_query = source_query + '"' + col + '", '
        target_query = target_query + '"' + col + '", '

    source_query = source_query[:-2]
    target_query = target_query[:-2] + ' FROM "' + table + '";'

    t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar, driver_class,
                  True, True)
    target_values = run_select(t_jdbc, target_query)
    if len(columns) > 1:  # Compound key
        source_query = source_query + ') NOT IN (' + ', '.join(
            map(str, target_values)) + ')'
    else:
        source_query = source_query + ") NOT IN ('" + "','".join(
            map(str, ([x[0] for x in target_values]))) + "')"

    return source_query
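To make the string building in gen_sync_table concrete, a hypothetical walk-through for a single-column key (target_values stands in for the rows run_select would fetch from the target database):

# Hypothetical input values for illustration only.
source_query = 'SELECT "ID", "NAME" FROM "CUSTOMERS"'
columns = ['ID']
target_values = [(1,), (2,), (3,)]  # rows already present in the target

# gen_sync_table would extend source_query to exclude those rows:
expected = (source_query + ' WHERE ("ID") NOT IN (\'' +
            "','".join(str(x[0]) for x in target_values) + "')")
print(expected)
# SELECT "ID", "NAME" FROM "CUSTOMERS" WHERE ("ID") NOT IN ('1','2','3')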
Example No. 7
# Postgresql example:
#url = 'jdbc:postgresql://*****:*****@ssw0rd'

# H2 example
url = 'jdbc:h2:/home/bba/Desktop/DoculiveHist_dbo;LAZY_QUERY_EXECUTION=1'
driver_jar = bin_dir + '/vendor/jdbc/h2-1.4.196.jar'
driver_class = 'org.h2.Driver'
user = ''
pwd = ''

jdbc = Jdbc(url, user, pwd, driver_jar, driver_class, True, True)

if jdbc:
    column_position = 3
    table_name = 'EDOKFILES'
    sql_str = 'SELECT EDKID, EFILE FROM ' + table_name
    cnt1 = 0
    cnt2 = 0
    for edkid, efile in jdbc.query(sql_str):
        cnt1 += 1
        fname = table_name + '_' + str(column_position) + '_' + str(cnt1)
        print('%6d. Parsing %s' % (cnt1, fname))
        with open(data_dir + '/' + fname, 'wb') as f:
            f.write(efile)
        cnt2 += 1
        #sys.exit()
Example No. 8
def copy_db_schema(subsystem_dir, s_jdbc, class_path, max_java_heap,
                   export_tables, bin_dir, table_columns, overwrite_tables,
                   DDL_GEN):
    batch = wb_batch(class_path, max_java_heap)
    target_url = 'jdbc:h2:' + subsystem_dir + '/documentation/' + s_jdbc.db_name + '_' + s_jdbc.db_schema + ';autocommit=off'
    target_url, driver_jar, driver_class = get_db_details(target_url, bin_dir)
    t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar, driver_class,
                  True, True)
    target_tables = get_target_tables(t_jdbc)
    pk_dict = get_primary_keys(subsystem_dir, export_tables)
    unique_dict = get_unique_indexes(subsystem_dir, export_tables)
    blob_columns = get_blob_columns(subsystem_dir, export_tables)

    if DDL_GEN == 'PWCode':
        ddl_columns = get_ddl_columns(subsystem_dir)

    mode = '-mode=INSERT'
    std_params = ' -ignoreIdentityColumns=false -removeDefaults=true -commitEvery=1000 '
    previous_export = []
    for table, row_count in export_tables.items():
        insert = True
        params = mode + std_params

        col_query = ''
        if table in blob_columns:
            for column in blob_columns[table]:
                # Accumulate one length column per blob column:
                col_query += ',LENGTH("' + column + '") AS ' + \
                    column.upper() + '_BLOB_LENGTH_PWCODE'

        source_query = 'SELECT "' + '","'.join(
            table_columns[table]
        ) + '"' + col_query + ' FROM "' + s_jdbc.db_schema + '"."' + table + '"'

        if table in target_tables and table not in overwrite_tables:
            t_row_count = target_tables[table]
            if t_row_count == row_count:
                previous_export.append(table)
                continue
            elif t_row_count > row_count:
                print_and_exit(
                    "Error. More data in target than in source. Table '" +
                    table + "'. Exiting.")
            elif table in pk_dict:
                source_query = gen_sync_table(table, pk_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query)
                insert = False
            elif table in unique_dict:
                source_query = gen_sync_table(table, unique_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query)
                insert = False

        if insert:
            print("Copying table '" + table + "':")
            if DDL_GEN == 'SQLWB':
                params = mode + std_params + ' -createTarget=true -dropTarget=true'
            elif DDL_GEN == 'PWCode':
                t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar,
                              driver_class, True, True)
                ddl = '\nCREATE TABLE "' + table + '"\n(\n' + ddl_columns[
                    table][:-1] + '\n);'
                ddl = create_index(table, pk_dict, unique_dict, ddl)
                print(ddl)
                sql = 'DROP TABLE IF EXISTS "' + table + '"; ' + ddl
                run_ddl(t_jdbc, sql)
            else:
                print_and_exit(
                    "Valid values for DDL generation are 'PWCode' and 'SQLWB'. Exiting."
                )

            if table in blob_columns:
                for column in blob_columns[table]:
                    t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar,
                                  driver_class, True, True)
                    sql = 'ALTER TABLE "' + table + '" ADD COLUMN ' + column.upper(
                    ) + '_BLOB_LENGTH_PWCODE VARCHAR(255);'
                    run_ddl(t_jdbc, sql)

        batch.runScript("WbConnect -url='" + s_jdbc.url + "' -password="******";")
        target_conn = '"username=,password=,url=' + target_url + '" ' + params
        target_table = '"' + table + '"'
        copy_data_str = "WbCopy -targetConnection=" + target_conn + " -targetSchema=PUBLIC -targetTable=" + target_table + " -sourceQuery=" + source_query + ";"
        result = batch.runScript(copy_data_str)
        batch.runScript("WbDisconnect;")
        jp.java.lang.System.gc()
        if str(result) == 'Error':
            print_and_exit("Error on copying table '" + table +
                           "'\nScroll up for details.")

    if len(previous_export) == len(export_tables.keys()):
        print('All tables already exported.')
    elif not previous_export:
        print('Database export complete.')
    else:
        print('Database export complete. ' + str(len(previous_export)) +
              ' of ' + str(len(export_tables.keys())) +
              ' tables were already exported.')
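For reference, the batch command assembled in the copy loop above flattens to something like the following (URLs and table name are hypothetical; the flags are the ones set in the code):

# Hypothetical values showing the final WbCopy string passed to runScript.
mode = '-mode=INSERT'
std_params = ' -ignoreIdentityColumns=false -removeDefaults=true -commitEvery=1000 '
target_url = 'jdbc:h2:/tmp/subsystem/documentation/exampledb_dbo;autocommit=off'
target_conn = '"username=,password=,url=' + target_url + '" ' + mode + std_params
copy_data_str = ("WbCopy -targetConnection=" + target_conn +
                 " -targetSchema=PUBLIC -targetTable=" + '"CUSTOMERS"' +
                 " -sourceQuery=" + 'SELECT "ID" FROM "dbo"."CUSTOMERS"' + ";")
print(copy_data_str)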
Example No. 9
def get_tables(sub_systems_dir, sub_system, jdbc_url, driver_jar, schema):
    tables = []
    table_columns = {}
    schema_file = os.path.join(sub_systems_dir, sub_system, 'header',
                               'metadata.xml')
    tree = ET.parse(schema_file)

    table_defs = tree.findall('table-def')
    for table_def in table_defs:
        table_schema = table_def.find('table-schema')
        if table_schema.text != schema:
            continue

        disposed = table_def.find('disposed')
        if disposed.text == 'true':
            continue

        table_name = table_def.find('table-name')
        # print(table_name.text)
        tables.append(table_name.text)

        text_columns = []
        file_columns = []
        column_defs = table_def.findall('column-def')
        column_defs[:] = sorted(
            column_defs, key=lambda elem: int(elem.findtext('dbms-position')))

        for column_def in column_defs:
            column_name = column_def.find('column-name')
            java_sql_type = int(column_def.find('java-sql-type').text)
            dbms_data_size = int(column_def.find('dbms-data-size').text)
            column_name_fixed = column_name.text

            # -> These are treated as blobs: 2004, -4, -3, -2
            # Clobs: -16, -1, 2005, 2011

            if java_sql_type in (-4, -3, -2, 2004, 2005, 2011, -16, -1):
                if (dbms_data_size > 4000
                        or java_sql_type in (2004, -4, -3, -2)):
                    # TODO: Change the line below when the IST file conversion is done -> or should
                    # schema = 'PUBLIC'
                    jdbc = Jdbc(jdbc_url, '', '', '', schema, driver_jar,
                                'org.h2.Driver', True, True)
                    length_query = f'''SELECT MAX(LENGTH("{column_name_fixed}")) FROM "{schema}"."{table_name.text}"'''
                    result = run_select(jdbc, length_query)
                    max_length = [x[0] for x in result][0]

                    # TODO: Change this later so empty fields aren't written to text_columns and are then removed from the tsv
                    # -> Requires marking the column as 'disposed' and checking that elsewhere so uploads stay correct
                    if max_length is not None:
                        if max_length > 4000:
                            file_columns.append(column_name_fixed)
                            # TODO: Possible to switch to a normalized file name directly here?
                            file_name_stem = "'" + str(
                                table_name.text).lower() + "_" + str(
                                    column_name_fixed).lower() + "_" + "'"
                            column_name_fixed = file_name_stem + ' || ROWNUM() AS "' + column_name_fixed + '"'

            text_columns.append(column_name_fixed)

        if text_columns:
            table_columns[table_name.text] = text_columns

        if file_columns:
            table_columns[table_name.text + '_lobs'] = file_columns

    return tables, table_columns
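The numeric codes tested in get_tables are java.sql.Types constants. A small helper expressing the same classification, offered as a sketch rather than part of the original module:

# java.sql.Types constants matching the codes above:
# BLOB=2004, LONGVARBINARY=-4, VARBINARY=-3, BINARY=-2,
# CLOB=2005, NCLOB=2011, LONGNVARCHAR=-16, LONGVARCHAR=-1.
BLOB_TYPES = (2004, -4, -3, -2)
CLOB_TYPES = (2005, 2011, -16, -1)


def may_need_file_export(java_sql_type, dbms_data_size):
    # Mirrors the condition above: binary (blob) types always qualify
    # for the max-length check; clob types only when declared > 4000.
    if java_sql_type not in BLOB_TYPES + CLOB_TYPES:
        return False
    return java_sql_type in BLOB_TYPES or dbms_data_size > 4000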
Example No. 10

if __name__ == '__main__':
    bin_dir = os.environ["pwcode_bin_dir"]
    data_dir = os.environ["pwcode_data_dir"]
    class_path = os.environ['CLASSPATH']
    max_heap_size = 1024
    schema = 'PUBLIC'

    url = 'jdbc:h2:/home/bba/Desktop/DoculiveHist_dbo;LAZY_QUERY_EXECUTION=1'
    driver_jar = bin_dir + '/vendor/jdbc/h2-1.4.196.jar'
    driver_class = 'org.h2.Driver'
    user = ''
    pwd = ''

    jdbc = Jdbc(url, user, pwd, driver_jar, driver_class, True, True)
    if jdbc:
        conn = jdbc.connection

        #        test = get_catalogs(conn)

        cursor = conn.cursor()

        tables = get_tables(conn, schema)

        for table in tables:
            print(table)
            columns = get_columns(cursor, table)
            for column in columns:
                print(column)
Example No. 11
def copy_db_schema(subsystem_dir, s_jdbc, class_path, max_java_heap,
                   export_tables, bin_dir, table_columns, overwrite_tables,
                   DDL_GEN):
    batch = wb_batch(class_path, max_java_heap)
    Path(os.path.join(subsystem_dir, 'content',
                      'database')).mkdir(parents=True, exist_ok=True)
    target_url = 'jdbc:h2:' + os.path.join(subsystem_dir, 'content',
                                           'database',
                                           s_jdbc.db_name) + ';autocommit=off'

    target_url, driver_jar, driver_class = get_db_details(target_url, bin_dir)
    t_jdbc = Jdbc(target_url, '', '', '', s_jdbc.db_schema, driver_jar,
                  driver_class, True, True)

    target_tables = get_target_tables(t_jdbc)
    pk_dict = get_primary_keys(subsystem_dir, export_tables)
    unique_dict = get_unique_indexes(subsystem_dir, export_tables)

    if DDL_GEN == 'Native':
        ddl_columns = get_ddl_columns(subsystem_dir, s_jdbc.db_schema, pk_dict,
                                      unique_dict)

    mode = '-mode=INSERT'
    std_params = ' -ignoreIdentityColumns=false -removeDefaults=true -commitEvery=1000 '
    previous_export = []
    t_count = 0
    for table, row_count in export_tables.items():
        t_count += 1
        insert = True
        params = mode + std_params

        col_query = ''
        # WAIT: Change the blob length code later so it also works on mssql etc.
        # if table in blob_columns:
        #     for column in blob_columns[table]:
        #         col_query = ',LENGTH("' + column + '") AS ' + column.upper() + '_BLOB_LENGTH_PWCODE'

        source_query = 'SELECT "' + '","'.join(
            table_columns[table]
        ) + '"' + col_query + ' FROM "' + s_jdbc.db_schema + '"."' + table + '"'

        if table in target_tables and table not in overwrite_tables:
            t_row_count = target_tables[table]
            if t_row_count == row_count:
                previous_export.append(table)
                continue
            elif t_row_count > row_count:
                print_and_exit(
                    "Error. More data in target than in source. Table '" +
                    table + "'. Exiting.")
            elif table in pk_dict:
                source_query = gen_sync_table(table, pk_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query,
                                              s_jdbc.db_name)
                insert = False
            elif table in unique_dict:
                source_query = gen_sync_table(table, unique_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query,
                                              s_jdbc.db_name)
                insert = False

        target_table = s_jdbc.db_schema + '"."' + table
        if insert:
            print("Copying table '" + table + "':")
            if DDL_GEN == 'SQL Workbench':
                params = mode + std_params + ' -createTarget=true -dropTarget=true'
            elif DDL_GEN == 'Native':
                # t_jdbc = Jdbc(target_url, '', '', '', s_jdbc.db_schema, driver_jar, driver_class, True, True)
                t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar,
                              driver_class, True, True)
                ddl = '\nCREATE TABLE "' + target_table + '"\n(\n' + ddl_columns[
                    table][:-1] + '\n);'
                ddl = create_index(table, pk_dict, unique_dict, ddl, t_count,
                                   s_jdbc.db_schema)
                print(ddl)
                sql = 'DROP TABLE IF EXISTS "' + target_table + '"; ' + ddl
                run_ddl(t_jdbc, sql)

            # WAIT: Change the blob length code later so it also works on mssql etc.
            # if table in blob_columns:
            #     for column in blob_columns[table]:
            #         t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar, driver_class, True, True)
            #         sql = 'ALTER TABLE "' + table + '" ADD COLUMN ' + column.upper() + '_BLOB_LENGTH_PWCODE VARCHAR(255);'
            #         run_ddl(t_jdbc, sql)

        target_table = '"' + target_table + '"'
        batch.runScript("WbConnect -url='" + s_jdbc.url + "' -username='******' -password="******";")
        target_conn = '"username=,password=,url=' + target_url + '" ' + params
        copy_data_str = "WbCopy -targetConnection=" + target_conn + " -targetTable=" + target_table + " -sourceQuery=" + source_query + ";"
        print(copy_data_str)
        result = batch.runScript(copy_data_str)
        batch.runScript("WbDisconnect;")
        jp.java.lang.System.gc()
        if str(result) == 'Error':
            print_and_exit("Error on copying table '" + table +
                           "'\nScroll up for details.")

    # TODO: Make sure the process copying the db has definitely finished before packing with tar
    # --> see TODO in common.jvm.py
    if len(previous_export) == len(export_tables.keys()):
        print('All tables already exported.')
    elif not previous_export:
        print('Database export complete.')
    else:
        print('Database export complete. ' + str(len(previous_export)) +
              ' of ' + str(len(export_tables.keys())) +
              ' tables were already exported.')