Example #1
import os
import subprocess
from pathlib import Path


def capture_files(bin_dir, source_path, target_path):
    Path(os.path.dirname(target_path)).mkdir(parents=True, exist_ok=True)
    archive_format = Path(target_path).suffix[1:]

    if archive_format == 'wim':
        cmd = bin_dir + "/vendor/wimlib-imagex capture "
        if os.name == "posix":
            cmd = "wimcapture "  # WAIT: Bruk tar eller annet som kan mountes på posix. Trenger ikke wim da
    else:
        print_and_exit("'" + archive_format + "' not implemented yet")
    subprocess.run(cmd + source_path + " " + target_path +
                   " --no-acls --compress=none",
                   shell=True)
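
A minimal usage sketch (all values hypothetical; assumes the vendored
wimlib-imagex binary and print_and_exit exist as in the surrounding module):

bin_dir = '/opt/pwcode/bin'           # hypothetical install location
source_path = '/data/subsystem/documents'
target_path = '/tmp/export/dir1.wim'  # the '.wim' suffix selects the wim branch
capture_files(bin_dir, source_path, target_path)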
Example #2
def run_ddl(jdbc, sql):
    result = 'Success'
    try:
        conn = jdbc.connection
        cursor = conn.cursor()
        cursor.execute(sql)
        cursor.close()
        conn.commit()
        conn.close()
    except Exception as e:
        result = e

    if result != 'Success':
        print_and_exit(result)
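
Note that the connection stays open if execute() raises. A sketch of the same
pattern with guaranteed cleanup, assuming the Jdbc connection object is DB-API
compatible as above (contextlib.closing is standard library):

from contextlib import closing

def run_ddl_safe(jdbc, sql):
    # Same effect as run_ddl, but the cursor and connection are
    # closed even when execute() raises.
    try:
        with closing(jdbc.connection) as conn:
            with closing(conn.cursor()) as cursor:
                cursor.execute(sql)
            conn.commit()
    except Exception as e:
        print_and_exit(e)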
Example #3
def table_check(incl_tables, skip_tables, overwrite_tables, db_tables,
                subsystem_dir):
    non_empty_tables = {k: v for (k, v) in db_tables.items() if v > 0}
    if incl_tables:
        for tbl in incl_tables:
            if tbl not in non_empty_tables:
                print_and_exit("Table '" + tbl +
                               "' is empty or not in schema. Exiting.")
        for tbl in list(non_empty_tables):
            if tbl not in incl_tables:
                del non_empty_tables[tbl]
    elif skip_tables:
        for tbl in skip_tables:
            if tbl in non_empty_tables:
                del non_empty_tables[tbl]
            else:
                print_and_exit("Table '" + tbl +
                               "' is empty or not in schema. Exiting.")

    if overwrite_tables:
        for tbl in overwrite_tables:
            if tbl not in non_empty_tables:
                print_and_exit("Table '" + tbl +
                               "' is empty or not in source schema. Exiting.")

    return non_empty_tables, overwrite_tables
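
table_check is pure dictionary logic, so it can be exercised standalone; a
minimal sketch with made-up row counts (all names hypothetical):

db_tables = {'ORDERS': 120, 'CUSTOMERS': 40, 'AUDIT_LOG': 0}

# Keep only ORDERS; CUSTOMERS is dropped, and including the empty
# AUDIT_LOG would trigger print_and_exit.
export_tables, overwrite = table_check(['ORDERS'], None, None,
                                       db_tables, '/tmp/subsystem')
print(export_tables)  # -> {'ORDERS': 120}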
Example #4
def export_db_schema(JDBC_URL, bin_dir, class_path, MAX_JAVA_HEAP, DB_USER,
                     DB_PASSWORD, DB_NAME, DB_SCHEMA, subsystem_dir,
                     INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, DDL_GEN):
    url, driver_jar, driver_class = get_db_details(JDBC_URL, bin_dir)
    if driver_jar and driver_class:
        # Start Java virtual machine if not started already:
        class_paths = class_path + ':' + driver_jar
        init_jvm(class_paths, MAX_JAVA_HEAP)

        try:
            jdbc = Jdbc(url, DB_USER, DB_PASSWORD, DB_NAME, DB_SCHEMA,
                        driver_jar, driver_class, True, True)
            if jdbc:
                # Get database metadata:
                db_tables, table_columns = get_db_meta(jdbc)
                export_schema(class_path, MAX_JAVA_HEAP, subsystem_dir, jdbc,
                              db_tables)
                export_tables, overwrite_tables = table_check(
                    INCL_TABLES, SKIP_TABLES, OVERWRITE_TABLES, db_tables,
                    subsystem_dir)

                if export_tables:
                    # Copy schema data:
                    copy_db_schema(subsystem_dir, jdbc, class_path,
                                   MAX_JAVA_HEAP, export_tables, bin_dir,
                                   table_columns, overwrite_tables, DDL_GEN)
                else:
                    print_and_exit('No table data to export. Exiting.')

        except Exception as e:
            print_and_exit(e)

    else:
        print_and_exit('Not a supported JDBC URL. Exiting.')
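
A hypothetical invocation (all values illustrative; requires a JVM, a JDBC
driver jar, and SQL Workbench/J on the class path):

export_db_schema('jdbc:postgresql://localhost/mydb', '/opt/pwcode/bin',
                 '/opt/pwcode/bin/vendor/sqlworkbench.jar', '-Xmx4g',
                 'user', 'secret', 'mydb', 'public',
                 '/tmp/system/content/sub_systems/sys1',
                 None, None, None, 'SQLWB')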
Example #5
def copy_db_schema(subsystem_dir, s_jdbc, class_path, max_java_heap,
                   export_tables, bin_dir, table_columns, overwrite_tables,
                   DDL_GEN):
    batch = wb_batch(class_path, max_java_heap)
    target_url = ('jdbc:h2:' + subsystem_dir + '/documentation/' +
                  s_jdbc.db_name + '_' + s_jdbc.db_schema + ';autocommit=off')
    target_url, driver_jar, driver_class = get_db_details(target_url, bin_dir)
    t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar, driver_class,
                  True, True)
    target_tables = get_target_tables(t_jdbc)
    pk_dict = get_primary_keys(subsystem_dir, export_tables)
    unique_dict = get_unique_indexes(subsystem_dir, export_tables)
    blob_columns = get_blob_columns(subsystem_dir, export_tables)

    if DDL_GEN == 'PWCode':
        ddl_columns = get_ddl_columns(subsystem_dir)

    mode = '-mode=INSERT'
    std_params = ' -ignoreIdentityColumns=false -removeDefaults=true -commitEvery=1000 '
    previous_export = []
    for table, row_count in export_tables.items():
        insert = True
        params = mode + std_params

        col_query = ''
        if table in blob_columns:
            for column in blob_columns[table]:
                # Append one synthetic length column per blob column:
                col_query += (',LENGTH("' + column + '") AS ' +
                              column.upper() + '_BLOB_LENGTH_PWCODE')

        source_query = 'SELECT "' + '","'.join(
            table_columns[table]
        ) + '"' + col_query + ' FROM "' + s_jdbc.db_schema + '"."' + table + '"'

        if table in target_tables and table not in overwrite_tables:
            t_row_count = target_tables[table]
            if t_row_count == row_count:
                previous_export.append(table)
                continue
            elif t_row_count > row_count:
                print_and_exit(
                    "Error. More data in target than in source. Table '" +
                    table + "'. Exiting.")
            elif table in pk_dict:
                source_query = gen_sync_table(table, pk_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query)
                insert = False
            elif table in unique_dict:
                source_query = gen_sync_table(table, unique_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query)
                insert = False

        if insert:
            print("Copying table '" + table + "':")
            if DDL_GEN == 'SQLWB':
                params = mode + std_params + ' -createTarget=true -dropTarget=true'
            elif DDL_GEN == 'PWCode':
                t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar,
                              driver_class, True, True)
                ddl = ('\nCREATE TABLE "' + table + '"\n(\n' +
                       ddl_columns[table][:-1] + '\n);')
                ddl = create_index(table, pk_dict, unique_dict, ddl)
                print(ddl)
                sql = 'DROP TABLE IF EXISTS "' + table + '"; ' + ddl
                run_ddl(t_jdbc, sql)
            else:
                print_and_exit(
                    "Valid values for DDL generation are 'PWCode' and 'SQLWB'. Exiting."
                )

            if table in blob_columns:
                for column in blob_columns[table]:
                    t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar,
                                  driver_class, True, True)
                    sql = ('ALTER TABLE "' + table + '" ADD COLUMN ' +
                           column.upper() +
                           '_BLOB_LENGTH_PWCODE VARCHAR(255);')
                    run_ddl(t_jdbc, sql)

        batch.runScript("WbConnect -url='" + s_jdbc.url + "' -password="******";")
        target_conn = '"username=,password=,url=' + target_url + '" ' + params
        target_table = '"' + table + '"'
        copy_data_str = "WbCopy -targetConnection=" + target_conn + " -targetSchema=PUBLIC -targetTable=" + target_table + " -sourceQuery=" + source_query + ";"
        result = batch.runScript(copy_data_str)
        batch.runScript("WbDisconnect;")
        jp.java.lang.System.gc()
        if str(result) == 'Error':
            print_and_exit("Error on copying table '" + table +
                           "'\nScroll up for details.")

    if len(previous_export) == len(export_tables.keys()):
        print('All tables already exported.')
    elif not previous_export:
        print('Database export complete.')
    else:
        print('Database export complete. ' + str(len(previous_export)) +
              ' of ' + str(len(export_tables.keys())) +
              ' tables were already exported.')
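
For reference, the SQL Workbench/J batch commands assembled in the loop take
roughly this shape for a hypothetical two-column table (values illustrative;
the source password is redacted as in the code above):

# WbConnect -url='jdbc:postgresql://host/db' -password=******;
# WbCopy -targetConnection="username=,password=,url=jdbc:h2:/tmp/doc;autocommit=off"
#        -mode=INSERT -ignoreIdentityColumns=false -removeDefaults=true
#        -commitEvery=1000 -targetSchema=PUBLIC -targetTable="ORDERS"
#        -sourceQuery=SELECT "ID","NAME" FROM "public"."ORDERS";
# WbDisconnect;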
Example #6
import os
from pathlib import Path


def export_files(system_dir, subsystem_dir, export_type, system_name,
                 dir_paths, bin_dir, archive_format):
    Path(system_dir + '/content/sub_systems/').mkdir(parents=True,
                                                     exist_ok=True)
    file_export_done = False
    exported_dirs = []
    if export_type != 'DATABASE':
        source_paths = set(dir_paths)  # Remove duplicates
        for source_path in source_paths:  # Validate source paths
            if not os.path.isdir(source_path):
                print_and_exit("'" + source_path +
                               "' is not a valid path. Exiting.")

        subdirs = os.listdir(system_dir + '/content/sub_systems/')
        existing_subsystem = False
        for sub_dir in subdirs:
            source_paths_file = (system_dir + '/content/sub_systems/' +
                                 sub_dir +
                                 '/content/documents/source_paths.txt')
            if os.path.isfile(source_paths_file):
                with open(source_paths_file) as spf:
                    exported_dirs = [line.rstrip('\n') for line in spf]
                count = 0
                for dir in source_paths:
                    if dir in exported_dirs:
                        count += 1
                        print("'" + dir + "' already exported.")
                        existing_subsystem = True
                        subsystem_dir = system_dir + '/content/sub_systems/' + sub_dir

                if count == len(source_paths):
                    print("All files already exported to '" + sub_dir + "'.")
                    file_export_done = True

            else:
                existing_subsystem = True
                subsystem_dir = system_dir + '/content/sub_systems/' + sub_dir

            if existing_subsystem:
                break

        if export_type == 'FILES' and not existing_subsystem:
            subsystem_dir = get_unique_dir(system_dir +
                                           '/content/sub_systems/' +
                                           system_name + '_')

    dirs = [
        system_dir + '/administrative_metadata/',
        system_dir + '/descriptive_metadata/',
        system_dir + '/content/documentation/', subsystem_dir + '/header',
        subsystem_dir + '/content/documents/',
        subsystem_dir + '/documentation/dip/'
    ]

    for dir in dirs:
        Path(dir).mkdir(parents=True, exist_ok=True)

    if export_type == 'DATABASE':
        return

    if source_paths and not file_export_done:
        source_paths_file = subsystem_dir + '/content/documents/source_paths.txt'
        with open(source_paths_file, 'w') as f:
            for dir in exported_dirs:
                if dir not in source_paths:
                    f.write(dir + '\n')

            i = 0
            for source_path in source_paths:
                if source_path in exported_dirs:
                    f.write(source_path + '\n')
                    continue

                done = False
                while not done:
                    i += 1
                    target_path = (subsystem_dir + '/content/documents/dir' +
                                   str(i) + '.' + archive_format)
                    if not os.path.isfile(target_path):
                        # source_path is not yet exported here (paths already
                        # in exported_dirs are skipped by the continue above):
                        capture_files(bin_dir, source_path, target_path)
                        f.write(source_path + '\n')
                        done = True
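
A hypothetical invocation (paths illustrative; note that subsystem_dir may be
reassigned internally when an existing sub_system directory is found):

export_files('/tmp/system', '', 'FILES', 'sys1',
             ['/data/docs'], '/opt/pwcode/bin', 'wim')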
Example #7
def copy_db_schema(subsystem_dir, s_jdbc, class_path, max_java_heap,
                   export_tables, bin_dir, table_columns, overwrite_tables,
                   DDL_GEN):
    batch = wb_batch(class_path, max_java_heap)
    Path(os.path.join(subsystem_dir, 'content',
                      'database')).mkdir(parents=True, exist_ok=True)
    target_url = 'jdbc:h2:' + os.path.join(subsystem_dir, 'content',
                                           'database',
                                           s_jdbc.db_name) + ';autocommit=off'

    target_url, driver_jar, driver_class = get_db_details(target_url, bin_dir)
    t_jdbc = Jdbc(target_url, '', '', '', s_jdbc.db_schema, driver_jar,
                  driver_class, True, True)

    target_tables = get_target_tables(t_jdbc)
    pk_dict = get_primary_keys(subsystem_dir, export_tables)
    unique_dict = get_unique_indexes(subsystem_dir, export_tables)

    if DDL_GEN == 'Native':
        ddl_columns = get_ddl_columns(subsystem_dir, s_jdbc.db_schema, pk_dict,
                                      unique_dict)

    mode = '-mode=INSERT'
    std_params = ' -ignoreIdentityColumns=false -removeDefaults=true -commitEvery=1000 '
    previous_export = []
    t_count = 0
    for table, row_count in export_tables.items():
        t_count += 1
        insert = True
        params = mode + std_params

        col_query = ''
        # WAIT: Change the blob length code later so it also works on mssql etc.
        # if table in blob_columns:
        #     for column in blob_columns[table]:
        #         col_query = ',LENGTH("' + column + '") AS ' + column.upper() + '_BLOB_LENGTH_PWCODE'

        source_query = ('SELECT "' + '","'.join(table_columns[table]) + '"' +
                        col_query + ' FROM "' + s_jdbc.db_schema + '"."' +
                        table + '"')

        if table in target_tables and table not in overwrite_tables:
            t_row_count = target_tables[table]
            if t_row_count == row_count:
                previous_export.append(table)
                continue
            elif t_row_count > row_count:
                print_and_exit(
                    "Error. More data in target than in source. Table '" +
                    table + "'. Exiting.")
            elif table in pk_dict:
                source_query = gen_sync_table(table, pk_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query,
                                              s_jdbc.db_name)
                insert = False
            elif table in unique_dict:
                source_query = gen_sync_table(table, unique_dict[table],
                                              target_url, driver_jar,
                                              driver_class, source_query,
                                              s_jdbc.db_name)
                insert = False

        # Quoted later to form "schema"."table":
        target_table = s_jdbc.db_schema + '"."' + table
        if insert:
            print("Copying table '" + table + "':")
            if DDL_GEN == 'SQL Workbench':
                params = mode + std_params + ' -createTarget=true -dropTarget=true'
            elif DDL_GEN == 'Native':
                # t_jdbc = Jdbc(target_url, '', '', '', s_jdbc.db_schema, driver_jar, driver_class, True, True)
                t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar,
                              driver_class, True, True)
                ddl = ('\nCREATE TABLE "' + target_table + '"\n(\n' +
                       ddl_columns[table][:-1] + '\n);')
                ddl = create_index(table, pk_dict, unique_dict, ddl, t_count,
                                   s_jdbc.db_schema)
                print(ddl)
                sql = 'DROP TABLE IF EXISTS "' + target_table + '"; ' + ddl
                run_ddl(t_jdbc, sql)

            # WAIT: Change the blob length code later so it also works on mssql etc.
            # if table in blob_columns:
            #     for column in blob_columns[table]:
            #         t_jdbc = Jdbc(target_url, '', '', '', 'PUBLIC', driver_jar, driver_class, True, True)
            #         sql = 'ALTER TABLE "' + table + '" ADD COLUMN ' + column.upper() + '_BLOB_LENGTH_PWCODE VARCHAR(255);'
            #         run_ddl(t_jdbc, sql)

        target_table = '"' + target_table + '"'
        batch.runScript("WbConnect -url='" + s_jdbc.url + "' -username='******' -password="******";")
        target_conn = '"username=,password=,url=' + target_url + '" ' + params
        copy_data_str = "WbCopy -targetConnection=" + target_conn + " -targetTable=" + target_table + " -sourceQuery=" + source_query + ";"
        print(copy_data_str)
        result = batch.runScript(copy_data_str)
        batch.runScript("WbDisconnect;")
        jp.java.lang.System.gc()
        if str(result) == 'Error':
            print_and_exit("Error on copying table '" + table +
                           "'\nScroll up for details.")

    # TODO: Make sure the process copying the db has definitely finished before packing with tar
    # --> see TODO in common.jvm.py
    if len(previous_export) == len(export_tables.keys()):
        print('All tables already exported.')
    elif not previous_export:
        print('Database export complete.')
    else:
        print('Database export complete. ' + str(len(previous_export)) +
              ' of ' + str(len(export_tables.keys())) +
              ' tables were already exported.')