# NOTE(review): this fragment begins mid-statement (the tail of the
# copy_task dict); the enclosing for-loop over db_list, the multiprocess
# if-branch, and the matching try: precede this chunk and are not in
# view — indentation below is reconstructed, confirm against full file.
                         'db_list': [db], 'options': options}
            copy_db_tasks.append(copy_task)

        # Create process pool.
        # Pool size comes from the --multiprocess option value.
        workers_pool = multiprocessing.Pool(
            processes=options['multiprocess']
        )
        # Concurrently copy databases.
        # One task per database; join() blocks until all workers finish.
        workers_pool.map_async(dbcopy.multiprocess_db_copy_task,
                               copy_db_tasks)
        workers_pool.close()
        workers_pool.join()
    else:
        # Copy all specified databases (no database level concurrency).
        # Note: on POSIX systems multiprocessing is applied at the object
        # level (not database).
        dbcopy.copy_db(source_values, dest_values, db_list, options)

    # Print elapsed time.
    if opt.verbosity >= 3:
        print_elapsed_time(start_copy_time)
except UtilError:
    # Report the utility error on stdout and terminate with a non-zero
    # exit status so shell callers can detect the failure.
    _, err, _ = sys.exc_info()
    print("ERROR: {0}".format(err.errmsg))
    sys.exit(1)

# Normal termination (exit status 0).
sys.exit()
# NOTE(review): this fragment begins mid-statement (the tail of the
# import_task dict); the enclosing for-loop over files, the multiprocess
# if-branch, and the matching try: precede this chunk and are not in
# view — indentation below is reconstructed, confirm against full file.
                           'srv_con': server_values,
                           'file_name': file_name,
                           'options': options}
            import_file_tasks.append(import_task)
        else:
            # Import file (no concurrency at the file level).
            dbimport.import_file(server_values, file_name, options)

    # Import files concurrently.
    if import_file_tasks:
        # Create process pool.
        # Pool size comes from the --multiprocess option value.
        workers_pool = multiprocessing.Pool(
            processes=options['multiprocess'])
        # Concurrently import files.
        workers_pool.map_async(dbimport.multiprocess_file_import_task,
                               import_file_tasks)
        workers_pool.close()
        # Block until all import tasks complete.
        workers_pool.join()

    # Print elapsed time (start_test is set earlier, outside this view).
    if opt.verbosity >= 3:
        print_elapsed_time(start_test)
except UtilError:
    # Report the utility error on stdout and terminate with a non-zero
    # exit status so shell callers can detect the failure.
    _, err, _ = sys.exc_info()
    print("ERROR: {0}".format(err.errmsg))
    sys.exit(1)

# Normal termination (exit status 0).
sys.exit()
# NOTE(review): this fragment begins inside a larger if/else — the
# leading for-loop merges per-worker temporary files produced when
# multiprocessing was used; the opening if and the matching try: precede
# this chunk and are not in view — indentation below is reconstructed.
        for tmp_filename in tmp_files_list:
            if tmp_filename:
                # Append this worker's temporary output to the final
                # output file, then delete the temporary file.
                # NOTE(review): consider `with open(...)` here so the
                # handle is closed even if copyfileobj raises — confirm
                # with project style before changing.
                tmp_file = open(tmp_filename, 'r')
                shutil.copyfileobj(tmp_file, output_file)
                tmp_file.close()
                os.remove(tmp_filename)
    else:
        # Export all specified databases (no database level concurrency).
        # Note: on POSIX systems multiprocessing is applied at the table
        # level (not database).
        export_databases(server_values, db_list, output_file, options)

    if output_filename is None:
        # Dump the export output to the stdout.
        # The export was spooled to a temp file; rewind, copy it to
        # stdout, then remove it.
        output_file.seek(0)
        shutil.copyfileobj(output_file, sys.stdout)
        output_file.close()
        os.remove(output_file.name)

    # record elapsed time
    if opt.verbosity >= 3:
        # Flush stdout first so the timing line appears after the
        # dumped export data.
        sys.stdout.flush()
        print_elapsed_time(start_export_time)
except UtilError:
    # Report the utility error on stdout and terminate with a non-zero
    # exit status so shell callers can detect the failure.
    _, err, _ = sys.exc_info()
    print("ERROR: {0}".format(err.errmsg))
    sys.exit(1)

# Normal termination (exit status 0).
sys.exit()
# NOTE(review): this fragment begins mid-statement (the tail of the
# import_task dict); the enclosing for-loop over files, the multiprocess
# if-branch, and the matching try: precede this chunk and are not in
# view — indentation below is reconstructed, confirm against full file.
                           'file_name': file_name,
                           'options': options}
            import_file_tasks.append(import_task)
        else:
            # Import file (no concurrency at the file level).
            dbimport.import_file(server_values, file_name, options)

    # Import files concurrently.
    if import_file_tasks:
        # Create process pool.
        # Pool size comes from the --multiprocess option value.
        workers_pool = multiprocessing.Pool(
            processes=options['multiprocess']
        )
        # Concurrently import files.
        workers_pool.map_async(dbimport.multiprocess_file_import_task,
                               import_file_tasks)
        workers_pool.close()
        # Block until all import tasks complete.
        workers_pool.join()

    # Print elapsed time (start_test is set earlier, outside this view).
    if opt.verbosity >= 3:
        print_elapsed_time(start_test)
except UtilError:
    # Report the utility error on stdout and terminate with a non-zero
    # exit status so shell callers can detect the failure.
    _, err, _ = sys.exc_info()
    print("ERROR: {0}".format(err.errmsg))
    sys.exit(1)

# Normal termination (exit status 0).
sys.exit()
# Copy databases concurrently for non posix systems (windows). if options["multiprocess"] > 1 and os.name != "posix": # Create copy databases tasks. copy_db_tasks = [] for db in db_list: copy_task = {"source_srv": source_values, "dest_srv": dest_values, "db_list": [db], "options": options} copy_db_tasks.append(copy_task) # Create process pool. workers_pool = multiprocessing.Pool(processes=options["multiprocess"]) # Concurrently copy databases. workers_pool.map_async(dbcopy.multiprocess_db_copy_task, copy_db_tasks) workers_pool.close() workers_pool.join() else: # Copy all specified databases (no database level concurrency). # Note: on POSIX systems multiprocessing is applied at the object # level (not database). dbcopy.copy_db(source_values, dest_values, db_list, options) # Print elapsed time. if opt.verbosity >= 3: print_elapsed_time(start_copy_time) except UtilError: _, err, _ = sys.exc_info() print("ERROR: {0}".format(err.errmsg)) sys.exit(1) sys.exit()