Example #1
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The do_drop parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, do_drop, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        do_drop  - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """
    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    do_drop = options.get("do_drop", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    skip_gtid = options.get("skip_gtid", False)
    locking = options.get("locking", "snapshot")

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)
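    # connect_servers() returns the connected (source, destination) pair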
    cloning = (src_val == dest_val) or dest_val is None

    source = servers[0]
    if cloning:
        destination = servers[0]
    else:
        destination = servers[1]
        # Test if SQL_MODE is 'NO_BACKSLASH_ESCAPES' in the destination server
        if destination.select_variable("SQL_MODE") == "NO_BACKSLASH_ESCAPES":
            print(
                "# WARNING: The SQL_MODE in the destination server is "
                "'NO_BACKSLASH_ESCAPES'; it will be changed temporarily "
                "for data insertion.")

    src_gtid = source.supports_gtid() == 'ON'
    dest_gtid = destination.supports_gtid() == 'ON' if destination else False

    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None))  # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")
    elif not skip_gtid and src_gtid:
        # Check to see if this is a full copy (complete backup)
        all_dbs = source.exec_query("SHOW DATABASES")
        dbs = [db[0] for db in db_list]
        for db in all_dbs:
            if db[0].upper() in [
                    "MYSQL", "INFORMATION_SCHEMA", "PERFORMANCE_SCHEMA"
            ]:
                continue
            if db[0] not in dbs:
                print _GTID_BACKUP_WARNING
                break

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)

        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
            'skip_triggers': skip_triggers,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        # Make a dictionary containing the list of objects from source db
        source_objects = {
            "views": source_db.get_db_objects("VIEW", columns="full"),
            "procs": source_db.get_db_objects("PROCEDURE", columns="full"),
            "funcs": source_db.get_db_objects("FUNCTION", columns="full"),
            "events": source_db.get_db_objects("EVENT", columns="full"),
            "triggers": source_db.get_db_objects("TRIGGER", columns="full"),
        }

        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options, source_objects, do_drop)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])

        # Check storage engines
        check_engine_options(destination, options.get("new_engine", None),
                             options.get("def_engine", None), False,
                             options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    rpl_info = None

    # Turn off foreign keys if they were on at the start
    destination.disable_foreign_key_checks(True)

    # Get GTID commands
    if not skip_gtid:
        gtid_info = get_gtid_commands(source)
        if src_gtid and not dest_gtid:
            print _NON_GTID_WARNING % ("destination", "source", "to")
        elif not src_gtid and dest_gtid:
            print _NON_GTID_WARNING % ("source", "destination", "from")
    else:
        gtid_info = None
        if src_gtid and not cloning:
            print _GTID_WARNING

    # If cloning, turn off gtid generation
    if gtid_info and cloning:
        gtid_info = None
    # if GTIDs enabled, write the GTID commands
    if gtid_info and dest_gtid:
        # Check GTID version for complete feature support
        destination.check_gtid_version()
        # Check the gtid_purged value too
        destination.check_gtid_executed()
        for cmd in gtid_info[0]:
            print "# GTID operation:", cmd
            destination.exec_query(cmd, {'fetch': False, 'commit': False})

    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE", {'fetch': False, 'commit': False})

    # Copy (create) objects.
    # We need to delay triggers and events until after the data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True

    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy tables data
    if not skip_data and not skip_tables:

        # Copy tables
        for db_name in db_list:

            # Get a Database class instance
            db = Database(source, db_name[0], options)

            # Perform the copy
            # Note: threads are no longer used; multiprocessing is used instead.
            db.init()
            db.copy_data(db_name[1],
                         options,
                         destination,
                         connections=1,
                         src_con_val=src_val,
                         dest_con_val=dest_val)

    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info and dest_gtid:
        print "# GTID operation:", gtid_info[1]
        destination.exec_query(gtid_info[1])

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    # Turn on foreign keys if they were on at the start
    destination.disable_foreign_key_checks(False)

    if not quiet:
        print "#...done."
    return True
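
A minimal usage sketch for the copy_db variant above. Everything here is
hypothetical: the connection values, database names, and option choices are
placeholders, assuming the surrounding mysql-utilities helpers are importable
and both servers are reachable.

# Hypothetical invocation (all values are placeholders):
src = {"user": "root", "password": "secret", "host": "localhost",
       "port": 3306, "socket": None}
dst = {"user": "root", "password": "secret", "host": "replica1",
       "port": 3306, "socket": None}
# db_list entries are (source_name, dest_name) pairs; a dest_name of None
# keeps the same name. do_drop drops an existing destination database first.
opts = {"do_drop": True, "skip_events": True, "verbose": True}
copy_db(src, dst, [("world", "world_copy")], opts)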
Example #2
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The force parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, force, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        force    - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """
    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    skip_gtid = options.get("skip_gtid", False)
    locking = options.get("locking", "snapshot")

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)
    cloning = (src_val == dest_val) or dest_val is None

    source = servers[0]
    if cloning:
        destination = servers[0]
    else:
        destination = servers[1]

    src_gtid = source.supports_gtid() == 'ON'
    dest_gtid = destination.supports_gtid() == 'ON' if destination else False

    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None))  # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")
    elif not skip_gtid and src_gtid:
        # Check to see if this is a full copy (complete backup)
        all_dbs = source.exec_query("SHOW DATABASES")
        dbs = [db[0] for db in db_list]
        for db in all_dbs:
            if db[0].upper() in ["MYSQL", "INFORMATION_SCHEMA",
                                 "PERFORMANCE_SCHEMA"]:
                continue
            if db[0] not in dbs:
                print _GTID_BACKUP_WARNING
                break

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)

        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])

        # Check storage engines
        check_engine_options(destination,
                             options.get("new_engine", None),
                             options.get("def_engine", None),
                             False, options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    rpl_info = None

    # Turn off foreign keys if they were on at the start
    destination.disable_foreign_key_checks(True)

    # Get GTID commands
    if not skip_gtid:
        gtid_info = get_gtid_commands(source)
        if src_gtid and not dest_gtid:
            print _NON_GTID_WARNING % ("destination", "source", "to")
        elif not src_gtid and dest_gtid:
            print _NON_GTID_WARNING % ("source", "destination", "from")
    else:
        gtid_info = None
        if src_gtid and not cloning:
            print _GTID_WARNING

    # If cloning, turn off gtid generation
    if gtid_info and cloning:
        gtid_info = None
    # if GTIDs enabled, write the GTID commands
    if gtid_info and dest_gtid:
        # Check GTID version for complete feature support
        destination.check_gtid_version()
        # Check the gtid_purged value too
        destination.check_gtid_executed()
        for cmd in gtid_info[0]:
            print "# GTID operation:", cmd
            destination.exec_query(cmd, {'fetch': False, 'commit': False})

    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE", {'fetch': False, 'commit': False})

    # Copy (create) objects.
    # We need to delay triggers and events until after the data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True

    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy tables data
    if not skip_data and not skip_tables:

        # Copy tables
        for db_name in db_list:

            # Get a Database class instance
            db = Database(source, db_name[0], options)

            # Perform the copy
            # Note: threads are no longer used; multiprocessing is used instead.
            db.init()
            db.copy_data(db_name[1], options, destination, connections=1,
                         src_con_val=src_val, dest_con_val=dest_val)

    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info and dest_gtid:
        print "# GTID operation:", gtid_info[1]
        destination.exec_query(gtid_info[1])

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    # Turn on foreign keys if they were on at the start
    destination.disable_foreign_key_checks(False)

    if not quiet:
        print "#...done."
    return True
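
The same entry point also clones on a single server. A sketch, assuming
placeholder connection values; cloning is detected because src_val equals
dest_val, and this variant spells the overwrite option "force" rather than
"do_drop".

# Hypothetical clone on one server (placeholder values):
conn = {"user": "root", "password": "secret", "host": "localhost",
        "port": 3306, "socket": None}
# The destination name must differ from the source when cloning, or a
# UtilError is raised.
copy_db(conn, conn, [("world", "world_clone")], {"force": True})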
Example #3
def import_file(dest_val, file_name, options):
    """Import a file

    This method reads a file and, if needed, transforms the file into
    discrete SQL statements for execution on the destination server.

    It accepts any of the formal structured files produced by the
    mysqldbexport utility, including the SQL, CSV, TAB, GRID, and
    VERTICAL formats.

    It will read these files and skip or include the definitions or data
    as specified in the options. An error is raised for any conversion
    errors or errors while executing the statements.

    Users are highly encouraged to use the --dryrun option, which will
    print the SQL statements without executing them.

    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    file_name[in]      name (and path) of the file to import
    options[in]        a dictionary containing the options for the import:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, and debug)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.table import Table
    from mysql.utilities.common.server import connect_servers

    # Helper method to dig through the definitions for create statements
    def _process_definitions(statements, table_col_list, db_name):
        # First, get the SQL strings
        sql_strs = _build_create_objects(obj_type, db_name, definitions)
        statements.extend(sql_strs)
        # Now, save the column list
        col_list = _build_col_metadata(obj_type, definitions)
        if len(col_list) > 0:
            table_col_list.extend(col_list)

    def _process_data(tbl_name, statements, columns, table_col_list,
                      table_rows, skip_blobs):
        # if there is data here, build bulk inserts
        # First, create table reference, then call insert_rows()
        tbl = Table(destination, tbl_name)
        # Need to check to see if table exists!
        if tbl.exists():
            tbl.get_column_metadata()
            col_meta = True
        elif len(table_col_list) > 0:
            col_meta = _get_column_metadata(tbl, table_col_list)
        else:
            fix_cols = []
            fix_cols.append((tbl.tbl_name, columns))
            col_meta = _get_column_metadata(tbl, fix_cols)
        if not col_meta:
            raise UtilError("Cannot build bulk insert statements without "
                            "the table definition.")
        ins_strs = tbl.make_bulk_insert(table_rows, tbl.q_db_name)
        if len(ins_strs[0]) > 0:
            statements.extend(ins_strs[0])
        if len(ins_strs[1]) > 0 and not skip_blobs:
            for update in ins_strs[1]:
                statements.append(update)

    # Gather options
    format = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    quiet = options.get("quiet", False)
    import_type = options.get("import_type", "definitions")
    single = options.get("single", True)
    dryrun = options.get("dryrun", False)
    do_drop = options.get("do_drop", False)
    skip_blobs = options.get("skip_blobs", False)
    skip_gtid = options.get("skip_gtid", False)

    # Attempt to connect to the destination server
    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(dest_val, None, conn_options)

    destination = servers[0]

    # Check storage engines
    check_engine_options(destination, options.get("new_engine", None),
                         options.get("def_engine", None), False,
                         options.get("quiet", False))

    if not quiet:
        if import_type == "both":
            str = "definitions and data"
        else:
            str = import_type
        print "# Importing %s from %s." % (str, file_name)

    # Set up variables we will need
    skip_header = not no_headers
    if format == "sql":
        skip_header = False
    get_db = True
    check_privileges = False
    db_name = None
    file = open(file_name)
    columns = []
    read_columns = False
    table_rows = []
    obj_type = ""
    definitions = []
    statements = []
    table_col_list = []
    tbl_name = ""
    skip_rpl = options.get("skip_rpl", False)
    gtid_command_found = False
    supports_gtid = servers[0].supports_gtid() == 'ON'
    skip_gtid_warning_printed = False
    gtid_version_checked = False

    # Read the file one object/definition group at a time
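    # Each row yielded by read_next() is a (record_type, payload) pair:
    # record_type flags replication/GTID commands, definition types, or
    # data markers such as BEGIN_DATA; payload carries the statement,
    # definition, column list, or data row.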
    for row in read_next(file, format):
        # Check for replication command
        if row[0] == "RPL_COMMAND":
            if not skip_rpl:
                statements.append(row[1])
            continue
        if row[0] == "GTID_COMMAND":
            gtid_command_found = True
            if not supports_gtid:
                # only display warning once
                if not skip_gtid_warning_printed:
                    print _GTID_SKIP_WARNING
                    skip_gtid_warning_printed = True
            elif not skip_gtid:
                if not gtid_version_checked:
                    gtid_version_checked = True
                    # Check GTID version for complete feature support
                    servers[0].check_gtid_version()
                    # Check the gtid_purged value too
                    servers[0].check_gtid_executed("import")
                statements.append(row[1])
            continue
        # If this is the first pass, get the database name from the file
        if get_db:
            if skip_header:
                skip_header = False
            else:
                db_name = _get_db(row)
                # quote db_name with backticks if needed
                if db_name and not is_quoted_with_backticks(db_name):
                    db_name = quote_with_backticks(db_name)
                get_db = False
                if do_drop and import_type != "data":
                    statements.append("DROP DATABASE IF EXISTS %s;" % db_name)
                if import_type != "data":
                    if not _skip_object("CREATE_DB", options) and \
                       not format == 'sql':
                        statements.append("CREATE DATABASE %s;" % db_name)

        # Once the database name is known, check the user's write
        # permissions on the destination database
        if db_name is not None:
            dest_db = Database(destination, db_name)

            # Make a dictionary of the options
            access_options = options.copy()

            dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                       access_options)

        # Now check to see if we want definitions, data, or both:
        if row[0] == "sql" or row[0] in _DEFINITION_LIST:
            if format != "sql" and len(row[1]) == 1:
                raise UtilError("Cannot read an import file generated with "
                                "--display=NAMES")

            if import_type in ("definitions", "both"):
                if format == "sql":
                    statements.append(row[1])
                else:
                    if obj_type == "":
                        obj_type = row[0]
                    if obj_type != row[0]:
                        if len(definitions) > 0:
                            _process_definitions(statements, table_col_list,
                                                 db_name)
                        obj_type = row[0]
                        definitions = []
                    if not _skip_object(row[0], options):
                        definitions.append(row[1])
        else:
            # see if there are any definitions to process
            if len(definitions) > 0:
                _process_definitions(statements, table_col_list, db_name)
                definitions = []

            if import_type in ("data", "both"):
                if _skip_object("DATA", options):
                    continue  # skip data
                elif format == "sql":
                    statements.append(row[1])
                else:
                    if row[0] == "BEGIN_DATA":
                        # Start of table so first row is columns.
                        if len(table_rows) > 0:
                            _process_data(tbl_name, statements, columns,
                                          table_col_list, table_rows,
                                          skip_blobs)
                            table_rows = []
                        read_columns = True
                        tbl_name = row[1]
                        if not is_quoted_with_backticks(tbl_name):
                            db, sep, tbl = tbl_name.partition('.')
                            q_db = quote_with_backticks(db)
                            q_tbl = quote_with_backticks(tbl)
                            tbl_name = ".".join([q_db, q_tbl])
                    else:
                        if read_columns:
                            columns = row[1]
                            read_columns = False
                        else:
                            if not single:
                                table_rows.append(row[1])
                            else:
                                ins_str = _build_insert_data(
                                    columns, tbl_name, row[1])
                                statements.append(ins_str)

    # Process remaining definitions
    if len(definitions) > 0:
        _process_definitions(statements, table_col_list, db_name)
        definitions = []

    # Process remaining data rows
    if len(table_rows) > 0:
        _process_data(tbl_name, statements, columns, table_col_list,
                      table_rows, skip_blobs)
        table_rows = []

    # Now process the statements
    _exec_statements(statements, destination, format, options, dryrun)

    file.close()

    # Check gtid process
    if supports_gtid and not gtid_command_found:
        print _GTID_MISSING_WARNING

    if not quiet:
        print "#...done."
    return True
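
A minimal usage sketch for import_file above, assuming a file produced by
mysqldbexport; the connection values and file name are placeholders. With
dryrun set, the statements are printed instead of executed.

# Hypothetical invocation (placeholder values):
dest = {"user": "root", "password": "secret", "host": "localhost",
        "port": 3306, "socket": None}
opts = {"format": "sql", "import_type": "both", "dryrun": True,
        "do_drop": True}
import_file(dest, "world_backup.sql", opts)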
Example #4
def import_file(dest_val, file_name, options):
    """Import a file

    This method reads a file and, if needed, transforms the file into
    discrete SQL statements for execution on the destination server.

    It accepts any of the formal structured files produced by the
    mysqldbexport utility, including the SQL, CSV, TAB, GRID, and
    VERTICAL formats.

    It will read these files and skip or include the definitions or data
    as specified in the options. An error is raised for any conversion
    errors or errors while executing the statements.

    Users are highly encouraged to use the --dryrun option, which will
    print the SQL statements without executing them.

    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    file_name[in]      name (and path) of the file to import
    options[in]        a dictionary containing the options for the import:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, and debug)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.table import Table
    from mysql.utilities.common.server import connect_servers

    # Helper method to dig through the definitions for create statements
    def _process_definitions(statements, table_col_list, db_name):
        # First, get the SQL strings
        sql_strs = _build_create_objects(obj_type, db_name, definitions)
        statements.extend(sql_strs)
        # Now, save the column list
        col_list = _build_col_metadata(obj_type, definitions)
        if len(col_list) > 0:
            table_col_list.extend(col_list)

    def _process_data(tbl_name, statements, columns,
                      table_col_list, table_rows, skip_blobs):
        # if there is data here, build bulk inserts
        # First, create table reference, then call insert_rows()
        tbl = Table(destination, tbl_name)
        # Need to check to see if table exists!
        if tbl.exists():
            tbl.get_column_metadata()
            col_meta = True
        elif len(table_col_list) > 0:
            col_meta = _get_column_metadata(tbl, table_col_list)
        else:
            fix_cols = []
            fix_cols.append((tbl.tbl_name, columns))
            col_meta = _get_column_metadata(tbl, fix_cols)
        if not col_meta:
            raise UtilError("Cannot build bulk insert statements without "
                                 "the table definition.")
        ins_strs = tbl.make_bulk_insert(table_rows, tbl.q_db_name)
        if len(ins_strs[0]) > 0:
            statements.extend(ins_strs[0])
        if len(ins_strs[1]) > 0 and not skip_blobs:
            for update in ins_strs[1]:
                statements.append(update)

    # Gather options
    format = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    quiet = options.get("quiet", False)
    import_type = options.get("import_type", "definitions")
    single = options.get("single", True)
    dryrun = options.get("dryrun", False)
    do_drop = options.get("do_drop", False)
    skip_blobs = options.get("skip_blobs", False)
    skip_gtid = options.get("skip_gtid", False)

    # Attempt to connect to the destination server
    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(dest_val, None, conn_options)

    destination = servers[0]

    # Check storage engines
    check_engine_options(destination,
                         options.get("new_engine", None),
                         options.get("def_engine", None),
                         False, options.get("quiet", False))

    if not quiet:
        if import_type == "both":
            str = "definitions and data"
        else:
            str = import_type
        print "# Importing %s from %s." % (str, file_name)

    # Set up variables we will need
    skip_header = not no_headers
    if format == "sql":
        skip_header = False
    get_db = True
    check_privileges = False
    db_name = None
    file = open(file_name)
    columns = []
    read_columns = False
    table_rows = []
    obj_type = ""
    definitions = []
    statements = []
    table_col_list = []
    tbl_name = ""
    skip_rpl = options.get("skip_rpl", False)
    gtid_command_found = False
    supports_gtid = servers[0].supports_gtid() == 'ON'
    skip_gtid_warning_printed = False
    gtid_version_checked = False

    # Read the file one object/definition group at a time
    for row in read_next(file, format):
        # Check for replication command
        if row[0] == "RPL_COMMAND":
            if not skip_rpl:
                statements.append(row[1])
            continue
        if row[0] == "GTID_COMMAND":
            gtid_command_found = True
            if not supports_gtid:
                # only display warning once
                if not skip_gtid_warning_printed:
                    print _GTID_SKIP_WARNING
                    skip_gtid_warning_printed = True
            elif not skip_gtid:
                if not gtid_version_checked:
                    gtid_version_checked = True
                    # Check GTID version for complete feature support
                    servers[0].check_gtid_version()
                    # Check the gtid_purged value too
                    servers[0].check_gtid_executed("import")
                statements.append(row[1])
            continue
        # If this is the first pass, get the database name from the file
        if get_db:
            if skip_header:
                skip_header = False
            else:
                db_name = _get_db(row)
                # quote db_name with backticks if needed
                if db_name and not is_quoted_with_backticks(db_name):
                    db_name = quote_with_backticks(db_name)
                get_db = False
                if do_drop and import_type != "data":
                    statements.append("DROP DATABASE IF EXISTS %s;" % db_name)
                if import_type != "data":
                    if not _skip_object("CREATE_DB", options) and \
                       not format == 'sql':
                        statements.append("CREATE DATABASE %s;" % db_name)

        # Once the database name is known, check the user's write
        # permissions on the destination database
        if db_name is not None:
            dest_db = Database(destination, db_name)

            # Make a dictionary of the options
            access_options = options.copy()

            dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                       access_options)
            
        # Now check to see if we want definitions, data, or both:
        if row[0] == "sql" or row[0] in _DEFINITION_LIST:
            if format != "sql" and len(row[1]) == 1:
                raise UtilError("Cannot read an import file generated with "
                                "--display=NAMES")

            if import_type in ("definitions", "both"):
                if format == "sql":
                    statements.append(row[1])
                else:
                    if obj_type == "":
                        obj_type = row[0]
                    if obj_type != row[0]:
                        if len(definitions) > 0:
                            _process_definitions(statements, table_col_list,
                                                 db_name)
                        obj_type = row[0]
                        definitions = []
                    if not _skip_object(row[0], options):
                        definitions.append(row[1])
        else:
            # see if there are any definitions to process
            if len(definitions) > 0:
                _process_definitions(statements, table_col_list, db_name)
                definitions = []

            if import_type in ("data", "both"):
                if _skip_object("DATA", options):
                    continue  # skip data
                elif format == "sql":
                    statements.append(row[1])
                else:
                    if row[0] == "BEGIN_DATA":
                        # Start of table so first row is columns.
                        if len(table_rows) > 0:
                            _process_data(tbl_name, statements, columns,
                                          table_col_list, table_rows,
                                          skip_blobs)
                            table_rows = []
                        read_columns = True
                        tbl_name = row[1]
                        if not is_quoted_with_backticks(tbl_name):
                            db, sep, tbl = tbl_name.partition('.')
                            q_db = quote_with_backticks(db)
                            q_tbl = quote_with_backticks(tbl)
                            tbl_name = ".".join([q_db, q_tbl])
                    else:
                        if read_columns:
                            columns = row[1]
                            read_columns = False
                        else:
                            if not single:
                                table_rows.append(row[1])
                            else:
                                ins_str = _build_insert_data(
                                    columns, tbl_name, row[1])
                                statements.append(ins_str)

    # Process remaining definitions
    if len(definitions) > 0:
        _process_definitions(statements, table_col_list, db_name)
        definitions = []

    # Process remaining data rows
    if len(table_rows) > 0:
        _process_data(tbl_name, statements, columns,
                      table_col_list, table_rows, skip_blobs)
        table_rows = []

    # Now process the statements
    _exec_statements(statements, destination, format, options, dryrun)

    file.close()
    
    # Check gtid process
    if supports_gtid and not gtid_command_found:
        print _GTID_MISSING_WARNING

    if not quiet:
        print "#...done."
    return True
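
A data-only load is a matter of options. A sketch under the same assumptions
(placeholder connection values and file name); skip_blobs suppresses the blob
UPDATE statements that make_bulk_insert would otherwise append.

# Hypothetical data-only import of a CSV export (placeholder values):
dest = {"user": "root", "password": "secret", "host": "localhost",
        "port": 3306, "socket": None}
opts = {"format": "csv", "import_type": "data", "single": False,
        "skip_blobs": True}
import_file(dest, "world_data.csv", opts)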
Example #5
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The force parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, force, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        force    - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.command.dbexport import get_change_master_command

    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    locking = options.get("locking", "snapshot")

    rpl_info = ([], None)

    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)

    source = servers[0]
    destination = servers[1]

    cloning = (src_val == dest_val) or dest_val is None
    
    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None)) # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)
        
        # Make a dictionary of the options
        access_options = {
            'skip_views'  : skip_views,
            'skip_procs'  : skip_procs,
            'skip_funcs'  : skip_funcs,
            'skip_grants' : skip_grants,
            'skip_events' : skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)
        
        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])
        
        # Check storage engines
        check_engine_options(destination,
                             options.get("new_engine", None),
                             options.get("def_engine", None),
                             False, options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE;")

    # Copy objects
    # We need to delay triggers and events until after the data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True
    
    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy data
    if not skip_data and not skip_tables:
    
        # Copy tables
        for db_name in db_list:
    
            # Get a Database class instance
            db = Database(source, db_name[0], options)
    
            # Perform the copy
            db.init()
            db.copy_data(db_name[1], options, destination,
                         options.get("threads", False))
            
    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    if not quiet:
        print "#...done."
    return True
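
This older variant passes a thread count to copy_data rather than connection
values. A sketch with placeholder values; the "threads" option drives the
parallel data copy here, and ("world", None) keeps the database name on the
destination.

# Hypothetical invocation of the threaded variant (placeholder values):
src = {"user": "root", "password": "secret", "host": "prod-db",
       "port": 3306, "socket": None}
dst = {"user": "root", "password": "secret", "host": "standby-db",
       "port": 3306, "socket": None}
opts = {"force": True, "locking": "snapshot", "threads": 4}
copy_db(src, dst, [("world", None)], opts)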