Example #1
def export_data(server_values, db_list, options):
    """Produce data for the tables in a database.

    This method retrieves the data for each table in the databases listed in
    the form of BULK INSERT (SQL) statements or in a tabular form to the file
    specified. The valid values for the format parameter are SQL, CSV, TSV,
    VERTICAL, or GRID.

    server_values[in]  server connection value dictionary.
    db_list[in]        list of database names.
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, file_per_tbl,
                       and debug).

    Returns bool True = success, False = error
    """
    # Connect to source server.
    quiet = options.get("quiet", False)
    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(server_values, None, conn_options)
    source = servers[0]

    # Check user permissions on source for all databases.
    check_read_permissions(source, db_list, options)

    # Export databases data.
    _export_data(source, server_values, db_list, None, options)

    return True
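
A minimal usage sketch for export_data above; the connection keys follow the
convention used by connect_servers throughout these examples, and the host,
credentials, and database names are placeholders.

server_values = {"user": "root", "passwd": "secret", "host": "localhost",
                 "port": 3306, "unix_socket": None}
options = {"format": "sql",    # sql, csv, tsv, vertical, or grid
           "skip_data": False,
           "quiet": True}
export_data(server_values, ["employees", "sakila"], options)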
Example #2
    def _connect(self, conn):
        """Connect to a master server.

        This method connects to the server described by the connection
        dictionary, prompting the user to retry with new credentials if
        prompting is enabled and the connection fails.

        conn[in]           Connection dictionary used to connect to server

        Returns tuple - master Server class instance, host:port string
        """
        conn_options = {
            'quiet': self.quiet,
            'src_name': "master",
            'dest_name': None,
            'version': "5.0.0",
            'unique': True,
            'verbose': self.verbose,
        }

        certs_paths = {}
        if 'ssl_ca' in dir(conn) and conn.ssl_ca is not None:
            certs_paths['ssl_ca'] = conn.ssl_ca
        if 'ssl_cert' in dir(conn) and conn.ssl_cert is not None:
            certs_paths['ssl_cert'] = conn.ssl_cert
        if 'ssl_key' in dir(conn) and conn.ssl_key is not None:
            certs_paths['ssl_key'] = conn.ssl_key

        conn_options.update(certs_paths)

        master_info = "%s:%s" % (conn['host'],
                                 conn['port'])
        master = None

        # Clear socket if used with a local server
        if (conn['host'] == 'localhost' or conn['host'] == "127.0.0.1" or
           conn['host'] == "::1" or conn['host'] == "[::1]"):
            conn['unix_socket'] = None

        # Increment num_retries if not set when --prompt is used
        if self.prompt_user and self.num_retries == 0:
            self.num_retries += 1

        # Attempt to connect to the server given the retry limit
        for i in range(0, self.num_retries + 1):
            try:
                servers = connect_servers(conn, None, conn_options)
                master = servers[0]
                break
            except UtilError, e:
                print "FAILED.\n"
                if i < self.num_retries and self.prompt_user:
                    print "Connection to %s has failed.\n" % master_info + \
                        "Please enter the following information " + \
                        "to connect to this server."
                    conn['user'] = raw_input("User name: ")
                    conn['passwd'] = getpass.getpass("Password: ")
                else:
                    # retries expired - re-raise error if still failing
                    raise UtilError(e.errmsg)
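
The retry-and-prompt loop above can be distilled into a standalone helper. The
sketch below assumes connect_servers and UtilError are importable as in the
other examples; the helper name and the exception import path are illustrative.

import getpass

from mysql.utilities.common.server import connect_servers
from mysql.utilities.exception import UtilError  # import path assumed

def connect_with_retries(conn, conn_options, num_retries=1):
    """Try to connect, prompting for new credentials after each failure."""
    for attempt in range(num_retries + 1):
        try:
            return connect_servers(conn, None, conn_options)[0]
        except UtilError, e:
            if attempt < num_retries:
                print "Connection failed. Please re-enter credentials."
                conn['user'] = raw_input("User name: ")
                conn['passwd'] = getpass.getpass("Password: ")
            else:
                # Retries expired - re-raise the last error.
                raise UtilError(e.errmsg)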
Example #3
def show_users(src_val, verbosity, fmt, dump=False):
    """Show all users except root and anonymous users on the server.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    verbosity[in]      level of information to display
    fmt[in]            format of output
    dump[in]           if True, dump the grants for all users
                       default = False
    """
    conn_options = {"version": "5.1.0"}
    servers = connect_servers(src_val, None, conn_options)
    source = servers[0]

    if verbosity <= 1:
        _QUERY = """
            SELECT user, host FROM mysql.user
            WHERE user.user != 'root' AND user.user != ''
        """
        cols = ("user", "host")
    else:
        _QUERY = """
            SELECT user.user, user.host, db FROM mysql.user LEFT JOIN mysql.db
            ON user.user = db.user AND user.host = db.host
            WHERE user.user != 'root' AND user.user != ''
        """
        cols = ("user", "host", "database")

    users = source.exec_query(_QUERY)
    print "# All Users:"
    print_list(sys.stdout, fmt, cols, users)
    if dump:
        for user in users:
            _show_user_grants(source, None, "'%s'@'%s'" % user[0:2], verbosity)
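
A hypothetical call to show_users above; src_val mirrors the connection
dictionaries used elsewhere in these examples, and the format name is a
placeholder for whatever print_list accepts.

src_val = {"user": "root", "passwd": "secret", "host": "localhost",
           "port": 3306, "unix_socket": None}
show_users(src_val, verbosity=1, fmt="GRID", dump=False)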
Example #4
def check_replication(master_vals, slave_vals, options):
    """Check replication among a master and a slave.

    master_vals[in]    Master connection in form: user:passwd@host:port:socket
                       or login-path:port:socket
    slave_vals[in]     Slave connection in form user:passwd@host:port:socket
                       or login-path:port:socket
    options[in]        dictionary of options (verbosity, quiet, pedantic)

    Returns bool - True if all tests pass, False if errors, warnings, failures
    """
    quiet = options.get("quiet", False)
    width = options.get("width", 75)
    slave_status = options.get("slave_status", False)

    test_errors = False

    conn_options = {
        'quiet': quiet,
        'src_name': "master",
        'dest_name': 'slave',
        'version': "5.0.0",
        'unique': True,
    }

    certs_paths = {}
    if 'ssl_ca' in dir(options) and options.ssl_ca is not None:
        certs_paths['ssl_ca'] = options.ssl_ca
    if 'ssl_cert' in dir(options) and options.ssl_cert is not None:
        certs_paths['ssl_cert'] = options.ssl_cert
    if 'ssl_key' in dir(options) and options.ssl_key is not None:
        certs_paths['ssl_key'] = options.ssl_key
    conn_options.update(certs_paths)

    servers = connect_servers(master_vals, slave_vals, conn_options)

    rpl_options = options.copy()
    rpl_options['verbosity'] = options.get("verbosity", 0) > 0

    # Create an instance of the replication object
    rpl = Replication(servers[0], servers[1], rpl_options)

    if not quiet:
        print "Test Description",
        print ' ' * (width - 24),
        print "Status"
        print '-' * width

    for test in _get_replication_tests(rpl, options):
        if not test.exec_test():
            test_errors = True

    if slave_status and not quiet:
        try:
            print "\n#\n# Slave status: \n#"
            rpl.slave.show_status()
        except UtilRplError, e:
            print "ERROR:", e.errmsg
Example #5
    def run(self):
        # Test mixes of the valid parameter types for server
        for i, test_case in enumerate(self.TEST_CASES):
            try:
                s = connect_servers(test_case[1], test_case[2],
                                    self.server_options)
            except UtilError, e:
                self.results.append((test_case[0], True, e.errmsg))
            except FormatError, e:
                self.results.append((test_case[0], True, e))
Example #6
def export_databases(server_values, db_list, options):
    """Export one or more databases
    
    This method performs the export of a list of databases first dumping the
    definitions then the data. It supports dumping replication commands (STOP
    SLAVE, CHANGE MASTER, START SLAVE) for exporting data for use in
    replication scenarios.
    
    server_values[in]      server connection value dictionary
    db_list[in]            list of database names
    options[in]            option dictionary
                           Must include the skip_* options for copy and export
    """
    
    from mysql.utilities.command.dbcopy import get_copy_lock
    from mysql.utilities.common.server import connect_servers

    export = options.get("export", "definitions")
    rpl_mode = options.get("rpl_mode", "master")
    quiet = options.get("quiet", False)
    verbosity = options.get("verbosity", 0)
    locking = options.get("locking", "snapshot")
    
    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(server_values, None, conn_options)
    source = servers[0]
    
    # Lock tables first
    my_lock = get_copy_lock(source, db_list, options, True)

    # if --rpl specified, write initial replication command
    if rpl_mode:
        rpl_info = get_change_master_command(source, options)
        write_commands(rpl_info[_RPL_FILE], ["STOP SLAVE;"], options)
        
    # dump metadata
    if export in ("definitions", "both"):
        export_metadata(source, server_values, db_list, options)
        
    # dump data
    if export in ("data", "both"):
        if options.get("display", "brief") != "brief":
            print "# NOTE : --display is ignored for data export."
        export_data(source, server_values, db_list, options)
        
    # if --rpl specified, write replication end command
    if rpl_mode:
        write_commands(rpl_info[_RPL_FILE], rpl_info[_RPL_COMMANDS],
                       options)
        write_commands(rpl_info[_RPL_FILE], ["START SLAVE;"], options)

    my_lock.unlock()
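
A hypothetical invocation of export_databases above. Per the docstring, the
options dictionary must also carry the skip_* keys used by copy and export;
only a few representative keys are shown here.

server_values = {"user": "root", "passwd": "secret", "host": "localhost",
                 "port": 3306, "unix_socket": None}
options = {"export": "both",       # definitions, data, or both
           "rpl_mode": "master",
           "locking": "snapshot",
           "quiet": True,
           "verbosity": 0}
export_databases(server_values, ["employees"], options)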
Example #7
    def run(self):
        for i in range(0, len(_TEST_CASES)):
            if self.debug:
                print "\nTest case {0} - {1}".format(i + 1, _TEST_CASES[i][0])
            try:
                src_val = get_connection_dictionary(_TEST_CASES[i][1])
                server_options = {'quiet': True, 'version': None,
                                  'src_name': "test", 'dest_name': None, }
                connect_servers(src_val, None, server_options)
            except UtilError as err:
                self.results.append((True, err.errmsg))
            except ConnectionValuesError as err:
                self.results.append((True, err.errmsg))
            except FormatError as err:
                self.results.append((True, err))
            else:
                self.results.append((False, ''))
            if self.debug:
                print "Test results:", self.results[i][0], self.results[i][1]

        return True
Example #8
    def run(self):
        server_options = {'quiet': True, 'version': None,
                          'src_name': "test 1", 'dest_name': "test 2", }
        # Test mixes of the valid parameter types for server
        for i, test_case in enumerate(self.test_cases):
            try:
                connect_servers(test_case[1], test_case[2], server_options)
            except UtilError as err:
                self.results.append((test_case[0], True, err.errmsg))
            except FormatError as err:
                self.results.append((test_case[0], True, err))
            else:
                self.results.append((test_case[0], False, None))

        if self.debug:
            print "\nTest Results (test case, actual result, expected result):"
            for i in range(0, len(self.test_cases)):
                # pylint: disable=W0631
                print("{0}, {1}, {2}".format(
                    self.results[i][0], self.results[i][2], test_case[3]))

        return True
Example #9
def check_replication(master_vals, slave_vals, options):
    """Check replication among a master and a slave.
    
    master_vals[in]    Master connection in form user:passwd@host:port:sock
    slave_vals[in]     Slave connection in form user:passwd@host:port:sock
    options[in]        dictionary of options (verbosity, quiet, pedantic)
    
    Returns bool - True if all tests pass, False if errors, warnings, failures
    """
    
    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.replication import Replication
    
    quiet = options.get("quiet", False)
    width = options.get("width", 75)
    slave_status = options.get("slave_status", False)

    test_errors = False

    conn_options = {
        'quiet'     : quiet,
        'src_name'  : "master",
        'dest_name' : 'slave',
        'version'   : "5.0.0",
        'unique'    : True,
    }
    servers = connect_servers(master_vals, slave_vals, conn_options)
    
    rpl_options = options.copy()
    rpl_options['verbosity'] = options.get("verbosity", 0) > 0

    # Create an instance of the replication object
    rpl = Replication(servers[0], servers[1], rpl_options)
    
    if not quiet:
        print "Test Description",
        print ' ' * (width-24),
        print "Status"
        print '-' * width
    
    for test in _get_replication_tests(rpl, options):
        if not test.exec_test():
            test_errors = True
                
    if slave_status and not quiet:
        try:
            print "\n#\n# Slave status: \n#" 
            rpl.slave.show_status()
        except UtilRplError, e:
            print "ERROR:", e.errmsg
Example #10
    def run(self):
        for i in range(0, len(_TEST_CASES)):
            if self.debug:
                print "\nTest case %s - %s" % (i + 1, _TEST_CASES[i][0])
            try:
                src_val = get_connection_dictionary(_TEST_CASES[i][1])
                server_options = {
                    'quiet'     : True,
                    'version'   : None,
                    'src_name'  : "test",
                    'dest_name' : None,
                }
                s = connect_servers(src_val, None, server_options)
            except UtilError, e:
                self.results.append((True, e.errmsg))
            except FormatError, e:
                self.results.append((True, e))
Example #11
def export_metadata(server_values, db_list, options):
    """Produce rows to be used to recreate objects in a database.

    This method retrieves the objects for each database listed in the form
    of CREATE (SQL) statements or in a tabular form to the file specified.
    The valid values for the format parameter are SQL, CSV, TSV, VERTICAL,
    or GRID.

    server_values[in]  server connection value dictionary.
    db_list[in]        list of database names.
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format,
                       debug, exclude_names, exclude_patterns)

    Returns bool True = success, False = error
    """
    # Connect to source server.
    quiet = options.get("quiet", False)
    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(server_values, None, conn_options)
    source = servers[0]

    if options.get("all", False):
        rows = source.get_all_databases()
        for row in rows:
            if row[0] not in db_list:
                db_list.append(row[0])

    # Check user permissions on source for all databases.
    check_read_permissions(source, db_list, options)

    # Export databases metadata.
    _export_metadata(source, db_list, None, options)

    return True
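
A minimal sketch for export_metadata above, again with placeholder connection
values; setting the "all" option asks the function to extend db_list with
every database found on the source.

server_values = {"user": "root", "passwd": "secret", "host": "localhost",
                 "port": 3306, "unix_socket": None}
options = {"format": "sql", "quiet": True, "all": False}
export_metadata(server_values, ["employees"], options)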
Example #12
def server_connect(server1_val, server2_val, object1, object2, options):
    """Connect to the servers
    
    This method connects to the servers and checks to see if the objects
    are different: db1.obj1 != db2.obj2 by name match.
    
    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    object1[in]        the first object in the compare
    object2[in]        the second object in the compare
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity)

    Returns tuple of Server objects (server1, server2)
    """
    from mysql.utilities.common.server import connect_servers

    quiet = options.get("quiet", False)
    verbosity = options.get("verbosity", 0)

    conn_options = {"quiet": quiet, "src_name": "server1", "dest_name": "server2", "version": "5.1.30"}

    servers = connect_servers(server1_val, server2_val, conn_options)

    server1 = servers[0]
    server2 = servers[1]
    if server2 is None:
        server2 = server1

    if server1 == server2 and object1 == object2:
        raise UtilError("Comparing the same object on the same server.")

    return (server1, server2)
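
A hypothetical call to server_connect above, comparing the same-named object
in one database on two different servers (all values are placeholders).

server1_val = {"user": "root", "passwd": "secret",
               "host": "prod.example.com", "port": 3306}
server2_val = {"user": "root", "passwd": "secret",
               "host": "test.example.com", "port": 3306}
options = {"quiet": False, "verbosity": 0}
server1, server2 = server_connect(server1_val, server2_val,
                                  "db1.table1", "db1.table1", options)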
Example #13
def show_users(src_val, verbosity, format):
    """Show all users except root and anonymous users on the server.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    verbosity[in]      level of information to display
    format[in]         format of output
    """

    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.format import print_list

    conn_options = {
        'version'   : "5.1.0",
    }
    servers = connect_servers(src_val, None, conn_options)
    source = servers[0]

    if verbosity <= 1:
        _QUERY = """
            SELECT user, host FROM mysql.user
            WHERE user != 'root' and user != ''
        """
        cols = ("user", "host")
    else:
        _QUERY = """
            SELECT user.user, user.host, db FROM mysql.user LEFT JOIN mysql.db
            ON user.user = db.user AND user.host = db.host
            WHERE user.user != 'root' and user.user != ''
        """
        cols = ("user", "host", "database")

    users = source.exec_query(_QUERY)

    print_list(sys.stdout, format, cols, users)
Example #14
def _export_table_data(source_srv, table, output_file, options):
    """Export the table data.

    This private method retrieves the data for the specified table in SQL
    format (e.g., INSERT statements) or in a tabular form (GRID, TAB, CSV,
    VERTICAL) to the specified output file or a separated file, according to
    the defined options.

    source_srv[in]  Server instance or dictionary with connection values.
    table[in]       Table to export, tuple with database name and table name.
    output_file[in] Output file to store the export data.
    options[in]     Dictionary containing the options for the export:
                        (skip_tables, skip_views, skip_triggers, skip_procs,
                        skip_funcs, skip_events, skip_grants, skip_create,
                        skip_data, no_header, display, format, file_per_tbl,
                        and debug).

    Returns a filename if a temporary file is created to store the output
    result (used for multiprocessing), otherwise None.
    """
    frmt = options.get("format", "sql")
    no_headers = options.get("no_headers", True)
    single = options.get("single", False)
    skip_blobs = options.get("skip_blobs", False)
    quiet = options.get("quiet", False)
    file_per_table = options.get("file_per_tbl", False)

    # Handle source server instance or server connection values.
    # Note: For multiprocessing the use of connection values instead of a
    # server instance is required to avoid internal errors.
    if isinstance(source_srv, Server):
        source = source_srv
    else:
        # Get source server instance from connection values.
        conn_options = {
            'quiet': True,  # Avoid repeating output for multiprocessing.
            'version': "5.1.30",
        }
        servers = connect_servers(source_srv, None, conn_options)
        source = servers[0]

    # Handle qualified table name (with backtick quotes).
    db_name = table[0]
    tbl_name = "{0}.{1}".format(db_name, table[1])
    q_db_name = quote_with_backticks(db_name)
    q_tbl_name = "{0}.{1}".format(q_db_name, quote_with_backticks(table[1]))

    # Determine output file to store exported table data.
    if file_per_table:
        # Store result of table export to a separated file.
        file_name = _generate_tbl_filename(tbl_name, frmt)
        outfile = open(file_name, "w+")
        tempfile_used = False
    else:
        if output_file:
            # Output file to store result is defined.
            outfile = output_file
            tempfile_used = False
        else:
            # Store result in a temporary file (merged later).
            # Used by multiprocess export.
            tempfile_used = True
            outfile = tempfile.NamedTemporaryFile(delete=False)

    message = "# Data for table {0}:".format(q_tbl_name)
    outfile.write("{0}\n".format(message))

    tbl_options = {
        'verbose': False,
        'get_cols': True,
        'quiet': quiet
    }
    cur_table = Table(source, q_tbl_name, tbl_options)
    if single and frmt not in ("sql", "grid", "vertical"):
        retrieval_mode = -1
        first = True
    else:
        retrieval_mode = 1
        first = False

    # Find if we have some UNIQUE NOT NULL column indexes.
    unique_indexes = len(cur_table.get_not_null_unique_indexes())

    # If all columns are BLOBS or there aren't any UNIQUE NOT NULL indexes
    # then rows won't be correctly copied using the update statement,
    # so we must warn the user.
    if (not skip_blobs and frmt == "sql" and
            (cur_table.blob_columns == len(cur_table.column_names)
             or (not unique_indexes and cur_table.blob_columns))):
        print("# WARNING: Table {0}.{1} contains only BLOB and TEXT "
              "fields. Rows will be generated with separate INSERT "
              "statements.".format(cur_table.db_name, cur_table.tbl_name))

    for data_rows in cur_table.retrieve_rows(retrieval_mode):
        _export_row(data_rows, cur_table, frmt, single,
                    skip_blobs, first, no_headers, outfile)
        if first:
            first = False

    if file_per_table:
        outfile.close()

    return outfile.name if tempfile_used else None
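
A sketch of calling the private helper above with connection values rather
than a Server instance (the multiprocessing-safe form noted in the comments).
With output_file=None and file_per_tbl=False, the function writes to a
temporary file and returns its name; all values below are placeholders.

src_val = {"user": "root", "passwd": "secret", "host": "localhost",
           "port": 3306, "unix_socket": None}
options = {"format": "csv", "file_per_tbl": False, "quiet": True}
tmp_name = _export_table_data(src_val, ("employees", "titles"), None, options)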
Example #15
except UtilError, e:
    parser.error(e.errmsg)

from mysql.utilities.common.server import connect_servers

# Parse source connection values
try:
    source_values = parse_connection(opt.server)
except:
    parser.error("Source connection values invalid or cannot be parsed.")

try:
    conn_options = {
        'version'   : "5.1.30",
    }
    servers = connect_servers(source_values, None, conn_options)
except UtilError, e:
    parser.error(e.errmsg)

try:
    res = servers[0].show_server_variable("datadir")
    datadir = res[0][1]
except UtilError, e:
    parser.error(e.errmsg)

if not os.access(datadir, os.R_OK):
    print "\nNOTICE: Your user account does not have read access to the " + \
          "datadir. Data sizes will be calculated and actual file sizes " + \
          "may be omitted. Some features may be unavailable.\n"

# Set options for database operations.
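
For reference, a hedged sketch of the connection-string parsing this fragment
relies on; the import path is assumed from this utility family's conventions,
and the accepted string forms are those given in the docstrings above.

from mysql.utilities.common.options import parse_connection  # path assumed

source_values = parse_connection("root:secret@localhost:3306")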
Example #16
def setup_replication(master_vals, slave_vals, rpl_user,
                      options, test_db=None):
    """Setup replication among a master and a slave.
    
    master_vals[in]    Master connection in form user:passwd@host:port:sock
    slave_vals[in]     Slave connection in form user:passwd@host:port:sock
    rpl_user[in]       Replication user in the form user:passwd
    options[in]        dictionary of options (verbosity, quiet, pedantic)
    test_db[in]        Test replication using this database name (optional)
                       default = None
    """
    
    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.replication import Replication
    
    verbosity = options.get("verbosity", 0)

    conn_options = {
        'src_name'  : "master",
        'dest_name' : 'slave',
        'version'   : "5.0.0",
        'unique'    : True,
    }
    servers = connect_servers(master_vals, slave_vals, conn_options)
    master = servers[0]
    slave = servers[1]
    
    rpl_options = options.copy()
    rpl_options['verbosity'] = verbosity > 0
    
    # Create an instance of the replication object
    rpl = Replication(master, slave, rpl_options)

    # Check for server_id uniqueness
    errors = rpl.check_server_ids()
    for error in errors:
        print error

    if verbosity > 0:
        print "# master id = %s" % master.get_server_id()
        print "#  slave id = %s" % slave.get_server_id()

    # Check for server_uuid uniqueness
    errors = rpl.check_server_uuids()
    for error in errors:
        print error

    if verbosity > 0:
        print "# master uuid = %s" % master.get_server_uuid()
        print "#  slave uuid = %s" % slave.get_server_uuid()

    # Check InnoDB compatibility
    if verbosity > 0:
        print "# Checking InnoDB statistics for type and version conflicts."

    errors = rpl.check_innodb_compatibility(options)
    for error in errors:
        print error
    
    # Checking storage engines                
    if verbosity > 0:
        print "# Checking storage engines..."
        
    errors = rpl.check_storage_engines(options)
    for error in errors:
        print error
            
    # Check master for binary logging
    print "# Checking for binary logging on master..."
    errors = rpl.check_master_binlog()
    if errors:
        raise UtilError(errors[0])
        
    # Setup replication
    print "# Setting up replication..."
    if not rpl.setup(rpl_user, 10):
        raise UtilError("Cannot setup replication.")
        
    # Test the replication setup.
    if test_db:
        rpl.test(test_db, 10)
        
    print "# ...done."
Example #17
def import_file(dest_val, file_name, options):
    """Import a file

    This method reads a file and, if needed, transforms the file into
    discrete SQL statements for execution on the destination server.

    It accepts any of the formal structured files produced by the
    mysqlexport utility including formats SQL, CSV, TAB, GRID, and
    VERTICAL.

    It will read these files and skip or include the definitions or data
    as specified in the options. An error is raised for any conversion
    errors or errors while executing the statements.

    Users are highly encouraged to use the --dryrun option which will
    print the SQL statements without executing them.

    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    file_name[in]      name (and path) of the file to import
    options[in]        a dictionary containing the options for the import:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, and debug)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.table import Table
    from mysql.utilities.common.server import connect_servers

    # Helper method to dig through the definitions for create statements
    def _process_definitions(statements, table_col_list, db_name):
        # First, get the SQL strings
        sql_strs = _build_create_objects(obj_type, db_name, definitions)
        statements.extend(sql_strs)
        # Now, save the column list
        col_list = _build_col_metadata(obj_type, definitions)
        if len(col_list) > 0:
            table_col_list.extend(col_list)

    def _process_data(tbl_name, statements, columns,
                      table_col_list, table_rows, skip_blobs):
        # if there is data here, build bulk inserts
        # First, create table reference, then call insert_rows()
        tbl = Table(destination, tbl_name)
        # Need to check to see if table exists!
        if tbl.exists():
            tbl.get_column_metadata()
            col_meta = True
        elif len(table_col_list) > 0:
            col_meta = _get_column_metadata(tbl, table_col_list)
        else:
            fix_cols = []
            fix_cols.append((tbl.tbl_name, columns))
            col_meta = _get_column_metadata(tbl, fix_cols)
        if not col_meta:
            raise UtilError("Cannot build bulk insert statements without "
                                 "the table definition.")
        ins_strs = tbl.make_bulk_insert(table_rows, tbl.q_db_name)
        if len(ins_strs[0]) > 0:
            statements.extend(ins_strs[0])
        if len(ins_strs[1]) > 0 and not skip_blobs:
            for update in ins_strs[1]:
                statements.append(update)

    # Gather options
    format = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    quiet = options.get("quiet", False)
    import_type = options.get("import_type", "definitions")
    single = options.get("single", True)
    dryrun = options.get("dryrun", False)
    do_drop = options.get("do_drop", False)
    skip_blobs = options.get("skip_blobs", False)
    skip_gtid = options.get("skip_gtid", False)

    # Attempt to connect to the destination server
    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(dest_val, None, conn_options)

    destination = servers[0]

    # Check storage engines
    check_engine_options(destination,
                         options.get("new_engine", None),
                         options.get("def_engine", None),
                         False, options.get("quiet", False))

    if not quiet:
        if import_type == "both":
            str = "definitions and data"
        else:
            str = import_type
        print "# Importing %s from %s." % (str, file_name)

    # Setup variables we will need
    skip_header = not no_headers
    if format == "sql":
        skip_header = False
    get_db = True
    check_privileges = False
    db_name = None
    fileobj = open(file_name)
    columns = []
    read_columns = False
    table_rows = []
    obj_type = ""
    definitions = []
    statements = []
    table_col_list = []
    tbl_name = ""
    skip_rpl = options.get("skip_rpl", False)
    gtid_command_found = False
    supports_gtid = servers[0].supports_gtid() == 'ON'
    skip_gtid_warning_printed = False
    gtid_version_checked = False

    # Read the file one object/definition group at a time
    for row in read_next(fileobj, format):
        # Check for replication command
        if row[0] == "RPL_COMMAND":
            if not skip_rpl:
                statements.append(row[1])
            continue
        if row[0] == "GTID_COMMAND":
            gtid_command_found = True
            if not supports_gtid:
                # only display warning once
                if not skip_gtid_warning_printed:
                    print _GTID_SKIP_WARNING
                    skip_gtid_warning_printed = True
            elif not skip_gtid:
                if not gtid_version_checked:
                    gtid_version_checked = True
                    # Check GTID version for complete feature support
                    servers[0].check_gtid_version()
                    # Check the gtid_purged value too
                    servers[0].check_gtid_executed("import")
                statements.append(row[1])
            continue
        # If this is the first pass, get the database name from the file
        if get_db:
            if skip_header:
                skip_header = False
            else:
                db_name = _get_db(row)
                # quote db_name with backticks if needed
                if db_name and not is_quoted_with_backticks(db_name):
                    db_name = quote_with_backticks(db_name)
                get_db = False
                if do_drop and import_type != "data":
                    statements.append("DROP DATABASE IF EXISTS %s;" % db_name)
                if import_type != "data":
                    if not _skip_object("CREATE_DB", options) and \
                       not format == 'sql':
                        statements.append("CREATE DATABASE %s;" % db_name)

        # This is the first time through the loop so we must
        # check user permissions on source for all databases
        if db_name is not None:
            dest_db = Database(destination, db_name)

            # Make a dictionary of the options
            access_options = options.copy()

            dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                       access_options)
            
        # Now check to see if we want definitions, data, or both:
        if row[0] == "sql" or row[0] in _DEFINITION_LIST:
            if format != "sql" and len(row[1]) == 1:
                raise UtilError("Cannot read an import file generated with "
                                "--display=NAMES")

            if import_type in ("definitions", "both"):
                if format == "sql":
                    statements.append(row[1])
                else:
                    if obj_type == "":
                        obj_type = row[0]
                    if obj_type != row[0]:
                        if len(definitions) > 0:
                            _process_definitions(statements, table_col_list,
                                                 db_name)
                        obj_type = row[0]
                        definitions = []
                    if not _skip_object(row[0], options):
                        definitions.append(row[1])
        else:
            # see if there are any definitions to process
            if len(definitions) > 0:
                _process_definitions(statements, table_col_list, db_name)
                definitions = []

            if import_type in ("data", "both"):
                if _skip_object("DATA", options):
                    continue  # skip data
                elif format == "sql":
                    statements.append(row[1])
                else:
                    if row[0] == "BEGIN_DATA":
                        # Start of table so first row is columns.
                        if len(table_rows) > 0:
                            _process_data(tbl_name, statements, columns,
                                          table_col_list, table_rows,
                                          skip_blobs)
                            table_rows = []
                        read_columns = True
                        tbl_name = row[1]
                        if not is_quoted_with_backticks(tbl_name):
                            db, sep, tbl = tbl_name.partition('.')
                            q_db = quote_with_backticks(db)
                            q_tbl = quote_with_backticks(tbl)
                            tbl_name = ".".join([q_db, q_tbl])
                    else:
                        if read_columns:
                            columns = row[1]
                            read_columns = False
                        else:
                            if not single:
                                table_rows.append(row[1])
                            else:
                                ins_str = _build_insert_data(
                                    columns, tbl_name, row[1])
                                statements.append(ins_str)

    # Process remaining definitions                                 
    if len(definitions) > 0:
        _process_definitions(statements, table_col_list, db_name)
        definitions = []

    # Process remaining data rows
    if len(table_rows) > 0:
        _process_data(tbl_name, statements, columns,
                      table_col_list, table_rows, skip_blobs)
        table_rows = []

    # Now process the statements
    _exec_statements(statements, destination, format, options, dryrun)

    fileobj.close()
    
    # Check gtid process
    if supports_gtid and not gtid_command_found:
        print _GTID_MISSING_WARNING

    if not quiet:
        print "#...done."
    return True
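
A sketch of importing a previously exported file with import_file above.
Using dryrun=True, as the docstring encourages, prints the statements without
executing them; the file name and connection values are placeholders.

dest_val = {"user": "root", "passwd": "secret", "host": "localhost",
            "port": 3306, "unix_socket": None}
options = {"format": "sql", "import_type": "both", "dryrun": True,
           "single": True, "quiet": False}
import_file(dest_val, "employees.sql", options)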
Example #18
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The force parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, force, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        force    - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.command.dbexport import get_change_master_command

    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    locking = options.get("locking", "snapshot")

    rpl_info = ([], None)

    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)

    source = servers[0]
    destination = servers[1]

    cloning = (src_val == dest_val) or dest_val is None
    
    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None)) # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)
        
        # Make a dictionary of the options
        access_options = {
            'skip_views'  : skip_views,
            'skip_procs'  : skip_procs,
            'skip_funcs'  : skip_funcs,
            'skip_grants' : skip_grants,
            'skip_events' : skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)
        
        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])
        
        # Check storage engines
        check_engine_options(destination,
                             options.get("new_engine", None),
                             options.get("def_engine", None),
                             False, options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE;")

    # Copy objects
    # We need to delay trigger and events to after data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True
    
    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy data
    if not skip_data and not skip_tables:
    
        # Copy tables
        for db_name in db_list:
    
            # Get a Database class instance
            db = Database(source, db_name[0], options)
    
            # Perform the copy
            db.init()
            db.copy_data(db_name[1], options, destination,
                         options.get("threads", False))
            
    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    if not quiet:
        print "#...done."
    return True
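
A hypothetical copy_db call. Note that db_list entries are
(source_db, destination_db) tuples, with None in the second position meaning
"keep the same name"; connection values are placeholders.

src_val = {"user": "root", "passwd": "secret",
           "host": "prod.example.com", "port": 3306}
dest_val = {"user": "root", "passwd": "secret",
            "host": "test.example.com", "port": 3306}
db_list = [("employees", None), ("sakila", "sakila_copy")]
options = {"quiet": False, "verbose": False, "locking": "snapshot"}
copy_db(src_val, dest_val, db_list, options)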
Example #19
def _server_info(server_val, get_defaults=False, options=None):
    """Show information about a running server
    
    This method gathers information from a running server. This information is
    returned as a tuple to be displayed to the user in a format specified. The
    information returned includes the following:
    
    * server connection information
    * version number of the server
    * data directory path
    * base directory path
    * plugin directory path
    * configuration file location and name
    * current binary log file
    * current binary log position
    * current relay log file
    * current relay log position
    
    server_val[in]    the server connection values or a connected server
    get_defaults[in]  if True, get the default settings for the server
    options[in]       options for connecting to the server
    
    Return tuple - information about server
    """
    import tempfile
    
    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.tools import get_tool_path

    verbosity = options.get("verbosity", 0)

    # Parse source connection values
    source_values = parse_connection(server_val, None, options)

    # Connect to the server
    conn_options = {
        'version'   : "5.1.30",
    }
    servers = connect_servers(source_values, None, conn_options)
    server = servers[0]

    rows = server.exec_query("SHOW VARIABLES LIKE 'basedir'")
    if rows:
        basedir = rows[0][1]
    else:
        raise UtilError("Unable to determine basedir of running server.")

    my_def_search = []
    my_def_path = get_tool_path(basedir, "my_print_defaults")
    if os.name == "posix":
        my_def_search = ["/etc/my.cnf", "/etc/mysql/my.cnf",
                         os.path.join(basedir, "my.cnf"), "~/.my.cnf"]
    else:
        my_def_search = ["c:\windows\my.ini","c:\my.ini", "c:\my.cnf",
                         os.path.join(os.curdir, "my.ini")]
    my_def_search.append(os.path.join(os.curdir, "my.cnf"))

    # Make 'key' value
    server_id = source_values['host']
    # Use string mapping because port is an integer
    server_id += ":%s" % source_values['port']
    if source_values.get('socket', None) is not None:
        server_id += ":" + source_values.get('socket')

    defaults = []
    if get_defaults:
        if verbosity > 0:
            file = tempfile.TemporaryFile()
        else:
            file = open(os.devnull, "w+b")
        subprocess.call([my_def_path, "mysqld"], stdout=file)
        file.seek(0)
        defaults.append("\nDefaults for server " + server_id)
        for line in file.readlines():
            defaults.append(line.rstrip())

    # Get server version
    version = None
    try:
        res = server.show_server_variable('version')
        version = res[0][1]
    except:
        raise UtilError("Cannot get version for server " + server_id)

    # Find config file
    config_file = ""
    for search_path in my_def_search:
        if os.path.exists(search_path):
            if len(config_file) > 0:
                config_file += ", " + search_path
            else:
                config_file = search_path

    # Find datadir, basedir, plugin-dir, binary log, relay log
    res = server.show_server_variable("datadir")
    datadir = res[0][1]
    res = server.show_server_variable("basedir")
    basedir = res[0][1]
    res = server.show_server_variable("plugin_dir")
    plugin_dir = res[0][1]
    binlog, binlog_pos = _get_binlog(server)
    relay_log, relay_log_pos = _get_relay_log(server)
    server.disconnect()

    return ((server_id, version, datadir, basedir, plugin_dir, config_file,
             binlog, binlog_pos, relay_log, relay_log_pos), defaults)
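
A sketch of calling _server_info above. Since server_val is handed to
parse_connection, a user:passwd@host:port connection string (a placeholder
here) is acceptable.

info, defaults = _server_info("root:secret@localhost:3306",
                              get_defaults=True,
                              options={"verbosity": 1})
server_id, version, datadir = info[0], info[1], info[2]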
Example #20
def clone_user(src_val, dest_val, base_user, new_user_list, options):
    """Clone a user to one or more new user accounts

    This method will create one or more new user accounts copying the
    grant statements from a given user. If source and destination are the
    same, the copy will occur on a single server otherwise, the caller may
    specify a destination server to where the user accounts will be copied.

    NOTES:
    The user is responsible for making sure the databases and objects
    referenced in the cloned GRANT statements exist prior to running this
    utility.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    base_user[in]      the user account on the source machine to be used as
                       the template for the new users
    new_user_list[in]  a list of new user accounts in the form:
                       (username:password@host)
    options[in]        optional parameters dictionary including:
                         dump_sql - if True, print grants for base user
                                    (no new users are created)
                         force    - drop new users if they exist
                         verbosity - print add'l information during operation
                         quiet   - do not print information during operation
                                   Note: Error messages are printed regardless
                         global_privs - include global privileges (i.e. user@%)

    Returns bool True = success, raises UtilError if error
    """
    dump_sql = options.get("dump", False)
    overwrite = options.get("overwrite", False)
    verbosity = options.get("verbosity", False)
    quiet = options.get("quiet", False)
    global_privs = options.get("global_privs", False)

    # Don't require destination for dumping base user grants
    conn_options = {"quiet": quiet, "version": "5.1.0"}

    # Add ssl certs if there are any.
    conn_options["ssl_cert"] = options.get("ssl_cert", None)
    conn_options["ssl_ca"] = options.get("ssl_ca", None)
    conn_options["ssl_key"] = options.get("ssl_key", None)

    if dump_sql:
        servers = connect_servers(src_val, None, conn_options)
    else:
        servers = connect_servers(src_val, dest_val, conn_options)

    source = servers[0]
    destination = servers[1]
    if destination is None:
        destination = servers[0]

    # Create an instance of the user class for source.
    user_source = User(source, base_user, verbosity)

    # Create an instance of the user class for destination.
    user_dest = User(destination, base_user, verbosity)

    # First find out what is the user that will be giving of grants in the
    # destination server.
    try:
        res = destination.exec_query("SELECT CURRENT_USER()")
    except UtilDBError as err:
        raise UtilError(
            "Unable to obtain information about the account used "
            "to connect to the destination server: "
            "{0}".format(err.errmsg)
        )

    # Create an instance of the user who will be giving the privileges.
    user_priv_giver = User(destination, res[0][0], verbosity)

    # Check to ensure base user exists.
    if not user_source.exists(base_user):
        raise UtilError("Base user does not exist!")

    # Process dump operation
    if dump_sql and not quiet:
        _show_user_grants(source, user_source, base_user, verbosity)
        return True

    # Check to ensure new users don't exist.
    if not overwrite:
        for new_user in new_user_list:
            if user_dest.exists(new_user):
                raise UtilError("User %s already exists. Use --force "
                                "to drop and recreate user." % new_user)

    if not quiet:
        print "# Cloning %d users..." % (len(new_user_list))
    # Check privileges to create/delete users.
    can_create = can_drop = False
    if user_priv_giver.has_privilege("*", "*", "CREATE_USER"):
        can_create = can_drop = True
    else:
        if user_priv_giver.has_privilege("mysql", "*", "INSERT"):
            can_create = True
        if user_priv_giver.has_privilege("mysql", "*", "DELETE"):
            can_drop = True

    if not can_create:  # Destination user cannot create new users.
        raise UtilError(
            "Destination user {0}@{1} needs either the "
            "'CREATE USER' on *.* or 'INSERT' on mysql.* "
            "privilege to create new users."
            "".format(user_priv_giver.user, user_priv_giver.host)
        )

    # Perform the clone here. Loop through new users and clone.
    for new_user in new_user_list:
        if not quiet:
            print "# Cloning %s to user %s " % (base_user, new_user)
        # Check to see if user exists.
        if user_dest.exists(new_user):
            if not can_drop:  # Destination user cannot drop existing users.
                raise UtilError(
                    "Destination user {0}@{1} needs either the "
                    "'CREATE USER' on *.* or 'DELETE' on mysql.* "
                    "privilege to drop existing users."
                    "".format(user_priv_giver.user, user_priv_giver.host)
                )

            user_dest.drop(new_user)
        # Clone user.
        try:
            missing_privs = user_priv_giver.missing_user_privileges(user_source, plus_grant_option=True)
            if not missing_privs:
                user_source.clone(new_user, destination, global_privs)
            else:
                # Our user lacks some privileges, lets create an informative
                # error message
                pluralize = "" if len(missing_privs) == 1 else "s"
                missing_privs_str = ", ".join(
                    ["{0} on {1}.{2}".format(priv, db, table) for priv, db, table in missing_privs]
                )
                raise UtilError(
                    "User {0} cannot be cloned because destination"
                    " user {1}@{2} is missing the following "
                    "privilege{3}: {4}."
                    "".format(new_user, user_priv_giver.user, user_priv_giver.host, pluralize, missing_privs_str)
                )
        except UtilError:
            raise

    if not quiet:
        print "# ...done."

    return True
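
A hypothetical clone_user call. Per the docstring, new accounts are given in
user:passwd@host form and the base user serves as the grant template; all
account names and connection values are placeholders.

src_val = {"user": "root", "passwd": "secret", "host": "localhost",
           "port": 3306, "unix_socket": None}
options = {"quiet": False, "verbosity": 1, "global_privs": True}
clone_user(src_val, src_val, "joe@localhost",
           ["dev1:devpass@localhost", "dev2:devpass@localhost"], options)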
Example #21
def compare_all_databases(server1_val, server2_val, exclude_list, options):
    """Perform a consistency check among all common databases on the servers

    This method gets all databases from the servers, prints any missing
    databases and performs a consistency check among all common databases.

    If any errors or differences are found, the operation will print the
    difference and continue.

    This method will return None if no databases to compare.
    """

    success = True

    # Connect to servers
    conn_options = {
        "quiet": options.get("quiet", False),
        "src_name": "server1",
        "dest_name": "server2",
    }
    server1, server2 = connect_servers(server1_val, server2_val, conn_options)

    # Check if the specified servers are the same.
    if server1.port == server2.port and server1.is_alias(server2.host):
        raise UtilError(
            "Specified servers are the same (server1={host1}:{port1} and "
            "server2={host2}:{port2}). Cannot compare all databases on the "
            "same server.".format(host1=server1.host, port1=server1.port,
                                  host2=server2.host, port2=server2.port))

    # Get all databases, except those used in --exclude
    get_dbs_query = """
        SELECT SCHEMA_NAME
        FROM INFORMATION_SCHEMA.SCHEMATA
        WHERE SCHEMA_NAME != 'INFORMATION_SCHEMA'
        AND SCHEMA_NAME != 'PERFORMANCE_SCHEMA'
        AND SCHEMA_NAME != 'mysql'
        {0}"""
    conditions = ""
    if exclude_list:
        # Add extra where to exclude databases in exclude_list
        operator = 'REGEXP' if options['use_regexp'] else 'LIKE'
        conditions = "AND {0}".format(" AND ".join(
            ["SCHEMA_NAME NOT {0} '{1}'".format(operator, db)
             for db in exclude_list])
        )

    server1_dbs = set(
        [db[0] for db in server1.exec_query(get_dbs_query.format(conditions))]
    )
    server2_dbs = set(
        [db[0] for db in server2.exec_query(get_dbs_query.format(conditions))]
    )

    # Check missing databases
    if options['changes-for'] == 'server1':
        diff_dbs = server1_dbs.difference(server2_dbs)
        for db in diff_dbs:
            msg = _ERROR_DB_MISSING_ON_SERVER.format(db, "server1", "server2")
            print("# {0}".format(msg))
    else:
        diff_dbs = server2_dbs.difference(server1_dbs)
        for db in diff_dbs:
            msg = _ERROR_DB_MISSING_ON_SERVER.format(db, "server2", "server1")
            print("# {0}".format(msg))

    # Compare databases in common
    common_dbs = server1_dbs.intersection(server2_dbs)
    if common_dbs:
        print("# Comparing databases: {0}".format(", ".join(common_dbs)))
    else:
        success = None
    for db in common_dbs:
        try:
            res = database_compare(server1_val, server2_val, db, db, options)
            if not res:
                success = False
            print("\n")
        except UtilError as err:
            print("ERROR: {0}\n".format(err.errmsg))
            success = False

    return success
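
For context, a minimal sketch of how compare_all_databases might be
driven; the connection dictionaries and option values are assumptions,
with option names taken from the code above.

# Hypothetical invocation (all values are assumptions).
server1_val = {'user': 'root', 'passwd': 'secret', 'host': 'alpha',
               'port': 3306}
server2_val = {'user': 'root', 'passwd': 'secret', 'host': 'beta',
               'port': 3306}
options = {'quiet': False, 'use_regexp': False, 'changes-for': 'server1'}
# Exclude any database matching the LIKE pattern 'test%'.
result = compare_all_databases(server1_val, server2_val, ['test%'], options)
if result is None:
    print("# No common databases to compare.")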
Example #22
0
def _export_table_data(source_srv, table, output_file, options):
    """Export the table data.

    This private method retrieves the data for the specified table in SQL
    format (e.g., INSERT statements) or in a tabular form (GRID, TAB, CSV,
    VERTICAL) to the specified output file or a separate file, according to
    the defined options.

    source_srv[in]  Server instance or dictionary with connection values.
    table[in]       Table to export, tuple with database name and table name.
    output_file[in] Output file to store the export data.
    options[in]     Dictionary containing the options for the export:
                        (skip_tables, skip_views, skip_triggers, skip_procs,
                        skip_funcs, skip_events, skip_grants, skip_create,
                        skip_data, no_header, display, format, file_per_tbl,
                        and debug).

    Returns a filename if a temporary file is created to store the output
    result (used for multiprocessing), otherwise None.
    """
    frmt = options.get("format", "sql")
    no_headers = options.get("no_headers", True)
    single = options.get("single", False)
    skip_blobs = options.get("skip_blobs", False)
    quiet = options.get("quiet", False)
    file_per_table = options.get("file_per_tbl", False)

    # Handle source server instance or server connection values.
    # Note: For multiprocessing the use of connection values instead of a
    # server instance is required to avoid internal errors.
    if isinstance(source_srv, Server):
        source = source_srv
    else:
        # Get source server instance from connection values.
        conn_options = {
            'quiet': True,  # Avoid repeating output for multiprocessing.
            'version': "5.1.30",
        }
        servers = connect_servers(source_srv, None, conn_options)
        source = servers[0]

    # Handle qualified table name (with backtick quotes).
    db_name = table[0]
    tbl_name = "{0}.{1}".format(db_name, table[1])
    q_db_name = quote_with_backticks(db_name)
    q_tbl_name = "{0}.{1}".format(q_db_name, quote_with_backticks(table[1]))

    # Determine output file to store exported table data.
    if file_per_table:
        # Store result of table export to a separate file.
        file_name = _generate_tbl_filename(tbl_name, frmt)
        outfile = open(file_name, "w+")
        tempfile_used = False
    else:
        if output_file:
            # Output file to store result is defined.
            outfile = output_file
            tempfile_used = False
        else:
            # Store result in a temporary file (merged later).
            # Used by multiprocess export.
            tempfile_used = True
            outfile = tempfile.NamedTemporaryFile(delete=False)

    message = "# Data for table {0}:".format(q_tbl_name)
    outfile.write("{0}\n".format(message))

    tbl_options = {'verbose': False, 'get_cols': True, 'quiet': quiet}
    cur_table = Table(source, q_tbl_name, tbl_options)
    if single and frmt not in ("sql", "grid", "vertical"):
        retrieval_mode = -1
        first = True
    else:
        retrieval_mode = 1
        first = False

    for data_rows in cur_table.retrieve_rows(retrieval_mode):
        _export_row(data_rows, cur_table, frmt, single, skip_blobs, first,
                    no_headers, outfile)
        if first:
            first = False

    if file_per_table:
        outfile.close()

    return outfile.name if tempfile_used else None
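
A short, hedged sketch of how this helper might be invoked; the
connection values and table tuple are illustrative assumptions.

# Hypothetical invocation (all values are assumptions).
source = {'user': 'root', 'passwd': 'secret', 'host': 'localhost',
          'port': 3306}
options = {'format': 'csv', 'file_per_tbl': True, 'quiet': True}
# With file_per_tbl=True the data lands in its own generated file, so no
# output file is passed and no temporary file name is returned.
tmp_name = _export_table_data(source, ('world', 'city'), None, options)
assert tmp_name is None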
Example #23
0
def _server_info(server_val, get_defaults=False, options=None):
    """Show information about a running server

    This method gathers information from a running server. This information is
    returned as a tuple to be displayed to the user in a format specified. The
    information returned includes the following:

    * server connection information
    * version number of the server
    * data directory path
    * base directory path
    * plugin directory path
    * configuration file location and name
    * current binary log file
    * current binary log position
    * current relay log file
    * current relay log position

    server_val[in]    the server connection values or a connected server
    get_defaults[in]  if True, get the default settings for the server
    options[in]       options for connecting to the server

    Return tuple - information about server
    """
    if options is None:
        options = {}
    # Parse source connection values
    source_values = parse_connection(server_val, None, options)

    # Connect to the server
    conn_options = {
        'version': "5.1.30",
    }
    servers = connect_servers(source_values, None, conn_options)
    server = servers[0]

    params_dict = defaultdict(str)

    # Initialize list of warnings
    params_dict['warnings'] = []

    # Identify server by string: 'host:port[:socket]'.
    server_id = "{0}:{1}".format(source_values['host'], source_values['port'])
    if source_values.get('socket', None):
        server_id = "{0}:{1}".format(server_id, source_values.get('socket'))
    params_dict['server'] = server_id

    # Get _SERVER_VARIABLES values from the server
    for server_var in _SERVER_VARIABLES:
        res = server.show_server_variable(server_var)
        if res:
            params_dict[server_var] = res[0][1]
        else:
            raise UtilError("Unable to determine {0} of server '{1}'"
                            ".".format(server_var, server_id))

    # Get _LOG_FILES_VARIABLES values from the server
    for msg, log_tpl in _LOG_FILES_VARIABLES.iteritems():
        res = server.show_server_variable(log_tpl.log_name)
        if res:
            # Check if log is turned off
            params_dict[log_tpl.log_name] = res[0][1]
            # If logs are turned off, skip checking information about the file
            if res[0][1] in ('', 'OFF'):
                continue

            # Logging is enabled, so we can get information about log_file
            # unless it is log_error because in that case we already have it.
            if log_tpl.log_file is not None:  # if it is not log_error
                log_file = server.show_server_variable(log_tpl.log_file)[0][1]
                params_dict[log_tpl.log_file] = log_file
            else:  # log error, so log_file_name is already in params_dict
                log_file = params_dict[log_tpl.log_name]

            # Now get the information about the size of the logs
            # If log file is stderr, we cannot get the correct size.
            if log_file not in ["stderr", "stdout"]:
                try:
                    params_dict[log_tpl.log_file_size] = "{0} bytes".format(
                        os.path.getsize(log_file))

                except os.error:
                    # if we are unable to get the log_file_size
                    params_dict[log_tpl.log_file_size] = ''
                    warning_msg = _WARNING_TEMPLATE.format(msg, log_file)
                    params_dict['warnings'].append(warning_msg)

        else:
            params_dict['warnings'].append(
                "Unable to get information "
                "regarding variable '{0}'".format(msg))

    # if audit_log plugin is installed and enabled
    if server.supports_plugin('audit'):
        res = server.show_server_variable('audit_log_file')
        if res:
            # Audit_log variable might be a relative path to the datadir,
            # so it needs to be treated accordingly
            if not os.path.isabs(res[0][1]):
                params_dict['audit_log_file'] = os.path.join(
                    params_dict['datadir'], res[0][1])
            else:
                params_dict['audit_log_file'] = res[0][1]

            # Add audit_log field to the _COLUMNS List unless it is already
            # there
            if 'audit_log_file' not in _COLUMNS_SET:
                _COLUMNS.append('audit_log_file')
                _COLUMNS.append('audit_log_file_size')
                _COLUMNS_SET.add('audit_log_file')
            try:
                params_dict['audit_log_file_size'] = "{0} bytes".format(
                    os.path.getsize(params_dict['audit_log_file']))

            except os.error:
                # If we are unable to get the size of the audit_log_file
                params_dict['audit_log_file_size'] = ''
                warning_msg = _WARNING_TEMPLATE.format(
                    "audit log", params_dict['audit_log_file'])
                params_dict['warnings'].append(warning_msg)

    # Build search path for config files
    if os.name == "posix":
        my_def_search = [
            "/etc/my.cnf", "/etc/mysql/my.cnf",
            os.path.join(params_dict['basedir'], "my.cnf"), "~/.my.cnf"
        ]
    else:
        my_def_search = [
            r"c:\windows\my.ini", r"c:\my.ini", r"c:\my.cnf",
            os.path.join(os.curdir, "my.ini")
        ]
    my_def_search.append(os.path.join(os.curdir, "my.cnf"))

    # Get server's default configuration values.
    defaults = []
    if get_defaults:
        # Can only get defaults for local servers (need to access local data).
        if server.is_alias('localhost'):
            try:
                my_def_path = get_tool_path(params_dict['basedir'],
                                            "my_print_defaults")
            except UtilError as err:
                raise UtilError("Unable to retrieve the defaults data "
                                "(requires access to my_print_defaults): {0} "
                                "(basedir: {1})".format(
                                    err.errmsg, params_dict['basedir']))
            out_file = tempfile.TemporaryFile()
            # Execute tool: <basedir>/my_print_defaults mysqld
            subprocess.call([my_def_path, "mysqld"], stdout=out_file)
            out_file.seek(0)
            # Get defaults data from temp output file.
            defaults.append("\nDefaults for server {0}".format(server_id))
            for line in out_file.readlines():
                defaults.append(line.rstrip())
        else:
            # Remote server; cannot get the defaults data.
            defaults.append("\nWARNING: The utility cannot get defaults from "
                            "a remote host.")

    # Find config file
    config_file = ""
    for search_path in my_def_search:
        if os.path.exists(search_path):
            if len(config_file) > 0:
                config_file = "{0}, {1}".format(config_file, search_path)
            else:
                config_file = search_path
    params_dict['config_file'] = config_file

    # Find binary log, relay log
    params_dict['binary_log'], params_dict['binary_log_pos'] = _get_binlog(
        server)
    params_dict['relay_log'], params_dict['relay_log_pos'] = _get_relay_log(
        server)

    server.disconnect()

    return params_dict, defaults
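
A minimal usage sketch, assuming the user:password@host:port connection
string format accepted by parse_connection elsewhere in these examples;
the values are illustrative.

# Hypothetical invocation (connection string is an assumption).
params, defaults = _server_info("root:secret@localhost:3306",
                                get_defaults=True)
print("# datadir: {0}".format(params['datadir']))
for warning in params['warnings']:
    print("# WARNING: {0}".format(warning))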
Example #24
0
    # Parse source connection values if --server provided
    if opt.server is not None and not opt.basedir:
        try:
            ssl_opts = get_ssl_dict(opt)
            source_values = parse_connection(opt.server, None, ssl_opts)
        except FormatError as err:
            parser.error("Source connection values invalid: %s." % err)
        except UtilError as err:
            parser.error("Source connection values invalid: %s." % err.errmsg)

        try:
            conn_options = {
                'version': "5.1.30",
                'quiet': opt.quiet,
            }
            servers = connect_servers(source_values, None, conn_options)
        except UtilError as error:
            parser.error(error.errmsg)
        server = servers[0]

        if use_port == int(server.port):
            parser.error("You must specify a different port to use for the "
                         "spawned server.")

        basedir = server.show_server_variable("basedir")[0][1]
    else:
        basedir = opt.basedir

    # Set options for frm operations.
    options = {
        "basedir": basedir,
Example #25
0
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The force parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, force, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        force    - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """
    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    skip_gtid = options.get("skip_gtid", False)
    locking = options.get("locking", "snapshot")

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)
    cloning = (src_val == dest_val) or dest_val is None

    source = servers[0]
    if cloning:
        destination = servers[0]
    else:
        destination = servers[1]

    src_gtid = source.supports_gtid() == 'ON'
    dest_gtid = destination.supports_gtid() == 'ON' if destination else False

    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None))  # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")
    elif not skip_gtid and src_gtid:
        # Check to see if this is a full copy (complete backup)
        all_dbs = source.exec_query("SHOW DATABASES")
        dbs = [db[0] for db in db_list]
        for db in all_dbs:
            if db[0].upper() in [
                    "MYSQL", "INFORMATION_SCHEMA", "PERFORMANCE_SCHEMA"
            ]:
                continue
            if db[0] not in dbs:
                print _GTID_BACKUP_WARNING
                break

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)

        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])

        # Check storage engines
        check_engine_options(destination, options.get("new_engine", None),
                             options.get("def_engine", None), False,
                             options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    rpl_info = None

    # Turn off foreign keys if they were on at the start
    destination.disable_foreign_key_checks(True)

    # Get GTID commands
    if not skip_gtid:
        gtid_info = get_gtid_commands(source)
        if src_gtid and not dest_gtid:
            print _NON_GTID_WARNING % ("destination", "source", "to")
        elif not src_gtid and dest_gtid:
            print _NON_GTID_WARNING % ("source", "destination", "from")
    else:
        gtid_info = None
        if src_gtid and not cloning:
            print _GTID_WARNING

    # If cloning, turn off gtid generation
    if gtid_info and cloning:
        gtid_info = None
    # if GTIDs enabled, write the GTID commands
    if gtid_info and dest_gtid:
        # Check GTID version for complete feature support
        destination.check_gtid_version()
        # Check the gtid_purged value too
        destination.check_gtid_executed()
        for cmd in gtid_info[0]:
            print "# GTID operation:", cmd
            destination.exec_query(cmd, {'fetch': False, 'commit': False})

    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE", {'fetch': False, 'commit': False})

    # Copy (create) objects.
    # We need to delay triggers and events until after the data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True

    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy tables data
    if not skip_data and not skip_tables:

        # Copy tables
        for db_name in db_list:

            # Get a Database class instance
            db = Database(source, db_name[0], options)

            # Perform the copy
            # Note: No longer use threads, use multiprocessing instead.
            db.init()
            db.copy_data(db_name[1],
                         options,
                         destination,
                         connections=1,
                         src_con_val=src_val,
                         dest_con_val=dest_val)

    # if cloning with lock-all, unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info and dest_gtid:
        print "# GTID operation:", gtid_info[1]
        destination.exec_query(gtid_info[1])

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    # Turn on foreign keys if they were on at the start
    destination.disable_foreign_key_checks(False)

    if not quiet:
        print "#...done."
    return True
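
A hedged sketch of a copy_db call; the connection values and database
names are assumptions. Note that db_list entries are (source_name,
destination_name) tuples, with None meaning "keep the same name".

# Hypothetical invocation (all values are assumptions).
src = {'user': 'root', 'passwd': 'secret', 'host': 'prod', 'port': 3306}
dest = {'user': 'root', 'passwd': 'secret', 'host': 'dev', 'port': 3306}
options = {'force': True, 'quiet': False, 'locking': 'snapshot'}
# Copy world under the same name and sales as sales_copy.
copy_db(src, dest, [('world', None), ('sales', 'sales_copy')], options)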
Example #26
0
def check_index(src_val, table_args, options):
    """Check for duplicate or redundant indexes for one or more tables

    This method will examine the indexes for one or more tables and identify
    any indexes that are potential duplicates or redundant. It prints the
    equivalent DROP statements if selected.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    table_args[in]     list of tables in the form 'db.table' or 'db'
    options[in]        dictionary of options to include:
                         show-drops   : show drop statements for dupe indexes
                         skip         : skip non-existent tables
                         verbosity    : print extra information
                         show-indexes : show all indexes for each table
                         index-format : index format = sql, table, tab, csv
                         worst        : show worst performing indexes
                         best         : show best performing indexes
                         report-indexes : reports tables without PK or UK

    Returns bool True = success, raises UtilError if error
    """

    # Get options
    show_drops = options.get("show-drops", False)
    skip = options.get("skip", False)
    verbosity = options.get("verbosity", False)
    show_indexes = options.get("show-indexes", False)
    index_format = options.get("index-format", False)
    stats = options.get("stats", False)
    first_indexes = options.get("best", None)
    last_indexes = options.get("worst", None)
    report_indexes = options.get("report-indexes", False)

    # Try to connect to the MySQL database server.
    conn_options = {
        'quiet': verbosity == 1,
        'version': "5.0.0",
    }
    servers = connect_servers(src_val, None, conn_options)

    source = servers[0]

    db_list = []     # list of databases
    table_list = []  # list of all tables to process

    # Build a list of objects to process
    # 1. start with db_list if no objects present on command line
    # 2. process command line options.
    # 3. loop through database list and add all tables
    # 4. check indexes

    obj_name_regexp = re.compile(REGEXP_QUALIFIED_OBJ_NAME)

    # Perform the options check here. Loop through objects presented.
    for obj in table_args:
        m_obj = obj_name_regexp.match(obj)
        # Check if a valid database/table name is specified.
        if not m_obj:
            raise UtilError(PARSE_ERR_OBJ_NAME_FORMAT.format(
                obj_name=obj, option="the database/table arguments"))
        else:
            db_name, obj_name = m_obj.groups()
            if obj_name:
                # Table specified
                table_list.append(obj)
            # Else we are operating on a specific database.
            else:
                # Remove backtick quotes.
                db_name = remove_backtick_quoting(db_name) \
                    if is_quoted_with_backticks(db_name) else db_name
                db_list.append(db_name)

    # Loop through database list adding tables
    for db in db_list:
        db_source = Database(source, db)
        db_source.init()
        tables = db_source.get_db_objects("TABLE")
        if not tables and verbosity >= 1:
            print "# Warning: database %s does not exist. Skipping." % (db)
        for table in tables:
            table_list.append("{0}.{1}".format(quote_with_backticks(db),
                                               quote_with_backticks(table[0])))

    # Fail if no tables to check
    if not table_list:
        raise UtilError("No tables to check.")

    if verbosity > 1:
        print "# Checking indexes..."
    # Check indexes for each table in the list
    for table_name in table_list:
        tbl_options = {
            'verbose': verbosity >= 1,
            'get_cols': False,
            'quiet': verbosity is None or verbosity < 1
        }
        tbl = Table(source, table_name, tbl_options)
        exists = tbl.exists()
        if not exists and not skip:
            raise UtilError("Table %s does not exist. Use --skip "
                            "to skip missing tables." % table_name)
        if exists:
            if not tbl.get_indexes():
                if verbosity > 1 or report_indexes:
                    print "# Table %s is not indexed." % (table_name)
            else:
                if show_indexes:
                    tbl.print_indexes(index_format, verbosity)
                    # Show if table has primary key
                if verbosity > 1 or report_indexes:
                    if not tbl.has_primary_key():
                        if not tbl.has_unique_key():
                            print("# Table {0} does not contain neither a "
                                  "PRIMARY nor UNIQUE key.".format(table_name))
                        else:
                            print("# Table {0} does not contain a PRIMARY key."
                                  "".format(table_name))
                tbl.check_indexes(show_drops)

            # Show best and/or worst indexes
            if stats:
                if first_indexes is not None:
                    tbl.show_special_indexes(index_format, first_indexes, True)
                if last_indexes is not None:
                    tbl.show_special_indexes(index_format, last_indexes)

        if verbosity > 1:
            print "#"

    if verbosity > 1:
        print "# ...done."
Example #27
0
def check_grants(server_cnx_val, options, dict_of_objects):
    """Show list of privileges over a set of objects

    This function creates a GrantShow object which shows the list of
    users with (the optionally specified list of) privileges over the
    specified set of objects.

    server_cnx_val[in]      Dictionary with the connection values to the
                            server.
    options[in]             Dictionary of options (verbosity, privileges,
                            show_mode).
    dict_of_objects[in]     Dictionary of objects (set of databases, tables
                            and procedures) by database to check.

    """

    # Create server connection:
    server = connect_servers(server_cnx_val, None, options)[0]

    # Check user permissions to consult the grant information.
    _check_privileges(server)

    # Validate the dict of objects against our server.
    valid_dict_of_objects = validate_obj_type_dict(server, dict_of_objects)

    # Get optional list of required privileges
    req_privs = set(options['privileges']) if options['privileges'] else None

    # If we specify some privileges that are not valid for all the objects
    # print warning message stating that some will be ignored.
    if req_privs:
        for obj_type in valid_dict_of_objects:
            # get list of privileges that applies to the object type
            filtered_req_privs = filter_grants(req_privs, obj_type)
            # if the set sizes differ, some of the privileges cannot be
            # applied to this object type, so print a warning
            if len(filtered_req_privs) != len(req_privs):
                if obj_type.upper() == DATABASE_TYPE:
                    obj_lst = [obj_tpl[0] for obj_tpl in
                               valid_dict_of_objects[obj_type]]
                else:
                    obj_lst = [".".join(obj_tpl) for obj_tpl in
                               valid_dict_of_objects[obj_type]]
                obj_lst_str = join_and_build_str(obj_lst)
                missing_privs = sorted(req_privs - filtered_req_privs)
                priv_str = join_and_build_str(missing_privs)
                verb = "do" if len(missing_privs) > 1 else "does"
                print("# WARNING: {0} {1} not apply to {2}s "
                      "and will be ignored for: {3}.".format(
                          priv_str, verb, obj_type.lower(), obj_lst_str))

    # get the grantee information dictionary
    grantee_info_dict = get_grantees(server, valid_dict_of_objects,
                                     req_privileges=req_privs)

    # Print the information
    obj_type_lst = [DATABASE_TYPE, TABLE_TYPE, ROUTINE_TYPE]
    for obj_type in obj_type_lst:
        if obj_type in grantee_info_dict:
            # Sort by object name
            for obj_name in sorted(grantee_info_dict[obj_type]):
                print("\n# {0} {1}:".format(obj_type, obj_name))
                if options['show_mode'] == 'users':
                    # Sort by grantee name
                    output_str = ", ".join(
                        sorted(grantee_info_dict[obj_type][obj_name].keys()))
                    print("# - {0}".format(output_str))
                elif options['show_mode'] == 'user_grants':
                    # Sort by grantee name
                    for grantee, priv_set in sorted(
                            grantee_info_dict[obj_type][obj_name].iteritems()):
                        # print privileges sorted by name
                        print("# - {0} : {1}".format(
                            grantee, ", ".join(sorted(priv_set))))
                else:  # raw mode
                    # Sort by grantee name
                    for grantee in sorted(
                            grantee_info_dict[obj_type][obj_name].keys()):
                        user = User(server, grantee)
                        grant_stms = sorted(
                            user.get_grants_for_object(obj_name, obj_type))
                        if grant_stms:
                            print("# - For {0}".format(grantee))
                        for grant_stm in grant_stms:
                            print("{0}".format(grant_stm))
Example #28
0
# Parse source connection values
try:
    source_values = parse_connection(opt.server, None, opt)
except FormatError:
    _, err, _ = sys.exc_info()
    parser.error("Source connection values invalid: %s." % err)
except UtilError:
    _, err, _ = sys.exc_info()
    parser.error("Source connection values invalid: %s." % err.errmsg)

try:
    conn_options = {
        'version'   : "5.1.30",
    }
    servers = connect_servers(source_values, None, conn_options)
except UtilError:
    _, e, _ = sys.exc_info()
    parser.error(e.errmsg)

try:
    res = servers[0].show_server_variable("datadir")
    datadir = res[0][1]
except UtilError:
    _, e, _ = sys.exc_info()
    parser.error(e.errmsg)

if not os.access(datadir, os.R_OK):
    print("\nNOTICE: Your user account does not have read access to the "
          "datadir. Data sizes will be calculated and actual file sizes "
          "may be omitted. Some features may be unavailable.\n")
Example #29
0
        server_values = parse_connection(opt.server, None, options)
    except FormatError:
        _, err, _ = sys.exc_info()
        parser.error("Server connection values invalid: {0}.".format(err))
    except UtilError:
        _, err, _ = sys.exc_info()
        parser.error("Server connection values invalid: "
                     "{0}.".format(err.errmsg))

    # Get the sql_mode set on source and destination server
    conn_opts = {
        'quiet': True,
        'version': "5.1.30",
    }
    try:
        servers = connect_servers(server_values, None, conn_opts)
        server_sql_mode = servers[0].select_variable("SQL_MODE")
    except UtilError:
        server_sql_mode = ''

    # Check values for --format=raw_csv
    if opt.format == "raw_csv":
        if not opt.table:
            print("ERROR: You must provide --table while using "
                  "--format=raw_csv.")
            sys.exit(1)
        # Validate table name using format <db>.<table>
        table_regex = REGEXP_QUALIFIED_OBJ_NAME
        if "ANSI_QUOTES" in server_sql_mode:
            table_regex = REGEXP_QUALIFIED_OBJ_NAME_AQ
        table_re = re.compile(r"{0}(?:\.){0}".format(table_regex))
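
A hedged sketch of how the compiled pattern might then be applied to the
--table value; the real REGEXP_QUALIFIED_OBJ_NAME pattern and its group
semantics are defined elsewhere, so this is an assumption.

# Hypothetical validation step (group handling is an assumption).
match = table_re.match(opt.table)
if not match:
    print("ERROR: Invalid table name format for --table; expected "
          "<db>.<table>.")
    sys.exit(1)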
Example #30
0
def check_index(src_val, table_args, options):
    """Check for duplicate or redundant indexes for one or more tables
    
    This method will examine the indexes for one or more tables and identify
    any indexes that are potential duplicates or redundant. It prints the
    equivalent DROP statements if selected.
    
    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    table_args[in]     list of tables in the form 'db.table' or 'db'
    options[in]        dictionary of options to include:
                         show-drops   : show drop statements for dupe indexes
                         skip         : skip non-existent tables
                         verbosity    : print extra information
                         show-indexes : show all indexes for each table
                         index-format : index format = sql, table, tab, csv
                         worst        : show worst performing indexes
                         best         : show best performing indexes
    
    Returns bool True = success, raises UtilError if error
    """
    
    # Get options
    show_drops = options.get("show-drops", False)
    skip = options.get("skip", False)
    verbosity = options.get("verbosity", False)
    show_indexes = options.get("show-indexes", False)
    index_format = options.get("index-format", False)
    stats = options.get("stats", False)
    first_indexes = options.get("best", None)        
    last_indexes = options.get("worst", None)

    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.database import Database
    from mysql.utilities.common.table import Table

    # Try to connect to the MySQL database server.
    conn_options = {
        'quiet'     : verbosity == 1,
        'version'   : "5.0.0",
    }
    servers = connect_servers(src_val, None, conn_options)

    source = servers[0]

    db_list = []     # list of databases
    table_list = []  # list of all tables to process
    
    # Build a list of objects to process
    # 1. start with db_list if no objects present on command line
    # 2. process command line options.
    # 3. loop through database list and add all tables
    # 4. check indexes
        
    # Perform the options check here. Loop through objects presented.
    for obj in table_args:
        # If a . appears, we are operating on a specific table
        idx = obj.count(".")
        if (idx == 1):
            table_list.append(obj)
        # Else we are operating on a specific database.
        else:
            db_list.append(obj)
    
    # Loop through database list adding tables
    for db in db_list:
        db_source = Database(source, db)
        db_source.init()
        tables = db_source.get_db_objects("TABLE")
        if not tables and verbosity >= 1:
            print "# Warning: database %s does not exist. Skipping." % (db)
        for table in tables:
            table_list.append(db + "." + table[0])

    # Fail if no tables to check
    if not table_list:
        raise UtilError("No tables to check.")

    if verbosity > 1:
        print "# Checking indexes..."
    # Check indexes for each table in the list
    for table_name in table_list:
        tbl_options = {
            'verbose'  : verbosity >= 1,
            'get_cols' : False,
            'quiet'    : verbosity is None or verbosity < 1
        }
        tbl = Table(source, table_name, tbl_options)
        exists = tbl.exists()
        if not exists and not skip:
            raise UtilError("Table %s does not exist. Use --skip "
                                 "to skip missing tables." % table_name)
        if exists:
            if not tbl.get_indexes():
                if verbosity > 1:
                    print "# Table %s is not indexed." % (table_name)
            else:
                if show_indexes:
                    tbl.print_indexes(index_format)
                    # Show if table has primary key
                if not tbl.has_primary_key():
                    if verbosity > 1:
                        print "#   Table %s does not contain a PRIMARY key."
                tbl.check_indexes(show_drops)
                
            # Show best and/or worst indexes
            if stats:
                if first_indexes is not None:
                    tbl.show_special_indexes(index_format, first_indexes, True)
                if last_indexes is not None:
                    tbl.show_special_indexes(index_format, last_indexes)
                
        if verbosity > 1:
            print "#"

    if verbosity > 1:    
        print "# ...done."
Example #31
0
def clone_user(src_val, dest_val, base_user, new_user_list, options):
    """Clone a user to one or more new user accounts

    This method will create one or more new user accounts copying the
    grant statements from a given user. If source and destination are the
    same, the copy occurs on a single server; otherwise, the caller may
    specify a destination server to where the user accounts will be copied.

    NOTES:
    The user is responsible for making sure the databases and objects
    referenced in the cloned GRANT statements exist prior to running this
    utility.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    base_user[in]      the user account on the source machine to be used as
                       the template for the new users
    new_user_list[in]  a list of new user accounts in the form:
                       (username:password@host)
    options[in]        optional parameters dictionary including:
                         dump_sql - if True, print grants for base user
                                    (no new users are created)
                         force    - drop new users if they exist
                         verbosity - print add'l information during operation
                         quiet   - do not print information during operation
                                   Note: Error messages are printed regardless
                         global_privs - include global privileges (i.e. user@%)

    Returns bool True = success, raises UtilError if error
    """

    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.user import User

    dump_sql = options.get("dump", False)
    overwrite = options.get("overwrite", False)
    verbosity = options.get("verbosity", False)
    quiet = options.get("quiet", False)
    global_privs = options.get("global_privs", False)

    # Don't require destination for dumping base user grants
    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.0",
    }
    if dump_sql:
        servers = connect_servers(src_val, None, conn_options)
    else:
        servers = connect_servers(src_val, dest_val, conn_options)

    source = servers[0]
    destination = servers[1]
    if destination is None:
        destination = servers[0]

    # Create an instance of the user class for source.
    user_source = User(source, base_user, verbosity)

    # Create an instance of the user class for destination.
    user_dest = User(destination, base_user, verbosity)

    # Check to ensure base user exists.
    if not user_source.exists(base_user):
        raise UtilError("Base user does not exist!")

    # Process dump operation
    if dump_sql and not quiet:
        print "Dumping grants for user " + base_user
        user_source.print_grants()
        return True

    # Check to ensure new users don't exist.
    if overwrite is None:
        for new_user in new_user_list:
            if user_dest.exists(new_user):
                raise UtilError("User %s already exists. Use --force "
                      "to drop and recreate user." % new_user)

    if not quiet:
        print "# Cloning %d users..." % (len(new_user_list))

    # Perform the clone here. Loop through new users and clone.
    for new_user in new_user_list:
        if not quiet:
            print "# Cloning %s to user %s " % (base_user, new_user)
        # Check to see if user exists.
        if user_dest.exists(new_user):
            user_dest.drop(new_user)
        # Clone user.
        try:
            user_source.clone(new_user, destination, global_privs)
        except UtilError:
            raise
Example #32
0
def clone_user(src_val, dest_val, base_user, new_user_list, options):
    """Clone a user to one or more new user accounts

    This method will create one or more new user accounts copying the
    grant statements from a given user. If source and destination are the
    same, the copy occurs on a single server; otherwise, the caller may
    specify a destination server to where the user accounts will be copied.

    NOTES:
    The user is responsible for making sure the databases and objects
    referenced in the cloned GRANT statements exist prior to running this
    utility.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    base_user[in]      the user account on the source machine to be used as
                       the template for the new users
    new_user_list[in]  a list of new user accounts in the form:
                       (username:password@host)
    options[in]        optional parameters dictionary including:
                         dump_sql - if True, print grants for base user
                                    (no new users are created)
                         force    - drop new users if they exist
                         verbosity - print add'l information during operation
                         quiet   - do not print information during operation
                                   Note: Error messages are printed regardless
                         global_privs - include global privileges (i.e. user@%)

    Returns bool True = success, raises UtilError if error
    """
    dump_sql = options.get("dump", False)
    overwrite = options.get("overwrite", False)
    verbosity = options.get("verbosity", False)
    quiet = options.get("quiet", False)
    global_privs = options.get("global_privs", False)

    # Don't require destination for dumping base user grants
    conn_options = {
        'quiet': quiet,
        'version': "5.1.0",
    }

    # Add ssl certs if there are any.
    conn_options['ssl_cert'] = options.get("ssl_cert", None)
    conn_options['ssl_ca'] = options.get("ssl_ca", None)
    conn_options['ssl_key'] = options.get("ssl_key", None)

    if dump_sql:
        servers = connect_servers(src_val, None, conn_options)
    else:
        servers = connect_servers(src_val, dest_val, conn_options)

    source = servers[0]
    destination = servers[1]
    if destination is None:
        destination = servers[0]

    # Create an instance of the user class for source.
    user_source = User(source, base_user, verbosity)

    # Create an instance of the user class for destination.
    user_dest = User(destination, base_user, verbosity)

    # First find out which user will be giving the grants on the
    # destination server.
    try:
        res = destination.exec_query("SELECT CURRENT_USER()")
    except UtilDBError as err:
        raise UtilError("Unable to obtain information about the account used "
                        "to connect to the destination server: "
                        "{0}".format(err.errmsg))

    # Create an instance of the user who will be giving the privileges.
    user_priv_giver = User(destination, res[0][0], verbosity)

    # Check to ensure base user exists.
    if not user_source.exists(base_user):
        raise UtilError("Base user does not exist!")

    # Process dump operation
    if dump_sql and not quiet:
        _show_user_grants(source, user_source, base_user, verbosity)
        return True

    # Check to ensure new users don't exist.
    if overwrite is None:
        for new_user in new_user_list:
            if user_dest.exists(new_user):
                raise UtilError("User %s already exists. Use --force "
                                "to drop and recreate user." % new_user)

    if not quiet:
        print "# Cloning %d users..." % (len(new_user_list))
    # Check privileges to create/delete users.
    can_create = can_drop = False
    if user_priv_giver.has_privilege('*', '*', "CREATE_USER"):
        can_create = can_drop = True
    else:
        if user_priv_giver.has_privilege('mysql', '*', "INSERT"):
            can_create = True
        if user_priv_giver.has_privilege('mysql', '*', "DELETE"):
            can_drop = True

    if not can_create:  # Destination user cannot create new users.
        raise UtilError("Destination user {0}@{1} needs either the "
                        "'CREATE USER' on *.* or 'INSERT' on mysql.* "
                        "privilege to create new users."
                        "".format(user_priv_giver.user, user_priv_giver.host))

    # Perform the clone here. Loop through new users and clone.
    for new_user in new_user_list:
        if not quiet:
            print "# Cloning %s to user %s " % (base_user, new_user)
        # Check to see if user exists.
        if user_dest.exists(new_user):
            if not can_drop:  # Destination user cannot drop existing users.
                raise UtilError("Destination user {0}@{1} needs either the "
                                "'CREATE USER' on *.* or 'DELETE' on mysql.* "
                                "privilege to drop existing users."
                                "".format(user_priv_giver.user,
                                          user_priv_giver.host))

            user_dest.drop(new_user)
        # Clone user.
        try:
            missing_privs = user_priv_giver.missing_user_privileges(
                user_source, plus_grant_option=True)
            if not missing_privs:
                user_source.clone(new_user, destination, global_privs)
            else:
                # Our user lacks some privileges; let's create an
                # informative error message.
                pluralize = '' if len(missing_privs) == 1 else 's'
                missing_privs_str = ', '.join([
                    "{0} on {1}.{2}".format(priv, db, table)
                    for priv, db, table in missing_privs
                ])
                raise UtilError("User {0} cannot be cloned because destination"
                                " user {1}@{2} is missing the following "
                                "privilege{3}: {4}."
                                "".format(new_user, user_priv_giver.user,
                                          user_priv_giver.host, pluralize,
                                          missing_privs_str))
        except UtilError:
            raise

    if not quiet:
        print "# ...done."

    return True
Example #33
0
def export_databases(server_values, db_list, options):
    """Export one or more databases
    
    This method performs the export of a list of databases, first dumping
    the definitions and then the data. It supports dumping replication
    commands (STOP SLAVE, CHANGE MASTER, START SLAVE) for exporting data for
    use in replication scenarios.
    
    server_values[in]      server connection value dictionary
    db_list[in]            list of database names
    options[in]            option dictionary
                           Must include the skip_* options for copy and export
    """
    
    from mysql.utilities.command.dbcopy import get_copy_lock
    from mysql.utilities.common.server import connect_servers

    export = options.get("export", "definitions")
    rpl_mode = options.get("rpl_mode", "master")
    quiet = options.get("quiet", False)
    verbosity = options.get("verbosity", 0)
    locking = options.get("locking", "snapshot")
    skip_gtids = options.get("skip_gtid", False) # default is to generate GTIDs
        
    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(server_values, None, conn_options)
    source = servers[0]
    
    # Check for GTID support
    supports_gtid = servers[0].supports_gtid()
    if not skip_gtids and supports_gtid != 'ON':
        skip_gtids = True
    elif skip_gtids and supports_gtid == 'ON':
        print _GTID_WARNING
        
    if not skip_gtids and supports_gtid == 'ON':
        # Check GTID version for complete feature support
        servers[0].check_gtid_version()
        warning_printed = False
        # Check to see if this is a full export (complete backup)
        all_dbs = servers[0].exec_query("SHOW DATABASES")
        for db in all_dbs:
            if warning_printed:
                continue
            if db[0].upper() in ["MYSQL", "INFORMATION_SCHEMA",
                                 "PERFORMANCE_SCHEMA"]:
                continue
            if db[0] not in db_list:
                print _GTID_BACKUP_WARNING
                warning_printed = True
    
    # Lock tables first
    my_lock = get_copy_lock(source, db_list, options, True)

    # if --rpl specified, write initial replication command
    rpl_info = None
    if rpl_mode:
        rpl_info = get_change_master_command(source, options)
        write_commands(rpl_info[_RPL_FILE], ["STOP SLAVE;"], options)

    # if GTIDs enabled and user requested the output, write the GTID commands
    if skip_gtids:
        gtid_info = None
    else:
        gtid_info = get_gtid_commands(source, options)

    if gtid_info:
        write_commands(sys.stdout, gtid_info[0], options)
        
    # dump metadata
    if export in ("definitions", "both"):
        export_metadata(source, server_values, db_list, options)
        
    # dump data
    if export in ("data", "both"):
        if options.get("display", "brief") != "brief":
            print "# NOTE : --display is ignored for data export."
        export_data(source, server_values, db_list, options)
        
    # if GTIDs enabled, write the GTID-related commands
    if gtid_info:
        write_commands(sys.stdout, [gtid_info[1]], options)
    # if --rpl specified, write replication end command
    if rpl_mode:
        write_commands(rpl_info[_RPL_FILE], rpl_info[_RPL_COMMANDS],
                       options)
        write_commands(rpl_info[_RPL_FILE], ["START SLAVE;"], options)

    my_lock.unlock()
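
A minimal usage sketch for export_databases; the option names mirror
those read at the top of the function, while the connection values and
database names are assumptions.

# Hypothetical invocation (all values are assumptions).
server = {'user': 'root', 'passwd': 'secret', 'host': 'localhost',
          'port': 3306}
options = {'export': 'both', 'rpl_mode': 'master', 'quiet': True,
           'locking': 'snapshot', 'skip_gtid': False}
export_databases(server, ['world', 'employees'], options)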
Example #34
0
    # Parse source connection values
    try:
        source_values = parse_connection(opt.server, None, opt)
    except FormatError:
        _, err, _ = sys.exc_info()
        parser.error("Source connection values invalid: %s." % err)
    except UtilError:
        _, err, _ = sys.exc_info()
        parser.error("Source connection values invalid: %s." % err.errmsg)

    try:
        conn_options = {
            'version': "5.1.30",
        }
        servers = connect_servers(source_values, None, conn_options)
    except UtilError:
        _, e, _ = sys.exc_info()
        parser.error(e.errmsg)

    try:
        res = servers[0].show_server_variable("datadir")
        datadir = res[0][1]
    except UtilError:
        _, e, _ = sys.exc_info()
        parser.error(e.errmsg)

    # Flag for testing if this is a remote server
    is_remote = not servers[0].is_alias("localhost")

    # Flag for read access to the datadir
Example #35
0
def _server_info(server_val, get_defaults=False, options=None):
    """Show information about a running server

    This method gathers information from a running server. This information is
    returned as a tuple to be displayed to the user in a format specified. The
    information returned includes the following:

    * server connection information
    * version number of the server
    * data directory path
    * base directory path
    * plugin directory path
    * configuration file location and name
    * current binary log file
    * current binary log position
    * current relay log file
    * current relay log position

    server_val[in]    the server connection values or a connected server
    get_defaults[in]  if True, get the default settings for the server
    options[in]       options for connecting to the server

    Return tuple - information about server
    """
    if options is None:
        options = {}
    # Parse source connection values
    source_values = parse_connection(server_val, None, options)

    # Connect to the server
    conn_options = {
        'version': "5.1.30",
    }
    servers = connect_servers(source_values, None, conn_options)
    server = servers[0]

    params_dict = defaultdict(str)

    # Initialize list of warnings
    params_dict['warnings'] = []

    # Identify server by string: 'host:port[:socket]'.
    server_id = "{0}:{1}".format(source_values['host'], source_values['port'])
    if source_values.get('socket', None):
        server_id = "{0}:{1}".format(server_id, source_values.get('socket'))
    params_dict['server'] = server_id

    # Get _SERVER_VARIABLES values from the server
    for server_var in _SERVER_VARIABLES:
        res = server.show_server_variable(server_var)
        if res:
            params_dict[server_var] = res[0][1]
        else:
            raise UtilError("Unable to determine {0} of server '{1}'"
                            ".".format(server_var, server_id))

    # Get _LOG_FILES_VARIABLES values from the server
    for msg, log_tpl in _LOG_FILES_VARIABLES.iteritems():
        res = server.show_server_variable(log_tpl.log_name)
        if res:
            # Check if log is turned off
            params_dict[log_tpl.log_name] = res[0][1]
            # If logs are turned off, skip checking information about the file
            if res[0][1] in ('', 'OFF'):
                continue

            # Logging is enabled, so we can get information about log_file
            # unless it is log_error because in that case we already have it.
            if log_tpl.log_file is not None:  # if it is not log_error
                log_file = server.show_server_variable(
                    log_tpl.log_file)[0][1]
                params_dict[log_tpl.log_file] = log_file
            else:  # log error, so log_file_name is already on params_dict
                log_file = params_dict[log_tpl.log_name]

            # Now get the information about the size of the logs
            try:
                params_dict[log_tpl.log_file_size] = "{0} bytes".format(
                    os.path.getsize(log_file))

            except os.error:
                # if we are unable to get the log_file_size
                params_dict[log_tpl.log_file_size] = ''
                warning_msg = _WARNING_TEMPLATE.format(msg, log_file)
                params_dict['warnings'].append(warning_msg)
        else:
            params_dict['warnings'].append("Unable to get information "
                                           "regarding variable '{0}'"
                                           ).format(msg)

    # if audit_log plugin is installed and enabled
    if server.supports_plugin('audit'):
        res = server.show_server_variable('audit_log_file')
        if res:
            # Audit_log variable might be a relative path to the datadir,
            # so it needs to be treated accordingly
            if not os.path.isabs(res[0][1]):
                params_dict['audit_log_file'] = os.path.join(
                    params_dict['datadir'], res[0][1])
            else:
                params_dict['audit_log_file'] = res[0][1]

            # Add audit_log field to the _COLUMNS List unless it is already
            # there
            if 'audit_log_file' not in _COLUMNS_SET:
                _COLUMNS.append('audit_log_file')
                _COLUMNS.append('audit_log_file_size')
                _COLUMNS_SET.add('audit_log_file')
            try:
                params_dict['audit_log_file_size'] = "{0} bytes".format(
                    os.path.getsize(params_dict['audit_log_file']))

            except os.error:
                # If we are unable to get the size of the audit_log_file
                params_dict['audit_log_file_size'] = ''
                warning_msg = _WARNING_TEMPLATE.format(
                    "audit log",
                    params_dict['audit_log_file']
                )
                params_dict['warnings'].append(warning_msg)

    # Build search path for config files
    if os.name == "posix":
        my_def_search = ["/etc/my.cnf", "/etc/mysql/my.cnf",
                         os.path.join(params_dict['basedir'], "my.cnf"),
                         "~/.my.cnf"]
    else:
        my_def_search = [r"c:\windows\my.ini", r"c:\my.ini", r"c:\my.cnf",
                         os.path.join(os.curdir, "my.ini")]
    my_def_search.append(os.path.join(os.curdir, "my.cnf"))

    # Get server's default configuration values.
    defaults = []
    if get_defaults:
        # Can only get defaults for local servers (need to access local data).
        if server.is_alias('localhost'):
            try:
                my_def_path = get_tool_path(params_dict['basedir'],
                                            "my_print_defaults")
            except UtilError as err:
                raise UtilError("Unable to retrieve the defaults data "
                                "(requires access to my_print_defaults): {0} "
                                "(basedir: {1})".format(err.errmsg,
                                                        params_dict['basedir'])
                                )
            out_file = tempfile.TemporaryFile()
            # Execute tool: <basedir>/my_print_defaults mysqld
            subprocess.call([my_def_path, "mysqld"], stdout=out_file)
            out_file.seek(0)
            # Get defaults data from temp output file.
            defaults.append("\nDefaults for server {0}".format(server_id))
            for line in out_file.readlines():
                defaults.append(line.rstrip())
        else:
            # Remote server; Cannot get the defaults data.
            defaults.append("\nWARNING: The utility can not get defaults from "
                            "a remote host.")

    # Find config file
    config_file = ""
    for search_path in my_def_search:
        if os.path.exists(search_path):
            if len(config_file) > 0:
                config_file = "{0}, {1}".format(config_file, search_path)
            else:
                config_file = search_path
    params_dict['config_file'] = config_file

    # Find binary log, relay log
    params_dict['binary_log'], params_dict['binary_log_pos'] = _get_binlog(
        server)
    params_dict['relay_log'], params_dict['relay_log_pos'] = _get_relay_log(
        server)

    server.disconnect()

    return params_dict, defaults
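# --- Usage sketch (not part of the example above) ---
# A hypothetical call to _server_info; the connection string is made up
# and assumes parse_connection accepts the user:passwd@host:port form.
if __name__ == "__main__":
    info, defaults = _server_info("root:secret@localhost:3306",
                                  get_defaults=True)
    for key in ('server', 'version', 'datadir', 'basedir', 'config_file'):
        print "%s: %s" % (key, info[key])
    for warning in info['warnings']:
        print "WARNING:", warning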
Example #36
0
def clone_user(src_val, dest_val, base_user, new_user_list, options):
    """Clone a user to one or more new user accounts

    This method will create one or more new user accounts, copying the
    grant statements from a given user. If the source and destination are
    the same, the copy occurs on a single server; otherwise, the user
    accounts are copied to the specified destination server.

    NOTES:
    The user is responsible for making sure the databases and objects
    referenced in the cloned GRANT statements exist prior to running this
    utility.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    base_user[in]      the user account on the source machine to be used as
                       the template for the new users
    new_user_list[in]  a list of new user accounts in the form:
                       (username:password@host)
    options[in]        optional parameters dictionary including:
                         dump_sql - if True, print grants for base user
                                    (no new users are created)
                         force    - drop new users if they exist
                         verbosity - print add'l information during operation
                         quiet   - do not print information during operation
                                   Note: Error messages are printed regardless
                         global_privs - include global privileges (i.e. user@%)

    Returns bool True = success, raises UtilError if error
    """
    dump_sql = options.get("dump", False)
    overwrite = options.get("overwrite", False)
    verbosity = options.get("verbosity", False)
    quiet = options.get("quiet", False)
    global_privs = options.get("global_privs", False)

    # Don't require destination for dumping base user grants
    conn_options = {
        'quiet': quiet,
        'version': "5.1.0",
    }
    if dump_sql:
        servers = connect_servers(src_val, None, conn_options)
    else:
        servers = connect_servers(src_val, dest_val, conn_options)

    source = servers[0]
    destination = servers[1]
    if destination is None:
        destination = servers[0]

    # Create an instance of the user class for source.
    user_source = User(source, base_user, verbosity)

    # Create an instance of the user class for destination.
    user_dest = User(destination, base_user, verbosity)

    # Check to ensure base user exists.
    if not user_source.exists(base_user):
        raise UtilError("Base user does not exist!")

    # Process dump operation
    if dump_sql and not quiet:
        _show_user_grants(source, user_source, base_user, verbosity)
        return True

    # Check to ensure new users don't exist.
    if not overwrite:
        for new_user in new_user_list:
            if user_dest.exists(new_user):
                raise UtilError("User %s already exists. Use --force "
                                "to drop and recreate user." % new_user)

    if not quiet:
        print "# Cloning %d users..." % (len(new_user_list))

    # Perform the clone here. Loop through new users and clone.
    for new_user in new_user_list:
        if not quiet:
            print "# Cloning %s to user %s " % (base_user, new_user)
        # Check to see if user exists.
        if user_dest.exists(new_user):
            user_dest.drop(new_user)
        # Clone user (UtilError from clone() propagates to the caller).
        user_source.clone(new_user, destination, global_privs)

    if not quiet:
        print "# ...done."

    return True
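# --- Usage sketch (not part of the example above) ---
# A hypothetical invocation of clone_user; the connection dictionary,
# account names, and option values are illustrative assumptions only.
conn = {'user': 'root', 'passwd': 'secret', 'host': 'localhost',
        'port': 3306, 'socket': None}
options = {'overwrite': True, 'quiet': False, 'global_privs': True}
# Clone joe@localhost to two new accounts on the same server; new users
# use the username:password@host form described in the docstring.
clone_user(conn, conn, "joe@localhost",
           ["joe_clone:secret1@localhost", "jane:secret2@localhost"],
           options)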
Example #37
0
    def _setup_replication(self, master_vals, use_rpl_setup=True):
        """Setup replication among a master and a slave.

        master_vals[in]      Master server connection dictionary.
        use_rpl_setup[in]    Use Replication.setup() if True; otherwise, use
                             switch_master() on the slave. This is used to
                             control the first pass in the masters round-robin
                             scheduling.
        """
        conn_options = {
            "src_name": "master",
            "dest_name": "slave",
            "version": "5.0.0",
            "unique": True,
        }
        (master, slave,) = connect_servers(master_vals, self.slave_vals,
                                           conn_options)
        rpl_options = self.options.copy()
        rpl_options["verbosity"] = self.verbosity > 0

        # Start from beginning only on the first pass
        if rpl_options.get("from_beginning", False) and not use_rpl_setup:
            rpl_options["from_beginning"] = False

        # Create an instance of the replication object
        rpl = Replication(master, slave, rpl_options)

        if use_rpl_setup:
            # Check server ids
            errors = rpl.check_server_ids()
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Check for server_id uniqueness
            errors = rpl.check_server_uuids()
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Check InnoDB compatibility
            errors = rpl.check_innodb_compatibility(self.options)
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Checking storage engines
            errors = rpl.check_storage_engines(self.options)
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Check master for binary logging
            errors = rpl.check_master_binlog()
            if errors:
                raise UtilRplError(errors[0])

            # Setup replication
            if not rpl.setup(self.rpl_user, 10):
                msg = "Cannot setup replication."
                self._report(msg, logging.CRITICAL, False)
                raise UtilRplError(msg)
        else:
            # Parse user and password (support login-paths)
            try:
                (r_user, r_pass,) = parse_user_password(self.rpl_user)
            except FormatError:
                raise UtilError(USER_PASSWORD_FORMAT.format("--rpl-user"))

            # Switch master and start slave
            slave.switch_master(master, r_user, r_pass)
            slave.start({'fetch': False})

        # Disconnect from servers
        master.disconnect()
        slave.disconnect()
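# --- Usage sketch (not part of the example above) ---
# The core of _setup_replication as a standalone sketch: connect a
# master/slave pair and run Replication.setup(). Module paths follow the
# usual mysql-utilities layout; connection values and the replication
# user are hypothetical.
from mysql.utilities.common.server import connect_servers
from mysql.utilities.common.replication import Replication

master_vals = {'user': 'root', 'passwd': 'secret', 'host': 'master1',
               'port': 3306, 'socket': None}
slave_vals = {'user': 'root', 'passwd': 'secret', 'host': 'slave1',
              'port': 3306, 'socket': None}
conn_options = {'src_name': "master", 'dest_name': "slave",
                'version': "5.0.0", 'unique': True}
master, slave = connect_servers(master_vals, slave_vals, conn_options)
rpl = Replication(master, slave, {'verbosity': False})
if not rpl.setup("rpl:rplpass", 10):
    raise RuntimeError("Cannot setup replication.")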
Example #38
0
                master = Server({'conn_info': master_val})
                slave = Server({'conn_info': slave_val})
                parser.error(
                    ERROR_MASTER_IN_SLAVES.format(master_host=master.host,
                                                  master_port=master.port,
                                                  slaves_candidates="slaves",
                                                  slave_host=slave.host,
                                                  slave_port=slave.port)
                )
        # Get the sql_mode set on the master
        conn_opts = {
            'quiet': True,
            'version': "5.1.30",
        }
        try:
            servers = connect_servers(master_val, None, conn_opts)
            sql_mode = servers[0].select_variable("SQL_MODE")
        except UtilError:
            sql_mode = ''
    else:
        sql_mode = ''

    # Process list of databases/tables to exclude (check format errors).
    data_to_exclude = {}
    if opt.exclude:
        exclude_list = [val for val in opt.exclude.split(',') if val]
        data_to_exclude = db_objects_list_to_dictionary(parser, exclude_list,
                                                        'the --exclude option',
                                                        sql_mode=sql_mode)
    elif opt.exclude == '':
        # Issue an error if --exclude is used with no value.
Example #39
0
def export_databases(server_values, db_list, output_file, options):
    """Export one or more databases

    This method performs the export of a list of databases, first dumping the
    definitions and then the data. It supports dumping replication commands
    (STOP SLAVE, CHANGE MASTER, START SLAVE) for exporting data for use in
    replication scenarios.

    server_values[in]   server connection value dictionary.
    db_list[in]         list of database names.
    output_file[in]     file to store export output.
    options[in]         option dictionary.
                        Note: Must include the skip_* options for export.
    """
    fkeys_present = False
    export = options.get("export", "definitions")
    rpl_mode = options.get("rpl_mode", "master")
    quiet = options.get("quiet", False)
    skip_gtids = options.get("skip_gtid", False)  # default: generate GTIDs
    skip_fkeys = options.get("skip_fkeys", False)  # default: gen fkeys stmts

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(server_values, None, conn_options)
    source = servers[0]

    # Retrieve all databases, if --all is used.
    if options.get("all", False):
        rows = source.get_all_databases()
        for row in rows:
            if row[0] not in db_list:
                db_list.append(row[0])

    # Check user permissions on source server for all databases.
    check_read_permissions(source, db_list, options)

    # Check for GTID support
    supports_gtid = servers[0].supports_gtid()
    if not skip_gtids and supports_gtid != 'ON':
        skip_gtids = True
    elif skip_gtids and supports_gtid == 'ON':
        output_file.write(_GTID_WARNING)

    if not skip_gtids and supports_gtid == 'ON':
        # Check GTID version for complete feature support
        servers[0].check_gtid_version()
        warning_printed = False
        # Check to see if this is a full export (complete backup)
        all_dbs = servers[0].exec_query("SHOW DATABASES")
        for db in all_dbs:
            if warning_printed:
                continue
            if db[0].upper() in ["MYSQL", "INFORMATION_SCHEMA",
                                 "PERFORMANCE_SCHEMA"]:
                continue
            if db[0] not in db_list:
                output_file.write(_GTID_BACKUP_WARNING)
                warning_printed = True

    # Check for existence of foreign keys
    fkeys_enabled = servers[0].foreign_key_checks_enabled()
    if fkeys_enabled and skip_fkeys:
        output_file.write("# WARNING: Output contains tables with foreign key "
                          "contraints. You should disable foreign key checks "
                          "prior to importing this stream.\n")
    elif fkeys_enabled and db_list:
        db_name_list = ["'{0}'".format(db) for db in db_list]
        res = source.exec_query(_FKEYS.format(",".join(db_name_list)))
        if res and res[0]:
            fkeys_present = True
            write_commands(output_file, [_FKEYS_SWITCH.format("0")], options,
                           True)

    # Lock tables first
    my_lock = get_copy_lock(source, db_list, options, True)

    # Determine comment prefix for rpl commands.
    rpl_cmt_prefix = ""
    rpl_cmt = False
    if options.get("comment_rpl", False) or rpl_mode == "both":
        rpl_cmt_prefix = "#"
        rpl_cmt = True
    if options.get("format", "sql") != 'sql':
        rpl_cmt_prefix = _RPL_PREFIX
        rpl_cmt = True

    # if --rpl specified, write initial replication command
    rpl_info = None
    rpl_file = None
    if rpl_mode:
        rpl_info = get_change_master_command(source, options)
        if rpl_info[_RPL_FILE]:
            rpl_file = open(rpl_info[_RPL_FILE], 'w')
        else:
            rpl_file = output_file
        write_commands(rpl_file, ["STOP SLAVE;"], options, True, rpl_cmt,
                       rpl_cmt_prefix)

    # if GTIDs enabled and user requested the output, write the GTID commands
    if skip_gtids:
        gtid_info = None
    else:
        gtid_info = get_gtid_commands(source)

    if gtid_info:
        write_commands(output_file, gtid_info[0], options, True, rpl_cmt,
                       rpl_cmt_prefix)

    # dump metadata
    if export in ("definitions", "both"):
        _export_metadata(source, db_list, output_file, options)

    # dump data
    if export in ("data", "both"):
        if options.get("display", "brief") != "brief":
            output_file.write(
                "# NOTE : --display is ignored for data export.\n"
            )
        _export_data(source, server_values, db_list, output_file, options)

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info:
        write_commands(output_file, [gtid_info[1]], options, True, rpl_cmt,
                       rpl_cmt_prefix)
    # if --rpl specified, write replication end command
    if rpl_mode:
        write_commands(rpl_file, rpl_info[_RPL_COMMANDS], options,
                       True, rpl_cmt, rpl_cmt_prefix)
        write_commands(rpl_file, ["START SLAVE;"], options, True,
                       rpl_cmt, rpl_cmt_prefix)
        # Last command wrote rpl_file, close it.
        if rpl_info[_RPL_FILE]:
            rpl_file.close()

    my_lock.unlock()

    if fkeys_present and fkeys_enabled and not skip_fkeys:
        write_commands(output_file, [_FKEYS_SWITCH.format("1")], options, True)
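# --- Usage sketch (not part of the example above) ---
# A hypothetical driver for export_databases; the option names follow the
# defaults the function reads, everything else is made up.
server_values = {'user': 'root', 'passwd': 'secret', 'host': 'localhost',
                 'port': 3306, 'socket': None}
options = {'export': 'both', 'rpl_mode': 'master', 'quiet': True,
           'skip_gtid': False, 'format': 'sql'}
out_file = open('backup.sql', 'w')
try:
    export_databases(server_values, ['world'], out_file, options)
finally:
    out_file.close()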
Example #40
0
    def _setup_replication(self, master_vals, use_rpl_setup=True):
        """Setup replication among a master and a slave.

        master_vals[in]      Master server connection dictionary.
        use_rpl_setup[in]    Use Replication.setup() if True; otherwise, use
                             switch_master() on the slave. This is used to
                             control the first pass in the masters round-robin
                             scheduling.
        """
        conn_options = {
            "src_name": "master",
            "dest_name": "slave",
            "version": "5.0.0",
            "unique": True,
        }
        (master, slave,) = connect_servers(master_vals, self.slave_vals,
                                           conn_options)
        rpl_options = self.options.copy()
        rpl_options["verbosity"] = self.verbosity > 0

        # Start from beginning only on the first pass
        if rpl_options.get("from_beginning", False) and not use_rpl_setup:
            rpl_options["from_beginning"] = False

        # Create an instance of the replication object
        rpl = Replication(master, slave, rpl_options)

        if use_rpl_setup:
            # Check server ids
            errors = rpl.check_server_ids()
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Check for server_id uniqueness
            errors = rpl.check_server_uuids()
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Check InnoDB compatibility
            errors = rpl.check_innodb_compatibility(self.options)
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Checking storage engines
            errors = rpl.check_storage_engines(self.options)
            for error in errors:
                self._report(error, logging.ERROR, True)

            # Check master for binary logging
            errors = rpl.check_master_binlog()
            if errors:
                raise UtilRplError(errors[0])

            # Setup replication
            if not rpl.setup(self.rpl_user, 10):
                msg = "Cannot setup replication."
                self._report(msg, logging.CRITICAL, False)
                raise UtilRplError(msg)
        else:
            # Parse user and password (support login-paths)
            (r_user, r_pass,) = parse_user_password(self.rpl_user)

            # Switch master and start slave
            slave.switch_master(master, r_user, r_pass)
            slave.start({'fetch': False})

        # Disconnect from servers
        master.disconnect()
        slave.disconnect()
Example #41
0
def export_databases(server_values, db_list, output_file, options):
    """Export one or more databases

    This method performs the export of a list of databases, first dumping the
    definitions and then the data. It supports dumping replication commands
    (STOP SLAVE, CHANGE MASTER, START SLAVE) for exporting data for use in
    replication scenarios.

    server_values[in]   server connection value dictionary.
    db_list[in]         list of database names.
    output_file[in]     file to store export output.
    options[in]         option dictionary.
                        Note: Must include the skip_* options for export.
    """
    fkeys_present = False
    export = options.get("export", "definitions")
    rpl_mode = options.get("rpl_mode", "master")
    quiet = options.get("quiet", False)
    skip_gtids = options.get("skip_gtid", False)  # default: generate GTIDs
    skip_fkeys = options.get("skip_fkeys", False)  # default: gen fkeys stmts

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(server_values, None, conn_options)
    source = servers[0]

    # Retrieve all databases, if --all is used.
    if options.get("all", False):
        rows = source.get_all_databases()
        for row in rows:
            if row[0] not in db_list:
                db_list.append(row[0])

    # Check user permissions on source server for all databases.
    check_read_permissions(source, db_list, options)

    # Check for GTID support
    supports_gtid = servers[0].supports_gtid()
    if not skip_gtids and supports_gtid != 'ON':
        skip_gtids = True
    elif skip_gtids and supports_gtid == 'ON':
        output_file.write(_GTID_WARNING)

    if not skip_gtids and supports_gtid == 'ON':
        # Check GTID version for complete feature support
        servers[0].check_gtid_version()
        warning_printed = False
        # Check to see if this is a full export (complete backup)
        all_dbs = servers[0].exec_query("SHOW DATABASES")
        for db in all_dbs:
            if warning_printed:
                continue
            if db[0].upper() in [
                    "MYSQL", "INFORMATION_SCHEMA", "PERFORMANCE_SCHEMA"
            ]:
                continue
            if db[0] not in db_list:
                output_file.write(_GTID_BACKUP_WARNING)
                warning_printed = True

    # Check for existence of foreign keys
    fkeys_enabled = servers[0].foreign_key_checks_enabled()
    if fkeys_enabled and skip_fkeys:
        output_file.write("# WARNING: Output contains tables with foreign key "
                          "contraints. You should disable foreign key checks "
                          "prior to importing this stream.\n")
    elif fkeys_enabled and db_list:
        db_name_list = ["'{0}'".format(db) for db in db_list]
        res = source.exec_query(_FKEYS.format(",".join(db_name_list)))
        if res and res[0]:
            fkeys_present = True
            write_commands(output_file, [_FKEYS_SWITCH.format("0")], options,
                           True)

    # Lock tables first
    my_lock = get_copy_lock(source, db_list, options, True)

    # Determine comment prefix for rpl commands.
    rpl_cmt_prefix = ""
    rpl_cmt = False
    if options.get("comment_rpl", False) or rpl_mode == "both":
        rpl_cmt_prefix = "#"
        rpl_cmt = True
    if options.get("format", "sql") != 'sql':
        rpl_cmt_prefix = _RPL_PREFIX
        rpl_cmt = True

    # if --rpl specified, write initial replication command
    rpl_info = None
    rpl_file = None
    if rpl_mode:
        rpl_info = get_change_master_command(source, options)
        if rpl_info[_RPL_FILE]:
            rpl_file = open(rpl_info[_RPL_FILE], 'w')
        else:
            rpl_file = output_file
        write_commands(rpl_file, ["STOP SLAVE;"], options, True, rpl_cmt,
                       rpl_cmt_prefix)

    # if GTIDs enabled and user requested the output, write the GTID commands
    if skip_gtids:
        gtid_info = None
    else:
        gtid_info = get_gtid_commands(source)

    if gtid_info:
        write_commands(output_file, gtid_info[0], options, True, rpl_cmt,
                       rpl_cmt_prefix)

    # dump metadata
    if export in ("definitions", "both"):
        _export_metadata(source, db_list, output_file, options)

    # dump data
    if export in ("data", "both"):
        if options.get("display", "brief") != "brief":
            output_file.write(
                "# NOTE : --display is ignored for data export.\n")
        _export_data(source, server_values, db_list, output_file, options)

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info:
        write_commands(output_file, [gtid_info[1]], options, True, rpl_cmt,
                       rpl_cmt_prefix)
    # if --rpl specified, write replication end command
    if rpl_mode:
        write_commands(rpl_file, rpl_info[_RPL_COMMANDS], options, True,
                       rpl_cmt, rpl_cmt_prefix)
        write_commands(rpl_file, ["START SLAVE;"], options, True, rpl_cmt,
                       rpl_cmt_prefix)
        # Last command wrote rpl_file, close it.
        if rpl_info[_RPL_FILE]:
            rpl_file.close()

    my_lock.unlock()

    if fkeys_present and fkeys_enabled and not skip_fkeys:
        write_commands(output_file, [_FKEYS_SWITCH.format("1")], options, True)
Example #42
0
    output = os.path.abspath(options.get("output", ''))
    if not output.endswith('.sql'):
        parser.error("Output file must be end with .sql")

    output = os.path.dirname(output)
    if not os.path.isdir(output):
        parser.error("Output file path {0} does not exist.".format(output))

    # Get the sql_mode set on source and destination server
    conn_opts = {
        'quiet': True,
        'version': "5.1.30",
    }
    try:
        servers = connect_servers(server1_values, server2_values, conn_opts)
        server1_sql_mode = servers[0].select_variable("SQL_MODE")
        if servers[1] is not None:
            server2_sql_mode = servers[1].select_variable("SQL_MODE")
        else:
            server2_sql_mode = ''
    except UtilError:
        server1_sql_mode = ''
        server2_sql_mode = ''

    # run the diff
    diff_failed = False
    for argument in args:
        db1, obj1, db2, obj2 = parse_database(argument)
        # We have db1.obj:db2.obj
        if obj1:
Example #43
0
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The force parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    db_list[in]        a list of (source, destination) database name tuples;
                       a destination name of None keeps the source name
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, force, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        force    - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """
    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    skip_gtid = options.get("skip_gtid", False)
    locking = options.get("locking", "snapshot")

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)
    cloning = (src_val == dest_val) or dest_val is None

    source = servers[0]
    if cloning:
        destination = servers[0]
    else:
        destination = servers[1]

    src_gtid = source.supports_gtid() == 'ON'
    dest_gtid = destination.supports_gtid() == 'ON' if destination else False

    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None))  # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")
    elif not skip_gtid and src_gtid:
        # Check to see if this is a full copy (complete backup)
        all_dbs = source.exec_query("SHOW DATABASES")
        dbs = [db[0] for db in db_list]
        for db in all_dbs:
            if db[0].upper() in ["MYSQL", "INFORMATION_SCHEMA",
                                 "PERFORMANCE_SCHEMA"]:
                continue
            if db[0] not in dbs:
                print _GTID_BACKUP_WARNING
                break

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check for copying to the same db name on the same server (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)

        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])

        # Check storage engines
        check_engine_options(destination,
                             options.get("new_engine", None),
                             options.get("def_engine", None),
                             False, options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    rpl_info = None

    # Turn off foreign keys if they were on at the start
    destination.disable_foreign_key_checks(True)

    # Get GTID commands
    if not skip_gtid:
        gtid_info = get_gtid_commands(source)
        if src_gtid and not dest_gtid:
            print _NON_GTID_WARNING % ("destination", "source", "to")
        elif not src_gtid and dest_gtid:
            print _NON_GTID_WARNING % ("source", "destination", "from")
    else:
        gtid_info = None
        if src_gtid and not cloning:
            print _GTID_WARNING

    # If cloning, turn off gtid generation
    if gtid_info and cloning:
        gtid_info = None
    # if GTIDs enabled, write the GTID commands
    if gtid_info and dest_gtid:
        # Check GTID version for complete feature support
        destination.check_gtid_version()
        # Check the gtid_purged value too
        destination.check_gtid_executed()
        for cmd in gtid_info[0]:
            print "# GTID operation:", cmd
            destination.exec_query(cmd, {'fetch': False, 'commit': False})

    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE", {'fetch': False, 'commit': False})

    # Copy (create) objects.
    # We need to delay trigger and events to after data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True

    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy tables data
    if not skip_data and not skip_tables:

        # Copy tables
        for db_name in db_list:

            # Get a Database class instance
            db = Database(source, db_name[0], options)

            # Perform the copy
            # Note: no longer uses threads; uses multiprocessing instead.
            db.init()
            db.copy_data(db_name[1], options, destination, connections=1,
                         src_con_val=src_val, dest_con_val=dest_val)

    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info and dest_gtid:
        print "# GTID operation:", gtid_info[1]
        destination.exec_query(gtid_info[1])

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    # Turn on foreign keys if they were on at the start
    destination.disable_foreign_key_checks(False)

    if not quiet:
        print "#...done."
    return True
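# --- Usage sketch (not part of the example above) ---
# A hypothetical call to copy_db; the (source, destination) name tuples
# match how db_list is indexed in the function above, and the connection
# values are made up.
conn = {'user': 'root', 'passwd': 'secret', 'host': 'localhost',
        'port': 3306, 'socket': None}
options = {'quiet': True, 'locking': 'snapshot'}
# Clone 'world' to 'world_copy' on the same server (src == dest).
copy_db(conn, conn, [('world', 'world_copy')], options)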
Example #44
0
def setup_replication(master_vals,
                      slave_vals,
                      rpl_user,
                      options,
                      test_db=None):
    """Setup replication among a master and a slave.

    master_vals[in]    Master connection in form user:passwd@host:port:sock
    slave_vals[in]     Slave connection in form user:passwd@host:port:sock
    rpl_user[in]       Replication user in the form user:passwd
    options[in]        dictionary of options (verbosity, quiet, pedantic)
    test_db[in]        Test replication using this database name (optional)
                       default = None
    """
    verbosity = options.get("verbosity", 0)

    conn_options = {
        'src_name': "master",
        'dest_name': 'slave',
        'version': "5.0.0",
        'unique': True,
    }
    servers = connect_servers(master_vals, slave_vals, conn_options)
    master = servers[0]
    slave = servers[1]

    rpl_options = options.copy()
    rpl_options['verbosity'] = verbosity > 0

    # Create an instance of the replication object
    rpl = Replication(master, slave, rpl_options)
    errors = rpl.check_server_ids()
    for error in errors:
        print error

    # Check for server_id uniqueness
    if verbosity > 0:
        print "# master id = %s" % master.get_server_id()
        print "#  slave id = %s" % slave.get_server_id()

    errors = rpl.check_server_uuids()
    for error in errors:
        print error

    # Check for server_uuid uniqueness
    if verbosity > 0:
        print "# master uuid = %s" % master.get_server_uuid()
        print "#  slave uuid = %s" % slave.get_server_uuid()

    # Check InnoDB compatibility
    if verbosity > 0:
        print "# Checking InnoDB statistics for type and version conflicts."

    errors = rpl.check_innodb_compatibility(options)
    for error in errors:
        print error

    # Checking storage engines
    if verbosity > 0:
        print "# Checking storage engines..."

    errors = rpl.check_storage_engines(options)
    for error in errors:
        print error

    # Check master for binary logging
    print "# Checking for binary logging on master..."
    errors = rpl.check_master_binlog()
    if errors:
        raise UtilError(errors[0])

    # Setup replication
    print "# Setting up replication..."
    if not rpl.setup(rpl_user, 10):
        raise UtilError("Cannot setup replication.")

    # Test the replication setup.
    if test_db:
        rpl.test(test_db, 10)

    print "# ...done."