Example #1
def _get_transform(server1, server2, object1, object2, options):
    """Get the transformation SQL statements
    
    This method generates the SQL statements to transform the destination
    object based on direction of the compare.
    
    server1[in]        first server connection
    server2[in]        second server connection
    object1            the first object in the compare in the form: (db.name)
    object2            the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, etc.)

    Returns list - transformation statements
    """
    from mysql.utilities.common.database import Database
    from mysql.utilities.common.sql_transform import SQLTransformer

    obj_type = None
    direction = options.get("changes-for", "server1")

    # If there is no dot, we do not have the format 'db_name.obj_name' for
    # object1 and therefore must treat it as a database name.
    if object1.find(".") == -1:
        obj_type = "DATABASE"

        # We are working with databases so db and name need to be set
        # to the database name to tell the get_object_definition() method
        # to retrieve the database information.
        db1 = object1
        db2 = object2
        name1 = object1
        name2 = object2
    else:
        try:
            db1, name1 = object1.split(".")
            db2, name2 = object2.split(".")
        except ValueError:
            raise UtilError("Invalid object name arguments for "
                            "_get_transform(): %s, %s." % (object1, object2))

    db_1 = Database(server1, db1, options)
    db_2 = Database(server2, db2, options)

    if obj_type is None:
        obj_type = db_1.get_object_type(name1)

    obj1 = db_1.get_object_definition(db1, name1, obj_type)
    obj2 = db_2.get_object_definition(db2, name2, obj_type)

    # Get the transformation based on direction.
    transform_str = []
    xform = SQLTransformer(db_1, db_2, obj1[0], obj2[0], obj_type,
                           options.get("verbosity", 0))

    differences = xform.transform_definition()
    if differences is not None and len(differences) > 0:
        transform_str.extend(differences)

    return transform_str
Example #2
    def check_objects(self, server, db, events=True):
        """Check number of objects.
        
        Creates a string containing the number of objects for a given database.
        
        server[in]         Server object to query
        db[in]             name of database to check
        events[in]         if True, include the event count (default True)
        
        Returns string
        """

        from mysql.utilities.common.database import Database

        db_source = Database(server, db)
        db_source.init()
        res = db_source.get_db_objects("TABLE")
        str = "OBJECT COUNTS: tables = %s, " % (len(res))
        res = db_source.get_db_objects("VIEW")
        str += "views = %s, " % (len(res))
        res = db_source.get_db_objects("TRIGGER")
        str += "triggers = %s, " % (len(res))
        res = db_source.get_db_objects("PROCEDURE")
        str += "procedures = %s, " % (len(res))
        res = db_source.get_db_objects("FUNCTION")
        str += "functions = %s, " % (len(res))
        if events:
            res = db_source.get_db_objects("EVENT")
            str += "events = %s \n" % (len(res))
        return str
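
A minimal standalone sketch of the same counting logic outside the test class, hedged: the connection values and the 'employees' database are placeholders, the dictionary keys follow the (user, password, host, port, socket) convention used in the docstrings here, and Database, init(), and get_db_objects() are called exactly as in the example above.

from mysql.utilities.common.server import connect_servers
from mysql.utilities.common.database import Database

# Placeholder connection values -- replace with a reachable server.
src_val = {"user": "root", "password": "secret", "host": "localhost",
           "port": 3306}
servers = connect_servers(src_val, None, {"quiet": True, "version": "5.1.30"})
source = servers[0]

db_source = Database(source, "employees")
db_source.init()
tables = db_source.get_db_objects("TABLE")
views = db_source.get_db_objects("VIEW")
print("OBJECT COUNTS: tables = %s, views = %s" % (len(tables), len(views)))
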
Example #3
def check_read_permissions(server, db_list, options):
    """
    Check user permissions on server for specified databases.

    This method checks if the user used to establish the connection to the
    server has read permissions to access the specified lists of databases.

    server[in]      Server instance.
    db_list[in]     List of databases to check.
    options[in]     Dictionary with access options:
        skip_views     True = no views processed
        skip_proc      True = no procedures processed
        skip_func      True = no functions processed
        skip_grants    True = no grants processed
        skip_events    True = no events processed

    Raises a UtilDBError if the server user does not have read permissions
    to access all specified databases or if any of them does not exist.
    """
    for db_name in db_list:
        source_db = Database(server, db_name)

        # Error if source database does not exist.
        if not source_db.exists():
            raise UtilDBError("Source database does not exist - "
                              "{0}".format(db_name), -1, db_name)

        # Check privileges to access database.
        source_db.check_read_access(server.user, server.host, options)
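
A hedged usage sketch for check_read_permissions(); the connection values and database names are placeholders, and the option keys are the skip_* flags listed in the docstring.

from mysql.utilities.common.server import connect_servers

src_val = {"user": "app_user", "password": "secret", "host": "localhost",
           "port": 3306}
servers = connect_servers(src_val, None, {"quiet": True, "version": "5.1.30"})
server = servers[0]

access_options = {
    "skip_views": False,
    "skip_proc": False,
    "skip_func": False,
    "skip_grants": True,
    "skip_events": False,
}
# Raises UtilDBError if a database is missing or not readable by app_user.
check_read_permissions(server, ["employees", "sakila"], access_options)
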
Example #4
def _copy_objects(source, destination, db_list, options,
                  show_message=True, do_create=True):
    """Copy objects for a list of databases

    This method loops through a list of databases copying the objects as
    controlled by the skip options.

    source[in]             Server class instance for source
    destination[in]        Server class instance for destination
    db_list[in]            list of (db_name, new_name) tuples; new_name is
                           None when the database is not renamed
    options[in]            copy options
    show_message[in]       if True, display copy message
                           Default = True
    do_create[in]          if True, execute create statement for database
                           Default = True
    """
    # Copy objects
    for db_name in db_list:

        if show_message:
            # Display copy message
            if not options.get('quiet', False):
                msg = "# Copying database %s " % db_name[0]
                if db_name[1]:
                    msg += "renamed as %s" % (db_name[1])
                print msg

        # Get a Database class instance
        db = Database(source, db_name[0], options)

        # Perform the copy
        db.init()
        db.copy_objects(db_name[1], options, destination,
                        options.get("threads", False), do_create)
Example #5
def check_read_permissions(server, db_list, options):
    """
    Check user permissions on server for specified databases.

    This method checks if the user used to establish the connection to the
    server has read permissions to access the specified lists of databases.

    server[in]      Server instance.
    db_list[in]     List of databases to check.
    options[in]     Dictionary with access options:
        skip_views     True = no views processed
        skip_proc      True = no procedures processed
        skip_func      True = no functions processed
        skip_grants    True = no grants processed
        skip_events    True = no events processed

    Raises a UtilDBError if the server user does not have read permissions
    to access all specified databases or if any of them does not exist.
    """
    for db_name in db_list:
        source_db = Database(server, db_name, options)

        # Error if source database does not exist.
        if not source_db.exists():
            raise UtilDBError(
                "Source database does not exist - "
                "{0}".format(db_name), -1, db_name)

        # Check privileges to access database.
        source_db.check_read_access(server.user, server.host, options)
Example #6
def _check_auto_increment(source, db_list, options):
    """Check auto increment values for 0

    If any tables are found to have 0 in the list of databases,
    the code prints a warning along with a sample statement
    that can be used should the user decide she needs it when
    she does the import.

    source[in]      Source connection
    db_list[in]     List of databases to export
    options[in]     Global option list
    """
    for db in db_list:
        db_obj = Database(source, db, options)
        # print warning if any tables have 0 as auto_increment value
        if db_obj.check_auto_increment():
            sql_mode = source.show_server_variable("sql_mode")
            sql_mode_str = "NO_AUTO_VALUE_ON_ZERO"
            if sql_mode[0]:
                sql_mode_str = sql_mode[0][1]
                if 'NO_AUTO_VALUE_ON_ZERO' not in sql_mode[0][1]:
                    sql_mode_str = ("'{0}',NO_AUTO_VALUE_ON_ZERO"
                                    "".format(sql_mode_str))
            print(_AUTO_INC_WARNING)
            print("# SET SQL_MODE = '{0}'\n#".format(sql_mode_str))
Example #7
def object_diff(server1_val, server2_val, object1, object2, options,
                object_type=None):
    """diff the definition of two objects

    Find the difference between two object definitions.

    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    object1[in]        the first object in the compare in the form: (db.name)
    object2[in]        the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype)
    object_type[in]    type of the objects to be compared (e.g., TABLE,
                       PROCEDURE, etc.). By default None (not defined).

    Returns None = objects are the same, diff[] = objects differ
    """
    server1, server2 = server_connect(server1_val, server2_val,
                                      object1, object2, options)

    # Get the object type if unknown considering that objects of different
    # types can be found with the same name.
    if not object_type:
        # Get object types of object1
        regexp_obj = re.compile(REGEXP_QUALIFIED_OBJ_NAME)
        m_obj = regexp_obj.match(object1)
        db_name, obj_name = m_obj.groups()
        db = Database(server1, db_name, options)
        obj1_types = db.get_object_type(obj_name)
        if not obj1_types:
            raise UtilDBError("The object {0} does not exist.".format(object1))

        # Get object types of object2
        m_obj = regexp_obj.match(object2)
        db_name, obj_name = m_obj.groups()
        db = Database(server2, db_name, options)
        obj2_types = db.get_object_type(obj_name)
        if not obj2_types:
            raise UtilDBError("The object {0} does not exist.".format(object2))

        # Merge types found for both objects
        obj_types = set(obj1_types + obj2_types)

        # Diff objects considering all types found
        result = []
        for obj_type in obj_types:
            res = diff_objects(server1, server2, object1, object2, options,
                               obj_type)
            if res:
                result.append(res)
        return result if len(result) > 0 else None
    else:
        # Diff objects of known type
        return diff_objects(server1, server2, object1, object2, options,
                            object_type)
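
A hedged usage sketch for object_diff(); the function connects on its own from the two connection-value dictionaries, whose keys follow the (user, password, host, port, socket) list in the docstring. Host names, credentials, the object names, and the difftype value are placeholders.

server1_val = {"user": "root", "password": "secret", "host": "prod-db",
               "port": 3306}
server2_val = {"user": "root", "password": "secret", "host": "staging-db",
               "port": 3306}
diff_options = {"quiet": False, "verbosity": 0, "difftype": "unified"}

result = object_diff(server1_val, server2_val,
                     "employees.salaries", "employees.salaries",
                     diff_options)
if result is None:
    print("# Objects are the same.")
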
Example #8
    def check_objects(self, server, db, events=True):
        """Check number of objects.
        
        Creates a string containing the number of objects for a given database.
        
        server[in]         Server object to query
        db[in]             name of database to check
        events[in]         if True, include the event count (default True)
        
        Returns string
        """

        from mysql.utilities.common.database import Database

        db_source = Database(server, db)
        db_source.init()
        res = db_source.get_db_objects("TABLE")
        str = "OBJECT COUNTS: tables = %s, " % (len(res))
        res = db_source.get_db_objects("VIEW")
        str += "views = %s, " % (len(res))
        res = db_source.get_db_objects("TRIGGER")
        str += "triggers = %s, " % (len(res))
        res = db_source.get_db_objects("PROCEDURE")
        str += "procedures = %s, " % (len(res))
        res = db_source.get_db_objects("FUNCTION")
        str += "functions = %s, " % (len(res))
        if events:
            res = db_source.get_db_objects("EVENT")
            str += "events = %s \n" % (len(res))
        return str
Example #9
def _get_objects(server, database, options):
    """Get all objects from the database (except grants)

    server[in]        connected server object
    database[in]      database name
    options[in]       global options

    Returns list - objects in database
    """
    options["skip_grants"] = True   # Tell db class to skip grants

    db_obj = Database(server, database, options)
    if not db_obj.exists():
        raise UtilDBError("The database does not exist: {0}".format(database))
    db_obj.init()
    db_objects = db_obj.objects
    db_objects.sort()

    return db_objects
Example #10
def get_create_object(server, object_name, options):
    """Get the object's create statement.
    
    This method retrieves the object create statement from the database.
    
    server[in]        server connection
    object_name[in]   name of object in the form db.objectname
    options[in]       options: verbosity, quiet
    
    Returns string : create statement or raise error if object or db not exist
    """
    from mysql.utilities.common.database import Database

    verbosity = options.get("verbosity", 0)
    quiet = options.get("quiet", False)

    db_name, sep, obj_name = object_name.partition(".")
    object = [db_name]

    db = Database(server, object[0], options)

    # Error if database does not exist
    if not db.exists():
        raise UtilDBError("The database does not exist: {0}".format(object[0]))

    if not obj_name:
        object.append(object[0])
        obj_type = "DATABASE"
    else:
        object.append(obj_name)
        obj_type = db.get_object_type(object[1])
        if obj_type is None:
            raise UtilDBError(
                "The object {0} does not exist.".format(object_name))
    create_stmt = db.get_create_statement(object[0], object[1], obj_type)

    if verbosity > 0 and not quiet:
        print "\n# Definition for object {0}:".format(object_name)
        print create_stmt

    return create_stmt
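
A hedged usage sketch for get_create_object(); connection values and the object name are placeholders. Passing 'db.objectname' returns the object's CREATE statement, while passing just a database name returns the CREATE DATABASE statement, as the branches above show.

from mysql.utilities.common.server import connect_servers

src_val = {"user": "root", "password": "secret", "host": "localhost",
           "port": 3306}
servers = connect_servers(src_val, None, {"quiet": True, "version": "5.1.30"})
server = servers[0]

stmt = get_create_object(server, "employees.salaries",
                         {"verbosity": 0, "quiet": True})
print(stmt)
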
Example #11
def get_create_object(server, object_name, options):
    """Get the object's create statement.
    
    This method retrieves the object create statement from the database.
    
    server[in]        server connection
    object_name[in]   name of object in the form db.objectname
    options[in]       options: verbosity, quiet
    
    Returns string : create statement or raise error if object or db not exist
    """
    from mysql.utilities.common.database import Database

    verbosity = options.get("verbosity", 0)
    quiet = options.get("quiet", False)

    db_name, sep, obj_name = object_name.partition(".")
    object = [db_name]

    db = Database(server, object[0], options)

    # Error if database does not exist
    if not db.exists():
        raise UtilDBError("The database does not exist: {0}".format(object[0]))

    if not obj_name:
        object.append(object[0])
        obj_type = "DATABASE"
    else:
        object.append(obj_name)
        obj_type = db.get_object_type(object[1])
        if obj_type is None:
            raise UtilDBError("The object {0} does not exist.".
                              format(object_name))
    create_stmt = db.get_create_statement(object[0], object[1], obj_type)
        
    if verbosity > 0 and not quiet:
        print "\n# Definition for object {0}:".format(object_name)
        print create_stmt 

    return create_stmt
Example #12
def _get_objects(server, database, options):
    """Get all objects from the database (except grants)
    
    server[in]        connected server object
    database[in]      database name
    options[in]       global options
    
    Returns list - objects in database
    """
    from mysql.utilities.common.database import Database

    options["skip_grants"] = True  # Tell db class to skip grants

    db_obj = Database(server, database, options)
    if not db_obj.exists():
        raise UtilDBError("The database does not exist: {0}".format(database))
    db_obj.init()
    db_objects = db_obj.objects
    db_objects.sort()

    return db_objects
Example #13
def _copy_objects(source, destination, db_list, options,
                  show_message=True, do_create=True):
    """Copy objects for a list of databases

    This method loops through a list of databases copying the objects as
    controlled by the skip options.

    source[in]             Server class instance for source
    destination[in]        Server class instance for destination
    db_list[in]            list of (db_name, new_name) tuples; new_name is
                           None when the database is not renamed
    options[in]            copy options
    show_message[in]       if True, display copy message
                           Default = True
    do_create[in]          if True, execute create statement for database
                           Default = True
    """
    # Copy objects
    for db_name in db_list:

        if show_message:
            # Display copy message
            if not options.get('quiet', False):
                msg = "# Copying database %s " % db_name[0]
                if db_name[1]:
                    msg += "renamed as %s" % (db_name[1])
                print msg

        # Get a Database class instance
        db = Database(source, db_name[0], options)

        # Perform the copy
        db.init()
        db.copy_objects(db_name[1], options, destination,
                        options.get("threads", False), do_create)
Example #14
def get_create_object(server, object_name, options, object_type):
    """Get the object's create statement.

    This method retrieves the object create statement from the database.

    server[in]        server connection
    object_name[in]   name of object in the form db.objectname
    options[in]       options: verbosity, quiet
    object_type[in]   type of the specified object (e.g, TABLE, PROCEDURE,
                      etc.).

    Returns string : create statement or raise error if object or db not exist
    """

    verbosity = options.get("verbosity", 0)
    quiet = options.get("quiet", False)

    m_obj = re.match(REGEXP_QUALIFIED_OBJ_NAME, object_name)
    db_name, obj_name = m_obj.groups()
    obj = [db_name]

    db = Database(server, obj[0], options)

    # Error if database does not exist
    if not db.exists():
        raise UtilDBError("The database does not exist: {0}".format(obj[0]))

    if not obj_name or object_type == 'DATABASE':
        obj.append(db_name)
    else:
        obj.append(obj_name)

    create_stmt = db.get_create_statement(obj[0], obj[1], object_type)

    if verbosity > 0 and not quiet:
        print "\n# Definition for object {0}:".format(object_name)
        print create_stmt

    return create_stmt
Example #15
def _get_transform(server1, server2, object1, object2, options,
                   object_type):
    """Get the transformation SQL statements

    This method generates the SQL statements to transform the destination
    object based on direction of the compare.

    server1[in]        first server connection
    server2[in]        second server connection
    object1            the first object in the compare in the form: (db.name)
    object2            the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, etc.)
    object_type[in]    type of the objects to be compared (e.g., TABLE,
                       PROCEDURE, etc.).

    Returns list - transformation statements
    """

    try:
        m_obj1 = re.match(REGEXP_QUALIFIED_OBJ_NAME, object1)
        db1, name1 = m_obj1.groups()
        m_obj2 = re.match(REGEXP_QUALIFIED_OBJ_NAME, object2)
        db2, name2 = m_obj2.groups()
    except AttributeError:
        raise UtilError("Invalid object name arguments for _get_transform"
                        "(): %s, %s." % (object1, object2))
    # If the second part of the object qualified name is None, then the format
    # is not 'db_name.obj_name' for object1 and therefore must treat it as a
    # database name. (supports backticks and the use of '.' (dots) in names.)
    if not name1 or object_type == 'DATABASE':

        # We are working with databases so db and name need to be set
        # to the database name to tell the get_object_definition() method
        # to retrieve the database information.
        name1 = db1
        name2 = db2

    db_1 = Database(server1, db1, options)
    db_2 = Database(server2, db2, options)

    obj1 = db_1.get_object_definition(db1, name1, object_type)
    obj2 = db_2.get_object_definition(db2, name2, object_type)

    # Get the transformation based on direction.
    transform_str = []
    xform = SQLTransformer(db_1, db_2, obj1[0], obj2[0], object_type,
                           options.get('verbosity', 0))

    differences = xform.transform_definition()
    if differences and len(differences) > 0:
        transform_str.extend(differences)

    return transform_str
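
The pattern REGEXP_QUALIFIED_OBJ_NAME is not reproduced in these examples. As a rough, simplified stand-in (it does not handle backticks or dots inside quoted names the way the real pattern does), splitting a qualified name into (db, name) groups looks like this:

import re

# Simplified, illustrative pattern only -- not the utilities' own regexp.
SIMPLE_QUALIFIED_NAME = re.compile(r"^([^.]+)(?:\.(.+))?$")

print(SIMPLE_QUALIFIED_NAME.match("employees.salaries").groups())
# ('employees', 'salaries')
print(SIMPLE_QUALIFIED_NAME.match("employees").groups())
# ('employees', None)
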
Example #16
    def exists(self, tbl_name=None):
        """Check to see if the table exists

        tbl_name[in]       table name (db.table)
                           (optional) If omitted, operation is performed
                           on the class instance table name.

        return True = table exists, False = table does not exist
        """

        db, table = (None, None)
        if tbl_name:
            db, table = Database.parse_object_name(tbl_name)
        else:
            db = self.db_name
            table = self.tbl_name
        res = self.server.exec_query("SELECT TABLE_NAME " +
                                     "FROM INFORMATION_SCHEMA.TABLES " +
                                     "WHERE TABLE_SCHEMA = '%s'" % db +
                                     " and TABLE_NAME = '%s'" % table)

        return (res is not None and len(res) >= 1)
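
A hedged usage sketch of exists(); Table is constructed with a qualified 'db.table' name as in the import and index examples further down, and the connection values are placeholders.

from mysql.utilities.common.server import connect_servers
from mysql.utilities.common.table import Table

src_val = {"user": "root", "password": "secret", "host": "localhost",
           "port": 3306}
servers = connect_servers(src_val, None, {"quiet": True, "version": "5.1.30"})
server = servers[0]

tbl = Table(server, "employees.salaries")
print(tbl.exists())                     # checks the instance's own db.table
print(tbl.exists("employees.titles"))   # checks another table explicitly
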
Example #17
def export_metadata(source, src_val, db_list, options):
    """Produce rows to be used to recreate objects in a database.

    This method retrieves the objects for each database listed in the form
    of CREATE (SQL) statements or in a tabular form to the file specified.
    The valid values for the format parameter are SQL, CSV, TSV, VERTICAL,
    or GRID.

    source[in]         Server instance
    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    db_list[in]        list of database names to export
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format,
                       debug, exclude_names, exclude_patterns)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.format import format_tabular_list
    from mysql.utilities.common.format import format_vertical_list

    format = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    column_type = options.get("display", "brief")
    skip_create = options.get("skip_create", False)
    quiet = options.get("quiet", False)
    skip_tables = options.get("skip_tables", False)
    skip_views = options.get("skip_views", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)

    if options.get("all", False):
        rows = source.get_all_databases()
        for row in rows:
            db_list.append(row[0])

    # Check user permissions on source for all databases
    for db_name in db_list:
        source_db = Database(source, db_name)
        # Make a dictionary of the options
        access_options = {
            'skip_views'  : skip_views,
            'skip_procs'  : skip_procs,
            'skip_funcs'  : skip_funcs,
            'skip_grants' : skip_grants,
            'skip_events' : skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)
    
    for db_name in db_list:

        # Get a Database class instance
        db = Database(source, db_name, options)

        # Error if source database does not exist
        if not db.exists():
            raise UtilDBError("Source database does not exist - %s" % db_name,
                              -1, db_name)

        if not quiet:
            print "# Exporting metadata from %s" % db_name

        # Perform the extraction
        if format == "sql":
            db.init()
            # quote database name with backticks
            q_db_name = quote_with_backticks(db_name)
            if not skip_create:
                print "DROP DATABASE IF EXISTS %s;" % q_db_name
                print "CREATE DATABASE %s;" % q_db_name
            print "USE %s;" % q_db_name
            for dbobj in db.get_next_object():
                if dbobj[0] == "GRANT" and not skip_grants:
                    if not quiet:
                        print "# Grant:"
                    if dbobj[1][3]:
                        create_str = "GRANT %s ON %s.%s TO %s;" % \
                                     (dbobj[1][1], q_db_name,
                                      quote_with_backticks(dbobj[1][3]), 
                                      dbobj[1][0])
                    else:
                        create_str = "GRANT %s ON %s.* TO %s;" % \
                                     (dbobj[1][1], q_db_name, dbobj[1][0])
                    if "%" in create_str:
                        create_str = re.sub("%", "%%", create_str)
                    print create_str
                else:
                    if not quiet:
                        print "# %s: %s.%s" % (dbobj[0], db_name,
                                               dbobj[1][0])
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        print "DELIMITER ||"
                    print "%s;" % db.get_create_statement(db_name,
                                                          dbobj[1][0],
                                                          dbobj[0])
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        print "||"
                        print "DELIMITER ;"
        else:
            objects = []
            if not skip_tables:
                objects.append("TABLE")
            if not skip_views:
                objects.append("VIEW")
            if not skip_triggers:
                objects.append("TRIGGER")
            if not skip_procs:
                objects.append("PROCEDURE")
            if not skip_funcs:
                objects.append("FUNCTION")
            if not skip_events:
                objects.append("EVENT")
            if not skip_grants:
                objects.append("GRANT")
            for obj_type in objects:
                sys.stdout.write("# %sS in %s:" % (obj_type, db_name))
                if format in ('grid', 'vertical'):
                    rows = db.get_db_objects(obj_type, column_type, True)
                else:
                    rows = db.get_db_objects(obj_type, column_type, True, True)
                if len(rows[1]) < 1:
                    print " (none found)"
                else:
                    print
                    # Cannot use print_list here because we must manipulate
                    # the behavior of format_tabular_list
                    list_options = {}
                    if format == "vertical":
                        format_vertical_list(sys.stdout, rows[0], rows[1])
                    elif format == "tab":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = '\t'
                        format_tabular_list(sys.stdout, rows[0], rows[1],
                                            list_options)
                    elif format == "csv":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = ','
                        format_tabular_list(sys.stdout, rows[0], rows[1],
                                            list_options)
                    else:  # default to table format
                        format_tabular_list(sys.stdout, rows[0], rows[1])

    if not quiet:
        print "#...done."

    return True
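
A hedged usage sketch for export_metadata(); connection values and database names are placeholders, and only a small subset of the options listed in the docstring is shown.

from mysql.utilities.common.server import connect_servers

src_val = {"user": "root", "password": "secret", "host": "localhost",
           "port": 3306}
servers = connect_servers(src_val, None, {"quiet": True, "version": "5.1.30"})
source = servers[0]

export_options = {
    "format": "sql",
    "skip_grants": True,
    "quiet": False,
}
export_metadata(source, src_val, ["employees"], export_options)
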
Example #18
def import_file(dest_val, file_name, options):
    """Import a file

    This method reads a file and, if needed, transforms the file into
    discrete SQL statements for execution on the destination server.

    It accepts any of the formal structured files produced by the
    mysqlexport utility including formats SQL, CSV, TAB, GRID, and
    VERTICAL.

    It will read these files and skip or include the definitions or data
    as specified in the options. An error is raised for any conversion
    errors or errors while executing the statements.

    Users are highly encouraged to use the --dryrun option which will
    print the SQL statements without executing them.

    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    file_name[in]      name (and path) of the file to import
    options[in]        a dictionary containing the options for the import:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, and debug)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.table import Table
    from mysql.utilities.common.server import connect_servers

    # Helper method to dig through the definitions for create statements
    def _process_definitions(statements, table_col_list, db_name):
        # First, get the SQL strings
        sql_strs = _build_create_objects(obj_type, db_name, definitions)
        statements.extend(sql_strs)
        # Now, save the column list
        col_list = _build_col_metadata(obj_type, definitions)
        if len(col_list) > 0:
            table_col_list.extend(col_list)

    def _process_data(tbl_name, statements, columns, table_col_list,
                      table_rows, skip_blobs):
        # if there is data here, build bulk inserts
        # First, create table reference, then call insert_rows()
        tbl = Table(destination, tbl_name)
        # Need to check to see if table exists!
        if tbl.exists():
            tbl.get_column_metadata()
            col_meta = True
        elif len(table_col_list) > 0:
            col_meta = _get_column_metadata(tbl, table_col_list)
        else:
            fix_cols = []
            fix_cols.append((tbl.tbl_name, columns))
            col_meta = _get_column_metadata(tbl, fix_cols)
        if not col_meta:
            raise UtilError("Cannot build bulk insert statements without "
                            "the table definition.")
        ins_strs = tbl.make_bulk_insert(table_rows, tbl.q_db_name)
        if len(ins_strs[0]) > 0:
            statements.extend(ins_strs[0])
        if len(ins_strs[1]) > 0 and not skip_blobs:
            for update in ins_strs[1]:
                statements.append(update)

    # Gather options
    format = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    quiet = options.get("quiet", False)
    import_type = options.get("import_type", "definitions")
    single = options.get("single", True)
    dryrun = options.get("dryrun", False)
    do_drop = options.get("do_drop", False)
    skip_blobs = options.get("skip_blobs", False)
    skip_gtid = options.get("skip_gtid", False)

    # Attempt to connect to the destination server
    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(dest_val, None, conn_options)

    destination = servers[0]

    # Check storage engines
    check_engine_options(destination, options.get("new_engine", None),
                         options.get("def_engine", None), False,
                         options.get("quiet", False))

    if not quiet:
        if import_type == "both":
            what = "definitions and data"
        else:
            what = import_type
        print "# Importing %s from %s." % (what, file_name)

    # Setup variables we will need
    skip_header = not no_headers
    if format == "sql":
        skip_header = False
    get_db = True
    check_privileges = False
    db_name = None
    file = open(file_name)
    columns = []
    read_columns = False
    table_rows = []
    obj_type = ""
    definitions = []
    statements = []
    table_col_list = []
    tbl_name = ""
    skip_rpl = options.get("skip_rpl", False)
    gtid_command_found = False
    supports_gtid = servers[0].supports_gtid() == 'ON'
    skip_gtid_warning_printed = False
    gtid_version_checked = False

    # Read the file one object/definition group at a time
    for row in read_next(file, format):
        # Check for replication command
        if row[0] == "RPL_COMMAND":
            if not skip_rpl:
                statements.append(row[1])
            continue
        if row[0] == "GTID_COMMAND":
            gtid_command_found = True
            if not supports_gtid:
                # only display warning once
                if not skip_gtid_warning_printed:
                    print _GTID_SKIP_WARNING
                    skip_gtid_warning_printed = True
            elif not skip_gtid:
                if not gtid_version_checked:
                    gtid_version_checked = True
                    # Check GTID version for complete feature support
                    servers[0].check_gtid_version()
                    # Check the gtid_purged value too
                    servers[0].check_gtid_executed("import")
                statements.append(row[1])
            continue
        # If this is the first pass, get the database name from the file
        if get_db:
            if skip_header:
                skip_header = False
            else:
                db_name = _get_db(row)
                # quote db_name with backticks if needed
                if db_name and not is_quoted_with_backticks(db_name):
                    db_name = quote_with_backticks(db_name)
                get_db = False
                if do_drop and import_type != "data":
                    statements.append("DROP DATABASE IF EXISTS %s;" % db_name)
                if import_type != "data":
                    if not _skip_object("CREATE_DB", options) and \
                       not format == 'sql':
                        statements.append("CREATE DATABASE %s;" % db_name)

        # Once the database name is known, check that the connected user has
        # write access to the destination database.
        if db_name is not None:
            dest_db = Database(destination, db_name)

            # Make a dictionary of the options
            access_options = options.copy()

            dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                       access_options)

        # Now check to see if we want definitions, data, or both:
        if row[0] == "sql" or row[0] in _DEFINITION_LIST:
            if format != "sql" and len(row[1]) == 1:
                raise UtilError("Cannot read an import file generated with "
                                "--display=NAMES")

            if import_type in ("definitions", "both"):
                if format == "sql":
                    statements.append(row[1])
                else:
                    if obj_type == "":
                        obj_type = row[0]
                    if obj_type != row[0]:
                        if len(definitions) > 0:
                            _process_definitions(statements, table_col_list,
                                                 db_name)
                        obj_type = row[0]
                        definitions = []
                    if not _skip_object(row[0], options):
                        definitions.append(row[1])
        else:
            # see if there are any definitions to process
            if len(definitions) > 0:
                _process_definitions(statements, table_col_list, db_name)
                definitions = []

            if import_type in ("data", "both"):
                if _skip_object("DATA", options):
                    continue  # skip data
                elif format == "sql":
                    statements.append(row[1])
                else:
                    if row[0] == "BEGIN_DATA":
                        # Start of table so first row is columns.
                        if len(table_rows) > 0:
                            _process_data(tbl_name, statements, columns,
                                          table_col_list, table_rows,
                                          skip_blobs)
                            table_rows = []
                        read_columns = True
                        tbl_name = row[1]
                        if not is_quoted_with_backticks(tbl_name):
                            db, sep, tbl = tbl_name.partition('.')
                            q_db = quote_with_backticks(db)
                            q_tbl = quote_with_backticks(tbl)
                            tbl_name = ".".join([q_db, q_tbl])
                    else:
                        if read_columns:
                            columns = row[1]
                            read_columns = False
                        else:
                            if not single:
                                table_rows.append(row[1])
                            else:
                                ins_str = _build_insert_data(
                                    columns, tbl_name, row[1])
                                statements.append(ins_str)

    # Process remaining definitions
    if len(definitions) > 0:
        _process_definitions(statements, table_col_list, db_name)
        definitions = []

    # Process remaining data rows
    if len(table_rows) > 0:
        _process_data(tbl_name, statements, columns, table_col_list,
                      table_rows, skip_blobs)
        table_rows = []

    # Now process the statements
    _exec_statements(statements, destination, format, options, dryrun)

    file.close()

    # Check gtid process
    if supports_gtid and not gtid_command_found:
        print _GTID_MISSING_WARNING

    if not quiet:
        print "#...done."
    return True
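
A hedged usage sketch for import_file(); the file name, connection values, and option subset are placeholders. Per the docstring, a dry run that only prints the statements is the recommended first pass.

dest_val = {"user": "root", "password": "secret", "host": "localhost",
            "port": 3306}
import_options = {
    "format": "sql",
    "import_type": "both",
    "dryrun": True,     # print the statements instead of executing them
    "do_drop": False,
    "quiet": False,
}
import_file(dest_val, "employees_export.sql", import_options)
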
Example #19
def _export_data(source, server_values, db_list, output_file, options):
    """Export data from the specified list of databases.

    This private method retrieves the data for each specified databases in SQL
    format (e.g., INSERT statements) or in a tabular form (GRID, TAB, CSV,
    VERTICAL) to the specified file.

    This private method does not check permissions.

    source[in]         Server instance.
    server_values[in]  Server connection values.
    db_list[in]        List of databases to export.
    output_file[in]    Output file to store the export data.
    options[in]        Dictionary containing the options for the export:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, file_per_tbl,
                       and debug).
    """
    frmt = options.get("format", "sql")
    quiet = options.get("quiet", False)
    file_per_table = options.get("file_per_tbl", False)
    sql_mode = source.select_variable("SQL_MODE")

    # Get tables list.
    table_list = []
    for db_name in db_list:
        source_db = Database(source, db_name, options)
        # Build table list.
        tables = source_db.get_db_objects("TABLE")
        for table in tables:
            table_list.append((db_name, table[0]))

    previous_db = ""
    export_tbl_tasks = []
    for table in table_list:

        # Detect when we start processing tables from a different database.
        db_name = table[0]
        if previous_db != db_name:
            previous_db = db_name
            if not quiet:
                q_db_name = quote_with_backticks(db_name, sql_mode)
                if frmt == "sql":
                    output_file.write("USE {0};\n".format(q_db_name))
                output_file.write(
                    "# Exporting data from {0}\n".format(q_db_name))
                if file_per_table:
                    output_file.write("# Writing table data to files.\n")

            # Print sample SOURCE command warning even in quiet mode.
            if file_per_table and frmt == 'sql':
                output_file.write("# The following are sample SOURCE commands."
                                  " If needed correct the path to match files "
                                  "location.\n")

        # Check multiprocess table export (only on POSIX systems).
        if options['multiprocess'] > 1 and os.name == 'posix':
            # Create export task.
            # Note: Server connection values are passed in the task dictionary
            # instead of a server instance, otherwise a multiprocessing error
            # is issued when assigning the task to a worker.
            export_task = {
                'srv_con': server_values,
                'table': table,
                'options': options,
            }
            export_tbl_tasks.append(export_task)
        else:
            # Export data from a table (no multiprocessing).
            _export_table_data(source, table, output_file, options)

        # Print SOURCE command if --file-per-table is used and format is SQL.
        if file_per_table and frmt == 'sql':
            tbl_name = ".".join(table)
            output_file.write("# SOURCE {0}\n".format(
                _generate_tbl_filename(tbl_name, frmt)))

    # Export tables concurrently.
    if export_tbl_tasks:
        # Create process pool.
        workers_pool = multiprocessing.Pool(processes=options['multiprocess'])
        # Concurrently export tables.
        res = workers_pool.map_async(multiprocess_tbl_export_task,
                                     export_tbl_tasks)
        workers_pool.close()
        # Get list of temporary files with the exported data.
        tmp_files_list = res.get()
        workers_pool.join()

        # Merge resulting temp files (if generated).
        for tmp_filename in tmp_files_list:
            if tmp_filename:
                tmp_file = open(tmp_filename, 'r')
                shutil.copyfileobj(tmp_file, output_file)
                tmp_file.close()
                os.remove(tmp_filename)

    if not quiet:
        output_file.write("#...done.\n")
Example #20
def check_index(src_val, table_args, options):
    """Check for duplicate or redundant indexes for one or more tables
    
    This method will examine the indexes for one or more tables and identify
    any indexes that are potential duplicates or redundant. It prints the
    equivalent DROP statements if selected.
    
    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    table_args[in]     list of tables in the form 'db.table' or 'db'
    options[in]        dictionary of options to include:
                         show-drops   : show drop statements for dupe indexes
                         skip         : skip non-existent tables
                         verbosity    : print extra information
                         show-indexes : show all indexes for each table
                         index-format : index format = sql, table, tab, csv
                         worst        : show worst performing indexes
                         best         : show best performing indexes
    
    Returns bool True = success, raises UtilError if error
    """
    
    # Get options
    show_drops = options.get("show-drops", False)
    skip = options.get("skip", False)
    verbosity = options.get("verbosity", False)
    show_indexes = options.get("show-indexes", False)
    index_format = options.get("index-format", False)
    stats = options.get("stats", False)
    first_indexes = options.get("best", None)        
    last_indexes = options.get("worst", None)

    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.database import Database
    from mysql.utilities.common.table import Table

    # Try to connect to the MySQL database server.
    conn_options = {
        'quiet'     : verbosity == 1,
        'version'   : "5.0.0",
    }
    servers = connect_servers(src_val, None, conn_options)

    source = servers[0]

    db_list = []     # list of databases
    table_list = []  # list of all tables to process
    
    # Build a list of objects to process
    # 1. start with db_list if no objects are given on the command line
    # 2. process command line options.
    # 3. loop through database list and add all tables
    # 4. check indexes
        
    # Perform the options check here. Loop through objects presented.
    for obj in table_args:
        # If a . appears, we are operating on a specific table
        idx = obj.count(".")
        if (idx == 1):
            table_list.append(obj)
        # Else we are operating on a specific database.
        else:
            db_list.append(obj)
    
    # Loop through database list adding tables
    for db in db_list:
        db_source = Database(source, db)
        db_source.init()
        tables = db_source.get_db_objects("TABLE")
        if not tables and verbosity >= 1:
            print "# Warning: database %s does not exist. Skipping." % (db)
        for table in tables:
            table_list.append(db + "." + table[0])

    # Fail if no tables to check
    if not table_list:
        raise UtilError("No tables to check.")

    if verbosity > 1:
        print "# Checking indexes..."
    # Check indexes for each table in the list
    for table_name in table_list:
        tbl_options = {
            'verbose'  : verbosity >= 1,
            'get_cols' : False,
            'quiet'    : verbosity is None or verbosity < 1
        }
        tbl = Table(source, table_name, tbl_options)
        exists = tbl.exists()
        if not exists and not skip:
            raise UtilError("Table %s does not exist. Use --skip "
                                 "to skip missing tables." % table_name)
        if exists:
            if not tbl.get_indexes():
                if verbosity > 1:
                    print "# Table %s is not indexed." % (table_name)
            else:
                if show_indexes:
                    tbl.print_indexes(index_format)
                # Show whether the table has a primary key
                if not tbl.has_primary_key():
                    if verbosity > 1:
                        print "#   Table %s does not contain a PRIMARY " \
                              "key." % table_name
                tbl.check_indexes(show_drops)
                
            # Show best and/or worst indexes
            if stats:
                if first_indexes is not None:
                    tbl.show_special_indexes(index_format, first_indexes, True)
                if last_indexes is not None:
                    tbl.show_special_indexes(index_format, last_indexes)
                
        if verbosity > 1:
            print "#"

    if verbosity > 1:    
        print "# ...done."
Example #21
def get_copy_lock(server, db_list, options, include_mysql=False,
                  cloning=False):
    """Get an instance of the Lock class with a standard copy (read) lock

    This method creates an instance of the Lock class using the lock type
    specified in the options. It is used to initiate the locks for the copy
    and related operations.

    server[in]             Server instance for locking calls
    db_list[in]            list of database names
    options[in]            option dictionary
                           Must include the skip_* options for copy and export
    include_mysql[in]      if True, include the mysql tables for copy operation
    cloning[in]            if True, create lock tables with WRITE on dest db
                           Default = False

    Returns Lock - Lock class instance
    """
    rpl_mode = options.get("rpl_mode", None)
    locking = options.get('locking', 'snapshot')

    # Determine if we need to use FTWRL. There are two conditions:
    #  - running on master (rpl_mode = 'master')
    #  - using locking = 'lock-all' and rpl_mode present
    if (rpl_mode in ["master", "both"]) or \
            (rpl_mode and locking == 'lock-all'):
        new_opts = options.copy()
        new_opts['locking'] = 'flush'
        lock = Lock(server, [], new_opts)

    # if this is a lock-all type and not replication operation,
    # find all tables and lock them
    elif locking == 'lock-all':
        table_lock_list = []

        # Build table lock list
        for db_name in db_list:
            db = db_name[0] if type(db_name) == tuple else db_name
            source_db = Database(server, db)
            tables = source_db.get_db_objects("TABLE")
            for table in tables:
                table_lock_list.append(("{0}.{1}".format(db, table[0]),
                                        'READ'))
                # Cloning requires issuing WRITE locks because we use same
                # conn.
                # Non-cloning will issue WRITE lock on a new destination conn.
                if cloning:
                    if db_name[1] is None:
                        db_clone = db_name[0]
                    else:
                        db_clone = db_name[1]
                    # For cloning, we use the same connection so we need to
                    # lock the destination tables with WRITE.
                    table_lock_list.append(("{0}.{1}".format(db_clone,
                                                             table[0]),
                                            'WRITE'))
            # We must include views for server version 5.5.3 and higher
            if server.check_version_compat(5, 5, 3):
                tables = source_db.get_db_objects("VIEW")
                for table in tables:
                    table_lock_list.append(("{0}.{1}".format(db, table[0]),
                                            'READ'))
                    # Cloning requires issuing WRITE locks because we use same
                    # conn.
                    # Non-cloning will issue WRITE lock on a new destination
                    # conn.
                    if cloning:
                        if db_name[1] is None:
                            db_clone = db_name[0]
                        else:
                            db_clone = db_name[1]
                        # For cloning, we use the same connection so we need to
                        # lock the destination tables with WRITE.
                        table_lock_list.append(("{0}.{1}".format(db_clone,
                                                                 table[0]),
                                                'WRITE'))

        # Now add mysql tables
        if include_mysql:
            # Don't lock proc tables if no procs or funcs are being read
            if not options.get('skip_procs', False) and \
               not options.get('skip_funcs', False):
                table_lock_list.append(("mysql.proc", 'READ'))
                table_lock_list.append(("mysql.procs_priv", 'READ'))
            # Don't lock event table if events are skipped
            if not options.get('skip_events', False):
                table_lock_list.append(("mysql.event", 'READ'))
        lock = Lock(server, table_lock_list, options)

    # Use default or no locking option
    else:
        lock = Lock(server, [], options)

    return lock
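
For reference, the table_lock_list built in the 'lock-all' branch above is a list of (qualified_table_name, lock_type) pairs; for a clone of db1 to db1_copy it would look roughly like this (illustrative values only):

table_lock_list = [
    ("db1.t1", "READ"),           # source table, read lock
    ("db1_copy.t1", "WRITE"),     # cloned destination table, write lock
    ("mysql.proc", "READ"),       # added when procs/funcs are not skipped
    ("mysql.procs_priv", "READ"),
    ("mysql.event", "READ"),      # added when events are not skipped
]
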
Example #22
def _export_metadata(source, db_list, output_file, options):
    """Export metadata from the specified list of databases.

    This private method retrieves the objects metadata for each database listed
    in the form of CREATE (SQL) statements or in a tabular form (GRID, TAB,
    CSV, VERTICAL) to the specified file.

    This private method does not check permissions.

    source[in]         Server instance.
    db_list[in]        List of databases to export.
    output_file[in]    Output file to store the metadata information.
    options[in]        Dictionary containing the options for the export:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format,
                       debug, exclude_names, exclude_patterns)
    """
    frmt = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    column_type = options.get("display", "brief")
    quiet = options.get("quiet", False)
    skip_create = options.get("skip_create", False)
    skip_tables = options.get("skip_tables", False)
    skip_views = options.get("skip_views", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)

    for db_name in db_list:

        # Get a Database class instance
        db = Database(source, db_name, options)

        # Export database metadata
        if not quiet:
            output_file.write(
                "# Exporting metadata from {0}\n".format(db.db_name)
            )

        # Perform the extraction
        if frmt == "sql":
            db.init()
            if not skip_create:
                output_file.write(
                    "DROP DATABASE IF EXISTS {0};\n".format(db.q_db_name)
                )
                output_file.write(
                    "CREATE DATABASE {0};\n".format(db.q_db_name)
                )
            output_file.write("USE {0};\n".format(db.q_db_name))
            for dbobj in db.get_next_object():
                if dbobj[0] == "GRANT" and not skip_grants:
                    if not quiet:
                        output_file.write("# Grant:\n")
                    if dbobj[1][3]:
                        create_str = "GRANT {0} ON {1}.{2} TO {3};\n".format(
                            dbobj[1][1], db.q_db_name,
                            quote_with_backticks(dbobj[1][3]), dbobj[1][0]
                        )
                    else:
                        create_str = "GRANT {0} ON {1}.* TO {2};\n".format(
                            dbobj[1][1], db.q_db_name, dbobj[1][0]
                        )
                    output_file.write(create_str)
                else:
                    if not quiet:
                        output_file.write(
                            "# {0}: {1}.{2}\n".format(dbobj[0], db.db_name,
                                                      dbobj[1][0])
                        )
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        output_file.write("DELIMITER ||\n")
                    output_file.write("{0};\n".format(
                        db.get_create_statement(db.db_name, dbobj[1][0],
                                                dbobj[0])
                    ))
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        output_file.write("||\n")
                        output_file.write("DELIMITER ;\n")
        else:
            objects = []
            if not skip_tables:
                objects.append("TABLE")
            if not skip_funcs:
                objects.append("FUNCTION")
            if not skip_procs:
                objects.append("PROCEDURE")
            if not skip_views:
                objects.append("VIEW")
            if not skip_triggers:
                objects.append("TRIGGER")
            if not skip_events:
                objects.append("EVENT")
            if not skip_grants:
                objects.append("GRANT")
            for obj_type in objects:
                output_file.write(
                    "# {0}S in {1}:".format(obj_type, db.db_name)
                )
                if frmt in ('grid', 'vertical'):
                    rows = db.get_db_objects(obj_type, column_type, True)
                else:
                    rows = db.get_db_objects(obj_type, column_type, True, True)
                if len(rows[1]) < 1:
                    output_file.write(" (none found)\n")
                else:
                    output_file.write("\n")
                    # Cannot use print_list here because we must manipulate
                    # the behavior of format_tabular_list.
                    list_options = {}
                    if frmt == "vertical":
                        format_vertical_list(output_file, rows[0], rows[1])
                    elif frmt == "tab":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = '\t'
                        format_tabular_list(output_file, rows[0], rows[1],
                                            list_options)
                    elif frmt == "csv":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = ','
                        format_tabular_list(output_file, rows[0], rows[1],
                                            list_options)
                    else:  # default to table format
                        format_tabular_list(output_file, rows[0], rows[1])

    if not quiet:
        output_file.write("#...done.\n")
Example #23
def object_diff(server1_val,
                server2_val,
                object1,
                object2,
                options,
                object_type=None):
    """diff the definition of two objects

    Find the differences between two object definitions.

    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    object1[in]        the first object in the compare in the form: (db.name)
    object2[in]        the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype)
    object_type[in]    type of the objects to be compared (e.g., TABLE,
                       PROCEDURE, etc.). By default None (not defined).

    Returns None = objects are the same, diff[] = tables differ
    """
    if isinstance(server1_val, dict):  # dict or common.server.Server object
        server1, server2 = server_connect(server1_val, server2_val, object1,
                                          object2, options)
    else:
        # Reuse the existing server connections (avoid reconnecting).
        server1, server2 = server1_val, server2_val

    force = options.get("force", None)

    # compare db's all objects
    include_create = options.get("include_create", False)
    # db1.*:db2.*
    if include_create and object1.endswith('.*') and object2.endswith('.*'):
        direction = options.get("changes-for", None)
        reverse = options.get("reverse", False)

        db_name1, _ = parse_object_name(object1,
                                        server1.select_variable("SQL_MODE"))
        db_name2, _ = parse_object_name(object2,
                                        server2.select_variable("SQL_MODE"))
        in_both, in_db1, in_db2 = get_common_objects(server1, server2,
                                                     db_name1, db_name2, True,
                                                     options)
        # CREATE/ALTER/DROP generation requires comparing all objects.
        all_object = set(in_both + in_db1 + in_db2)

        # Call object_diff() recursively to compare every object.
        for this_obj in all_object:
            object1 = db_name1 + "." + this_obj[1][0]
            object2 = db_name2 + "." + this_obj[1][0]
            # Reuse the same connections inside this loop; object_type is
            # resolved per object.
            object_diff(server1,
                        server2,
                        object1,
                        object2,
                        options,
                        object_type=None)
        return []

    # Get the object type if unknown considering that objects of different
    # types can be found with the same name.
    if not object_type:
        # Get object types of object1
        sql_mode = server1.select_variable("SQL_MODE")
        db_name, obj_name = parse_object_name(object1, sql_mode)
        db = Database(server1, db_name, options)
        obj1_types = db.get_object_type(obj_name)
        if not obj1_types:
            if include_create:
                # If generating CREATE object DDL is allowed, use a 'NULL'
                # placeholder type so common/dbcompare.py knows to handle it.
                obj1_types = ['NULL']
            else:
                msg = "The object {0} does not exist.".format(object1)
                if not force:
                    raise UtilDBError(msg)
                print("ERROR: {0}".format(msg))
                return []

        # Get object types of object2
        sql_mode = server2.select_variable("SQL_MODE")
        db_name, obj_name = parse_object_name(object2, sql_mode)
        db = Database(server2, db_name, options)
        obj2_types = db.get_object_type(obj_name)
        if not obj2_types:
            if include_create:
                obj2_types = ['NULL']
            else:
                msg = "The object {0} does not exist.".format(object2)
                if not force:
                    raise UtilDBError(msg)
                print("ERROR: {0}".format(msg))
                return []

        # Merge types found for both objects
        obj_types = set(obj1_types + obj2_types)
        if obj_types == set(['NULL']):
            msg = "The object {0} or {1} does not exist in the source side.".format(
                object1, object2)
            if not force:
                raise UtilDBError(msg)
            print("ERROR: {0}".format(msg))
            return []
        elif 'NULL' in obj_types:
            # At least one of the objects exists; build a combined type such
            # as 'TABLE-NULL' or 'NULL-TABLE' so the real type is available
            # later in diff_objects().
            obj_types = set(['-'.join(obj1_types + obj2_types)])

        # Diff objects considering all types found
        result = []
        for obj_type in obj_types:
            res = diff_objects(server1, server2, object1, object2, options,
                               obj_type)
            if res:
                result.append(res)
        return result if len(result) > 0 else None
    else:
        # Diff objects of known type
        return diff_objects(server1, server2, object1, object2, options,
                            object_type)
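
As a rough usage sketch for the db1.*:db2.* mode handled above; the connection values, database names, and option values below are hypothetical:

server1_val = {'user': 'root', 'password': 'secret', 'host': 'host1', 'port': 3306}
server2_val = {'user': 'root', 'password': 'secret', 'host': 'host2', 'port': 3306}
options = {
    'quiet': False,
    'verbosity': 0,
    'difftype': 'sql',
    'changes-for': 'server1',
    'include_create': True,  # also generate DDL for objects missing on one side
    'force': True,           # report missing objects instead of raising
}

# Compare every object in db1 on server1 with db2 on server2.
object_diff(server1_val, server2_val, 'db1.*', 'db2.*', options)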
Example #24
def check_index(src_val, table_args, options):
    """Check for duplicate or redundant indexes for one or more tables
    
    This method will examine the indexes for one or more tables and identify
    any indexes that are potential duplicates or redundant. It prints the
    equivalent DROP statements if selected.
    
    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    table_args[in]     list of tables in the form 'db.table' or 'db'
    options[in]        dictionary of options to include:
                         show-drops   : show drop statements for dupe indexes
                         skip         : skip non-existent tables
                         verbosity    : print extra information
                         show-indexes : show all indexes for each table
                         index-format : index format = sql, table, tab, csv
                         worst        : show worst performing indexes
                         best         : show best performing indexes
    
    Returns bool True = success, raises UtilError if error
    """

    # Get options
    show_drops = options.get("show-drops", False)
    skip = options.get("skip", False)
    verbosity = options.get("verbosity", False)
    show_indexes = options.get("show-indexes", False)
    index_format = options.get("index-format", False)
    stats = options.get("stats", False)
    first_indexes = options.get("best", None)
    last_indexes = options.get("worst", None)

    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.common.database import Database
    from mysql.utilities.common.table import Table

    # Try to connect to the MySQL database server.
    conn_options = {
        'quiet': verbosity == 1,
        'version': "5.0.0",
    }
    servers = connect_servers(src_val, None, conn_options)

    source = servers[0]

    db_list = []  # list of databases
    table_list = []  # list of all tables to process

    # Build a list of objects to process
    # 1. start with db_list if no objects present on command line
    # 2. process command line options.
    # 3. loop through database list and add all tables
    # 4. check indexes

    # Perform the options check here. Loop through objects presented.
    for obj in table_args:
        # If a . appears, we are operating on a specific table
        idx = obj.count(".")
        if (idx == 1):
            table_list.append(obj)
        # Else we are operating on a specific database.
        else:
            db_list.append(obj)

    # Loop through database list adding tables
    for db in db_list:
        db_source = Database(source, db)
        db_source.init()
        tables = db_source.get_db_objects("TABLE")
        if not tables and verbosity >= 1:
            print "# Warning: database %s does not exist. Skipping." % (db)
        for table in tables:
            table_list.append(db + "." + table[0])

    # Fail if no tables to check
    if not table_list:
        raise UtilError("No tables to check.")

    if verbosity > 1:
        print "# Checking indexes..."
    # Check indexes for each table in the list
    for table_name in table_list:
        tbl_options = {
            'verbose': verbosity >= 1,
            'get_cols': False,
            'quiet': verbosity is None or verbosity < 1
        }
        tbl = Table(source, table_name, tbl_options)
        exists = tbl.exists()
        if not exists and not skip:
            raise UtilError("Table %s does not exist. Use --skip "
                            "to skip missing tables." % table_name)
        if exists:
            if not tbl.get_indexes():
                if verbosity > 1:
                    print "# Table %s is not indexed." % (table_name)
            else:
                if show_indexes:
                    tbl.print_indexes(index_format)
                    # Show if table has primary key
                if not tbl.has_primary_key():
                    if verbosity > 1:
                        print "#   Table %s does not contain a PRIMARY key."
                tbl.check_indexes(show_drops)

            # Show best and/or worst indexes
            if stats:
                if first_indexes is not None:
                    tbl.show_special_indexes(index_format, first_indexes, True)
                if last_indexes is not None:
                    tbl.show_special_indexes(index_format, last_indexes)

        if verbosity > 1:
            print "#"

    if verbosity > 1:
        print "# ...done."
Example #25
def object_diff(server1_val,
                server2_val,
                object1,
                object2,
                options,
                object_type=None):
    """diff the definition of two objects

    Find the differences between two object definitions.

    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    object1[in]        the first object in the compare in the form: (db.name)
    object2[in]        the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype)
    object_type[in]    type of the objects to be compared (e.g., TABLE,
                       PROCEDURE, etc.). By default None (not defined).

    Returns None = objects are the same, diff[] = tables differ
    """
    objectype = options.get("objectype", 'ALL').upper()
    if not object_type and objectype != 'ALL':
        object_type = objectype
    if object_type and objectype != 'ALL' and object_type != objectype:
        print('The object type {0} is skipped.'.format(object_type))
        return None
    server1, server2 = server_connect(server1_val, server2_val, object1,
                                      object2, options)

    force = options.get("force", None)
    # Get the object type if unknown considering that objects of different
    # types can be found with the same name.
    result = []
    if not object_type:
        # Get object types of object1
        sql_mode = server1.select_variable("SQL_MODE")
        db_name, obj_name = parse_object_name(object1, sql_mode)
        db = Database(server1, db_name, options)
        obj1_types = db.get_object_type(obj_name)
        if not obj1_types:
            msg = "The object {0} does not exist.".format(object1)
            if not force:
                raise UtilDBError(msg)
            print("ERROR: {0}".format(msg))
            return []

        # Get object types of object2
        sql_mode = server2.select_variable("SQL_MODE")
        db_name, obj_name = parse_object_name(object2, sql_mode)
        db = Database(server2, db_name, options)
        obj2_types = db.get_object_type(obj_name)
        if not obj2_types:
            msg = "The object {0} does not exist.".format(object2)
            if not force:
                raise UtilDBError(msg)
            print("ERROR: {0}".format(msg))
            return []

        # Merge types found for both objects
        obj_types = set(obj1_types + obj2_types)

        # Diff objects considering all types found
        for obj_type in obj_types:
            res = diff_objects(server1, server2, object1, object2, options,
                               obj_type)
            if res:
                result.append(res)
    else:
        # Diff objects of known type
        res = diff_objects(server1, server2, object1, object2, options,
                           object_type)
        if res:
            result.append(res)
    if (len(result) > 0 and options.get("difftype", None) == 'sql' and
            options.get("output", None) and
            options.get("output", '').endswith('.sql')):
        with open(options.get("output"), 'a', encoding='utf8') as fp:
            for res in result:
                if isinstance(res, list):
                    for r in res:
                        if r and r.strip().startswith('#'):
                            continue
                        fp.write('{}\n'.format(r))
                else:
                    fp.write('{}\n'.format(res))
    return result if len(result) > 0 else None
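
Because this variant appends the generated SQL statements to the file named by the output option, a hedged usage sketch might look like this (all values are hypothetical):

server1_val = {'user': 'root', 'password': 'secret', 'host': 'host1', 'port': 3306}
server2_val = {'user': 'root', 'password': 'secret', 'host': 'host2', 'port': 3306}
options = {
    'quiet': False,
    'verbosity': 0,
    'force': True,
    'difftype': 'sql',        # required for the .sql output handling above
    'objectype': 'TABLE',     # only diff TABLE objects ('ALL' disables the filter)
    'output': 'changes.sql',  # non-comment statements are appended to this file
}

object_diff(server1_val, server2_val, 'db1.t1', 'db2.t1', options)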
Example #26
    def __init__(self, server1, name, options=None):
        """Constructor

        server1[in]        A Server object
        name[in]           Name of table in the form (db.table)
        options[in]        options for class: verbose, quiet, get_cols,
            quiet     If True, do not print information messages
            verbose   print extra data during operations (optional)
                      (default is False)
            get_cols  If True, get the column metadata on construction
                      (default is False)
        """
        if options is None:
            options = {}
        self.verbose = options.get('verbose', False)
        self.quiet = options.get('quiet', False)
        self.server = server1

        # Keep table identifier considering backtick quotes
        if is_quoted_with_backticks(name):
            self.q_table = name
            self.q_db_name, self.q_tbl_name = Database.parse_object_name(name)
            self.db_name = remove_backtick_quoting(self.q_db_name)
            self.tbl_name = remove_backtick_quoting(self.q_tbl_name)
            self.table = ".".join([self.db_name, self.tbl_name])
        else:
            self.table = name
            self.db_name, self.tbl_name = Database.parse_object_name(name)
            self.q_db_name = quote_with_backticks(self.db_name)
            self.q_tbl_name = quote_with_backticks(self.tbl_name)
            self.q_table = ".".join([self.q_db_name, self.q_tbl_name])
        self.obj_type = "TABLE"
        self.pri_idx = None

        # We store each type of index in a separate list to make it easier
        # to manipulate
        self.btree_indexes = []
        self.hash_indexes = []
        self.rtree_indexes = []
        self.fulltext_indexes = []
        self.text_columns = []
        self.blob_columns = []
        self.column_format = None
        self.column_names = []
        self.q_column_names = []
        if options.get('get_cols', False):
            self.get_column_metadata()
        self.dest_vals = None
        self.storage_engine = None

        # Get max allowed packet
        res = self.server.exec_query("SELECT @@session.max_allowed_packet")
        if res:
            self.max_packet_size = res[0][0]
        else:
            self.max_packet_size = _MAXPACKET_SIZE
        # Watch for invalid values
        if self.max_packet_size > _MAXPACKET_SIZE:
            self.max_packet_size = _MAXPACKET_SIZE

        self._insert = "INSERT INTO %s.%s VALUES "
        self.query_options = {  # Used for skipping fetch of rows
            'fetch': False
        }
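
A brief sketch of constructing this class, which appears in the other examples here as Table from mysql.utilities.common.table (that mapping, the connection values, and the table name are assumptions):

from mysql.utilities.common.server import connect_servers
from mysql.utilities.common.table import Table

# Hypothetical connection values; the constructor needs a live connection
# because it queries max_allowed_packet.
src_val = {'user': 'root', 'password': 'secret', 'host': 'localhost', 'port': 3306}
server = connect_servers(src_val, None, {'quiet': True, 'version': "5.0.0"})[0]

# get_cols=True triggers get_column_metadata() during construction.
tbl = Table(server, 'db1.t1', {'verbose': False, 'get_cols': True})
print(tbl.q_table)   # backtick-quoted identifier, e.g. `db1`.`t1`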
Example #27
def check_index(src_val, table_args, options):
    """Check for duplicate or redundant indexes for one or more tables

    This method will examine the indexes for one or more tables and identify
    any indexes that are potential duplicates or redundant. It prints the
    equivalent DROP statements if selected.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    table_args[in]     list of tables in the form 'db.table' or 'db'
    options[in]        dictionary of options to include:
                         show-drops   : show drop statements for dupe indexes
                         skip         : skip non-existent tables
                         verbosity    : print extra information
                         show-indexes : show all indexes for each table
                         index-format : index format = sql, table, tab, csv
                         worst        : show worst performing indexes
                         best         : show best performing indexes
                         report-indexes : reports tables without PK or UK

    Returns bool True = success, raises UtilError if error
    """

    # Get options
    show_drops = options.get("show-drops", False)
    skip = options.get("skip", False)
    verbosity = options.get("verbosity", False)
    show_indexes = options.get("show-indexes", False)
    index_format = options.get("index-format", False)
    stats = options.get("stats", False)
    first_indexes = options.get("best", None)
    last_indexes = options.get("worst", None)
    report_indexes = options.get("report-indexes", False)

    # Try to connect to the MySQL database server.
    conn_options = {
        'quiet': verbosity == 1,
        'version': "5.0.0",
    }
    servers = connect_servers(src_val, None, conn_options)

    source = servers[0]

    db_list = []  # list of databases
    table_list = []  # list of all tables to process

    # Build a list of objects to process
    # 1. start with db_list if no objects present on command line
    # 2. process command line options.
    # 3. loop through database list and add all tables
    # 4. check indexes

    # Get sql_mode value set on servers
    sql_mode = source.select_variable("SQL_MODE")

    # Perform the options check here. Loop through objects presented.
    for obj in table_args:
        m_obj = parse_object_name(obj, sql_mode)
        # Check if a valid database/table name is specified.
        if m_obj[0] is None:
            raise UtilError(
                PARSE_ERR_OBJ_NAME_FORMAT.format(
                    obj_name=obj, option="the database/table arguments"))
        else:
            db_name, obj_name = m_obj
            if obj_name:
                # Table specified
                table_list.append(obj)
            # Else we are operating on a specific database.
            else:
                # Remove backtick quotes.
                db_name = remove_backtick_quoting(db_name, sql_mode) \
                    if is_quoted_with_backticks(db_name, sql_mode) else db_name
                db_list.append(db_name)

    # Loop through database list adding tables
    for db in db_list:
        db_source = Database(source, db)
        db_source.init()
        tables = db_source.get_db_objects("TABLE")
        if not tables and verbosity >= 1:
            print("# Warning: database %s does not exist. Skipping." % (db))
        for table in tables:
            table_list.append("{0}.{1}".format(
                quote_with_backticks(db, sql_mode),
                quote_with_backticks(table[0], sql_mode)))

    # Fail if no tables to check
    if not table_list:
        raise UtilError("No tables to check.")

    if verbosity > 1:
        print("# Checking indexes...")
    # Check indexes for each table in the list
    # pylint: disable=R0101
    for table_name in table_list:
        tbl_options = {
            'verbose': verbosity >= 1,
            'get_cols': False,
            'quiet': verbosity is None or verbosity < 1
        }
        tbl = Table(source, table_name, tbl_options)
        exists = tbl.exists()
        if not exists and not skip:
            raise UtilError("Table %s does not exist. Use --skip "
                            "to skip missing tables." % table_name)
        if exists:
            if not tbl.get_indexes():
                if verbosity > 1 or report_indexes:
                    print("# Table %s is not indexed." % (table_name))
            else:
                if show_indexes:
                    tbl.print_indexes(index_format, verbosity)
                    # Show if table has primary key
                if verbosity > 1 or report_indexes:
                    if not tbl.has_primary_key():
                        if not tbl.has_unique_key():
                            print("# Table {0} does not contain neither a "
                                  "PRIMARY nor UNIQUE key.".format(table_name))
                        else:
                            print("# Table {0} does not contain a PRIMARY key."
                                  "".format(table_name))
                tbl.check_indexes(show_drops)

            # Show best and/or worst indexes
            if stats:
                if first_indexes is not None:
                    tbl.show_special_indexes(index_format, first_indexes, True)
                if last_indexes is not None:
                    tbl.show_special_indexes(index_format, last_indexes)

        if verbosity > 1:
            print("#")

    if verbosity > 1:
        print("# ...done.")
Example #28
def export_data(source, src_val, db_list, options):
    """Produce data for the tables in a database.

    This method retrieves the data for each table in the databases listed in
    the form of BULK INSERT (SQL) statements or in a tabular form to the file
    specified. The valid values for the format parameter are SQL, CSV, TSV,
    VERTICAL, or GRID.

    source[in]         Server instance
    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, file_per_tbl,
                       and debug)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.table import Table

    format = options.get("format", "sql")
    no_headers = options.get("no_headers", True)
    column_type = options.get("display", "brief")
    single = options.get("single", False)
    skip_blobs = options.get("skip_blobs", False)
    quiet = options.get("quiet", False)
    file_per_table = options.get("file_per_tbl", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)

    if options.get("all", False):
        rows = source.get_all_databases()
        for row in rows:
            if row[0] not in db_list:
                db_list.append(row[0])

    # Check that each database exists and that the user has the required
    # permissions on the source for all databases.
    table_lock_list = []
    table_list = []
    for db_name in db_list:
        source_db = Database(source, db_name)

        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
        }

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilDBError("Source database does not exist - %s" % db_name,
                              -1, db_name)

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        # Build table list
        tables = source_db.get_db_objects("TABLE")
        for table in tables:
            table_list.append((db_name, table[0]))

    old_db = ""
    for table in table_list:
        db_name = table[0]
        tbl_name = "%s.%s" % (db_name, table[1])
        # quote database and table name with backticks
        q_db_name = quote_with_backticks(db_name)
        q_tbl_name = "%s.%s" % (q_db_name, quote_with_backticks(table[1]))
        if not quiet and old_db != db_name:
            old_db = db_name
            if format == "sql":
                print "USE %s;" % q_db_name
            print "# Exporting data from %s" % db_name
            if file_per_table:
                print "# Writing table data to files."

        tbl_options = {'verbose': False, 'get_cols': True, 'quiet': quiet}
        cur_table = Table(source, q_tbl_name, tbl_options)
        if single and format not in ("sql", "grid", "vertical"):
            retrieval_mode = -1
            first = True
        else:
            retrieval_mode = 1
            first = False

        message = "# Data for table %s: " % q_tbl_name

        # switch for writing to files
        if file_per_table:
            if format == 'sql':
                file_name = tbl_name + ".sql"
            else:
                file_name = tbl_name + ".%s" % format.lower()
            outfile = open(file_name, "w")
            outfile.write(message + "\n")
        else:
            outfile = None
            print message

        for data_rows in cur_table.retrieve_rows(retrieval_mode):
            _export_row(data_rows, cur_table, format, single, skip_blobs,
                        first, no_headers, outfile)
            if first:
                first = False

        if file_per_table:
            outfile.close()

    if not quiet:
        print "#...done."

    return True
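
A hedged sketch of invoking export_data() with per-table output files; the connection values and database names are made up:

from mysql.utilities.common.server import connect_servers

src_val = {'user': 'root', 'password': 'secret', 'host': 'localhost', 'port': 3306}
source = connect_servers(src_val, None, {'quiet': True, 'version': "5.0.0"})[0]

options = {
    'format': 'csv',
    'no_headers': False,
    'skip_blobs': True,
    'file_per_tbl': True,  # write one <db.table>.csv file per table
    'quiet': False,
}
export_data(source, src_val, ['db1', 'db2'], options)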
Example #29
def check_index(src_val, table_args, options):
    """Check for duplicate or redundant indexes for one or more tables

    This method will examine the indexes for one or more tables and identify
    any indexes that are potential duplicates or redundant. It prints the
    equivalent DROP statements if selected.

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    table_args[in]     list of tables in the form 'db.table' or 'db'
    options[in]        dictionary of options to include:
                         show-drops   : show drop statements for dupe indexes
                         skip         : skip non-existent tables
                         verbosity    : print extra information
                         show-indexes : show all indexes for each table
                         index-format : index format = sql, table, tab, csv
                         worst        : show worst performing indexes
                         best         : show best performing indexes
                         report-indexes : reports tables without PK or UK

    Returns bool True = success, raises UtilError if error
    """

    # Get options
    show_drops = options.get("show-drops", False)
    skip = options.get("skip", False)
    verbosity = options.get("verbosity", False)
    show_indexes = options.get("show-indexes", False)
    index_format = options.get("index-format", False)
    stats = options.get("stats", False)
    first_indexes = options.get("best", None)
    last_indexes = options.get("worst", None)
    report_indexes = options.get("report-indexes", False)

    # Try to connect to the MySQL database server.
    conn_options = {
        'quiet': verbosity == 1,
        'version': "5.0.0",
    }
    servers = connect_servers(src_val, None, conn_options)

    source = servers[0]

    db_list = []     # list of databases
    table_list = []  # list of all tables to process

    # Build a list of objects to process
    # 1. start with db_list if no objects present on command line
    # 2. process command line options.
    # 3. loop through database list and add all tables
    # 4. check indexes

    obj_name_regexp = re.compile(REGEXP_QUALIFIED_OBJ_NAME)

    # Perform the options check here. Loop through objects presented.
    for obj in table_args:
        m_obj = obj_name_regexp.match(obj)
        # Check if a valid database/table name is specified.
        if not m_obj:
            raise UtilError(PARSE_ERR_OBJ_NAME_FORMAT.format(
                obj_name=obj, option="the database/table arguments"))
        else:
            db_name, obj_name = m_obj.groups()
            if obj_name:
                # Table specified
                table_list.append(obj)
            # Else we are operating on a specific database.
            else:
                # Remove backtick quotes.
                db_name = remove_backtick_quoting(db_name) \
                    if is_quoted_with_backticks(db_name) else db_name
                db_list.append(db_name)

    # Loop through database list adding tables
    for db in db_list:
        db_source = Database(source, db)
        db_source.init()
        tables = db_source.get_db_objects("TABLE")
        if not tables and verbosity >= 1:
            print "# Warning: database %s does not exist. Skipping." % (db)
        for table in tables:
            table_list.append("{0}.{1}".format(quote_with_backticks(db),
                                               quote_with_backticks(table[0])))

    # Fail if no tables to check
    if not table_list:
        raise UtilError("No tables to check.")

    if verbosity > 1:
        print "# Checking indexes..."
    # Check indexes for each table in the list
    for table_name in table_list:
        tbl_options = {
            'verbose': verbosity >= 1,
            'get_cols': False,
            'quiet': verbosity is None or verbosity < 1
        }
        tbl = Table(source, table_name, tbl_options)
        exists = tbl.exists()
        if not exists and not skip:
            raise UtilError("Table %s does not exist. Use --skip "
                            "to skip missing tables." % table_name)
        if exists:
            if not tbl.get_indexes():
                if verbosity > 1 or report_indexes:
                    print "# Table %s is not indexed." % (table_name)
            else:
                if show_indexes:
                    tbl.print_indexes(index_format, verbosity)
                    # Show if table has primary key
                if verbosity > 1 or report_indexes:
                    if not tbl.has_primary_key():
                        if not tbl.has_unique_key():
                            print("# Table {0} does not contain neither a "
                                  "PRIMARY nor UNIQUE key.".format(table_name))
                        else:
                            print("# Table {0} does not contain a PRIMARY key."
                                  "".format(table_name))
                tbl.check_indexes(show_drops)

            # Show best and/or worst indexes
            if stats:
                if first_indexes is not None:
                    tbl.show_special_indexes(index_format, first_indexes, True)
                if last_indexes is not None:
                    tbl.show_special_indexes(index_format, last_indexes)

        if verbosity > 1:
            print "#"

    if verbosity > 1:
        print "# ...done."
Example #30
def _export_data(source, server_values, db_list, output_file, options):
    """Export data from the specified list of databases.

    This private method retrieves the data for each specified database in SQL
    format (e.g., INSERT statements) or in a tabular form (GRID, TAB, CSV,
    VERTICAL) to the specified file.

    This private method does not check permissions.

    source[in]         Server instance.
    server_values[in]  Server connection values.
    db_list[in]        List of databases to export.
    output_file[in]    Output file to store the export data.
    options[in]        Dictionary containing the options for the export:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, file_per_tbl,
                       and debug).
    """
    frmt = options.get("format", "sql")
    quiet = options.get("quiet", False)
    file_per_table = options.get("file_per_tbl", False)

    # Get tables list.
    table_list = []
    for db_name in db_list:
        source_db = Database(source, db_name)
        # Build table list.
        tables = source_db.get_db_objects("TABLE")
        for table in tables:
            table_list.append((db_name, table[0]))

    previous_db = ""
    export_tbl_tasks = []
    for table in table_list:

        # Determine start for processing table from a different database.
        db_name = table[0]
        if previous_db != db_name:
            previous_db = db_name
            if not quiet:
                if frmt == "sql":
                    q_db_name = quote_with_backticks(db_name)
                    output_file.write("USE {0};\n".format(q_db_name))
                output_file.write(
                    "# Exporting data from {0}\n".format(db_name)
                )
                if file_per_table:
                    output_file.write("# Writing table data to files.\n")

            # Print sample SOURCE command warning even in quiet mode.
            if file_per_table and frmt == 'sql':
                output_file.write("# The following are sample SOURCE commands."
                                  " If needed correct the path to match files "
                                  "location.\n")

        # Check multiprocess table export (only on POSIX systems).
        if options['multiprocess'] > 1 and os.name == 'posix':
            # Create export task.
            # Note: Server connection values are passed in the task dictionary
            # instead of a server instance, otherwise a multiprocessing error
            # is issued when assigning the task to a worker.
            export_task = {
                'srv_con': server_values,
                'table': table,
                'options': options,
            }
            export_tbl_tasks.append(export_task)
        else:
            # Export data from a table (no multiprocessing).
            _export_table_data(source, table, output_file, options)

        # Print SOURCE command if --file-per-table is used and format is SQL.
        if file_per_table and frmt == 'sql':
            tbl_name = ".".join(table)
            output_file.write(
                "# SOURCE {0}\n".format(_generate_tbl_filename(tbl_name, frmt))
            )

    # Export tables concurrently.
    if export_tbl_tasks:
        # Create process pool.
        workers_pool = multiprocessing.Pool(
            processes=options['multiprocess']
        )
        # Concurrently export tables.
        res = workers_pool.map_async(multiprocess_tbl_export_task,
                                     export_tbl_tasks)
        workers_pool.close()
        # Get list of temporary files with the exported data.
        tmp_files_list = res.get()
        workers_pool.join()

        # Merge resulting temp files (if generated).
        for tmp_filename in tmp_files_list:
            if tmp_filename:
                tmp_file = open(tmp_filename, 'r')
                shutil.copyfileobj(tmp_file, output_file)
                tmp_file.close()
                os.remove(tmp_filename)

    if not quiet:
        output_file.write("#...done.\n")
Example #31
def export_metadata(source, src_val, db_list, options):
    """Produce rows to be used to recreate objects in a database.

    This method retrieves the objects for each database listed in the form
    of CREATE (SQL) statements or in a tabular form to the file specified.
    The valid values for the format parameter are SQL, CSV, TSV, VERTICAL,
    or GRID.

    source[in]         Server instance
    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format,
                       debug, exclude_names, exclude_patterns)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.format import format_tabular_list
    from mysql.utilities.common.format import format_vertical_list

    format = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    column_type = options.get("display", "brief")
    skip_create = options.get("skip_create", False)
    quiet = options.get("quiet", False)
    skip_tables = options.get("skip_tables", False)
    skip_views = options.get("skip_views", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)

    if options.get("all", False):
        rows = source.get_all_databases()
        for row in rows:
            db_list.append(row[0])

    # Check user permissions on source for all databases
    for db_name in db_list:
        source_db = Database(source, db_name)
        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

    for db_name in db_list:

        # Get a Database class instance
        db = Database(source, db_name, options)

        # Error if the source database does not exist
        if not db.exists():
            raise UtilDBError("Source database does not exist - %s" % db_name,
                              -1, db_name)

        if not quiet:
            print "# Exporting metadata from %s" % db_name

        # Perform the extraction
        if format == "sql":
            db.init()
            # quote database name with backticks
            q_db_name = quote_with_backticks(db_name)
            if not skip_create:
                print "DROP DATABASE IF EXISTS %s;" % q_db_name
                print "CREATE DATABASE %s;" % q_db_name
            print "USE %s;" % q_db_name
            for dbobj in db.get_next_object():
                if dbobj[0] == "GRANT" and not skip_grants:
                    if not quiet:
                        print "# Grant:"
                    if dbobj[1][3]:
                        create_str = "GRANT %s ON %s.%s TO %s;" % \
                                     (dbobj[1][1], q_db_name,
                                      quote_with_backticks(dbobj[1][3]),
                                      dbobj[1][0])
                    else:
                        create_str = "GRANT %s ON %s.* TO %s;" % \
                                     (dbobj[1][1], q_db_name, dbobj[1][0])
                    if "%" in create_str:
                        create_str = re.sub("%", "%%", create_str)
                    print create_str
                else:
                    if not quiet:
                        print "# %s: %s.%s" % (dbobj[0], db_name, dbobj[1][0])
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        print "DELIMITER ||"
                    print "%s;" % db.get_create_statement(
                        db_name, dbobj[1][0], dbobj[0])
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        print "||"
                        print "DELIMITER ;"
        else:
            objects = []
            if not skip_tables:
                objects.append("TABLE")
            if not skip_views:
                objects.append("VIEW")
            if not skip_triggers:
                objects.append("TRIGGER")
            if not skip_procs:
                objects.append("PROCEDURE")
            if not skip_funcs:
                objects.append("FUNCTION")
            if not skip_events:
                objects.append("EVENT")
            if not skip_grants:
                objects.append("GRANT")
            for obj_type in objects:
                sys.stdout.write("# %sS in %s:" % (obj_type, db_name))
                if format in ('grid', 'vertical'):
                    rows = db.get_db_objects(obj_type, column_type, True)
                else:
                    rows = db.get_db_objects(obj_type, column_type, True, True)
                if len(rows[1]) < 1:
                    print " (none found)"
                else:
                    print
                    # Cannot use print_list here because we must manipulate
                    # the behavior of format_tabular_list
                    list_options = {}
                    if format == "vertical":
                        format_vertical_list(sys.stdout, rows[0], rows[1])
                    elif format == "tab":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = '\t'
                        format_tabular_list(sys.stdout, rows[0], rows[1],
                                            list_options)
                    elif format == "csv":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = ','
                        format_tabular_list(sys.stdout, rows[0], rows[1],
                                            list_options)
                    else:  # default to table format
                        format_tabular_list(sys.stdout, rows[0], rows[1])

    if not quiet:
        print "#...done."

    return True
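
A hedged sketch of a metadata-only export; the option values are hypothetical, and 'source' is assumed to be a Server instance connected with the same values as src_val (for example, obtained via connect_servers() as in the earlier sketches):

src_val = {'user': 'root', 'password': 'secret', 'host': 'localhost', 'port': 3306}
# 'source' is the connected Server instance for src_val.

options = {
    'format': 'sql',
    'skip_create': False,  # keep the DROP/CREATE DATABASE statements
    'skip_data': True,     # metadata only
    'display': 'brief',
    'quiet': False,
}
export_metadata(source, src_val, ['db1'], options)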
Example #32
def _check_tables_structure(server1, server2, object1, object2, options,
                            diff_type):
    """Check if the tables have the same structure.

    This method compares the table structures, ignoring the order of the
    columns, and retrieves the differences between the table options.

    server1[in]        first server connection.
    server2[in]        second server connection.
    object1            the first object in the compare in the form: (db.name).
    object2            the second object in the compare in the form: (db.name).
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype, width, suppress_sql).
    diff_type[in]      difference type.

    Returns a tuple (bool, list, bool) - The first value is a boolean that
    indicates if both tables have the same structure (i.e., column
    definitions). The second is the list of table option differences.
    Finally, the third is a boolean indicating if the partition options are
    the same.
    """
    try:
        m_obj1 = re.match(REGEXP_QUALIFIED_OBJ_NAME, object1)
        db1, name1 = m_obj1.groups()
        m_obj2 = re.match(REGEXP_QUALIFIED_OBJ_NAME, object2)
        db2, name2 = m_obj2.groups()
    except:
        raise UtilError("Invalid object name arguments for diff_objects(): "
                        "{0}, {1}.".format(object1, object2))

    # If the second part of the object qualified name is None, then the format
    # is not 'db_name.obj_name' for object1 and therefore must treat it as a
    # database name.
    if not name1:
        return None, None, None

    db_1 = Database(server1, db1, options)
    db_2 = Database(server2, db2, options)

    # Get tables definitions.
    table_1 = db_1.get_object_definition(db1, name1, 'TABLE')[0]
    table_2 = db_2.get_object_definition(db2, name2, 'TABLE')[0]

    # Check table options.
    table1_opts = db_1.get_table_options(db1, name1)
    table2_opts = db_2.get_table_options(db2, name2)
    diff = _get_diff(table1_opts, table2_opts, object1, object2, diff_type)

    # Check if both tables have the same columns definition.
    # Discard column order.
    table_1_cols = [col[1:] for col in table_1[1]]
    table_2_cols = [col[1:] for col in table_2[1]]
    same_cols_def = set(table_1_cols) == set(table_2_cols)

    # Check if both tables have the same partition options.
    # Discard partition name.
    table_1_part = [part[1:] for part in table_1[2]]
    table_2_part = [part[1:] for part in table_2[2]]
    same_partition_opts = set(table_1_part) == set(table_2_part)

    # Return tables check results.
    return same_cols_def, diff, same_partition_opts
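
A rough sketch of calling this helper and unpacking its result; the server objects, the object names, the 'unified' diff type, and the minimal options dictionary are all assumptions:

# 'server1' and 'server2' are assumed to be connected Server instances.
options = {'quiet': True, 'verbosity': 0}
same_cols, option_diff, same_parts = _check_tables_structure(
    server1, server2, 'db1.t1', 'db2.t1', options, 'unified')
if not same_cols:
    print("# The tables do not have the same column definitions.")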
Example #33
def _get_transform(server1, server2, object1, object2, options):
    """Get the transformation SQL statements
    
    This method generates the SQL statements to transform the destination
    object based on direction of the compare.
    
    server1[in]        first server connection
    server2[in]        second server connection
    object1            the first object in the compare in the form: (db.name)
    object2            the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, etc.)

    Returns tuple - (bool - same db name?, list of transformation statements)
    """
    from mysql.utilities.common.database import Database
    from mysql.utilities.common.sql_transform import SQLTransformer

    obj_type = None
    direction = options.get("changes-for", "server1")

    # If there is no dot, we do not have the format 'db_name.obj_name' for
    # object1 and therefore must treat it as a database name.
    if object1.find('.') == -1:
        obj_type = "DATABASE"

        # We are working with databases so db and name need to be set
        # to the database name to tell the get_object_definition() method
        # to retrieve the database information.
        db1 = object1
        db2 = object2
        name1 = object1
        name2 = object2
    else:
        try:
            db1, name1 = object1.split('.')
            db2, name2 = object2.split('.')
        except:
            raise UtilError("Invalid object name arguments for _get_transform"
                            "(): %s, %s." % (object1, object2))

    db_1 = Database(server1, db1, options)
    db_2 = Database(server2, db2, options)

    if obj_type is None:
        obj_type = db_1.get_object_type(name1)

    transform_str = []
    obj1 = db_1.get_object_definition(db1, name1, obj_type)
    obj2 = db_2.get_object_definition(db2, name2, obj_type)

    # Get the transformation based on direction.
    transform_str = []
    same_db_name = True
    xform = SQLTransformer(db_1, db_2, obj1[0], obj2[0], obj_type,
                           options.get('verbosity', 0))

    differences = xform.transform_definition()
    if differences is not None and len(differences) > 0:
        transform_str.extend(differences)

    return transform_str
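
A minimal sketch of retrieving the transformation statements for one table; the server objects, object names, and option values are hypothetical:

# 'server1' and 'server2' are assumed to be connected Server instances.
options = {'quiet': True, 'verbosity': 0, 'changes-for': 'server1'}
for statement in _get_transform(server1, server2, 'db1.t1', 'db2.t1', options):
    print(statement)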
Example #34
def database_compare(server1_val, server2_val, db1, db2, options):
    """Perform a consistency check among two databases

    This method performs a consistency check between two databases, ensuring
    that the databases exist, that the objects match in number and type, that
    the row counts match for all tables, and that the data in each matching
    table is consistent.

    If any errors or differences are found, the operation stops and the
    difference is printed.

    The following steps are therefore performed:

    1) check to make sure the databases exist and are the same definition
    2) check to make sure the same objects exist in each database
    3) for each object, ensure the object definitions match among the databases
    4) for each table, ensure the row counts are the same
    5) for each table, ensure the data is the same

    By default, the operation stops on any failure of any test. The caller can
    override this behavior by specifying run_all_tests = True in the options
    dictionary.

    TODO:   allow the user to skip object types (e.g., --skip-triggers, etc.)

    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    db1[in]            the first database in the compare
    db2[in]            the second database in the compare
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype, run_all_tests)

    Returns bool True if all object match, False if partial match
    """

    _check_option_defaults(options)

    # Connect to servers
    server1, server2 = server_connect(server1_val, server2_val, db1, db2,
                                      options)

    # Check to see if databases exist
    db1_conn = Database(server1, db1, options)
    if not db1_conn.exists():
        raise UtilDBError(_ERROR_DB_MISSING.format(db1))

    db2_conn = Database(server2, db2, options)
    if not db2_conn.exists():
        raise UtilDBError(_ERROR_DB_MISSING.format(db2))

    # Print a different message if server2 is not defined
    if not server2_val:
        message = "# Checking databases {0} and {1} on server1\n#"
    else:
        message = "# Checking databases {0} on server1 and {1} on server2\n#"
    print(message.format(db1_conn.db_name, db2_conn.db_name))

    # Check for database existence and CREATE differences
    _check_databases(server1, server2, db1_conn.q_db_name, db2_conn.q_db_name,
                     options)

    # Get common objects and report discrepancies
    (in_both, differs) = _check_objects(server1, server2, db1, db2, db1_conn,
                                        db2_conn, options)
    success = not differs

    reporter = _CompareDBReport(options)
    reporter.print_heading()

    # Remaining operations can occur in a loop one for each object.
    for item in in_both:
        error_list = []
        debug_msgs = []
        # Set the object type
        obj_type = item[0]

        q_obj1 = "{0}.{1}".format(quote_with_backticks(db1),
                                  quote_with_backticks(item[1][0]))
        q_obj2 = "{0}.{1}".format(quote_with_backticks(db2),
                                  quote_with_backticks(item[1][0]))

        reporter.report_object(obj_type, item[1][0])

        # Check for differences in CREATE
        errors = _compare_objects(server1, server2, q_obj1, q_obj2, reporter,
                                  options, obj_type)
        error_list.extend(errors)

        # Check row counts
        if obj_type == 'TABLE':
            errors = _check_row_counts(server1, server2, q_obj1, q_obj2,
                                       reporter, options)
            if len(errors) != 0:
                error_list.extend(errors)
        else:
            reporter.report_state("-")

        # Check data consistency for tables
        if obj_type == 'TABLE':
            errors, debug_msgs = _check_data_consistency(
                server1, server2, q_obj1, q_obj2, reporter, options)
            if len(errors) != 0:
                error_list.extend(errors)
        else:
            reporter.report_state("-")

        if options['verbosity'] > 0:
            print
            get_create_object(server1, q_obj1, options, obj_type)
            get_create_object(server2, q_obj2, options, obj_type)

        if debug_msgs and options['verbosity'] > 2:
            reporter.report_errors(debug_msgs)

        reporter.report_errors(error_list)

        # Fail if errors are found
        if error_list:
            success = False

    return success
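
A hedged sketch of a full database comparison; the connection values and option values are hypothetical, and _check_option_defaults() is assumed to supply any remaining defaults this sketch omits:

server1_val = {'user': 'root', 'password': 'secret', 'host': 'host1', 'port': 3306}
server2_val = {'user': 'root', 'password': 'secret', 'host': 'host2', 'port': 3306}
options = {
    'quiet': False,
    'verbosity': 0,
    'difftype': 'unified',
    'run_all_tests': True,  # keep going and report every difference
}

if database_compare(server1_val, server2_val, 'db1', 'db2', options):
    print("# Databases are consistent.")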
Example #35
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The do_drop parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, do_drop, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        do_drop  - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """
    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    do_drop = options.get("do_drop", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    skip_gtid = options.get("skip_gtid", False)
    locking = options.get("locking", "snapshot")

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)
    cloning = (src_val == dest_val) or dest_val is None

    source = servers[0]
    if cloning:
        destination = servers[0]
    else:
        destination = servers[1]
        # Test if SQL_MODE is 'NO_BACKSLASH_ESCAPES' in the destination server
        if destination.select_variable("SQL_MODE") == "NO_BACKSLASH_ESCAPES":
            print(
                "# WARNING: The SQL_MODE in the destination server is "
                "'NO_BACKSLASH_ESCAPES', it will be changed temporarily "
                "for data insertion.")

    src_gtid = source.supports_gtid() == 'ON'
    dest_gtid = destination.supports_gtid() == 'ON' if destination else False

    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None))  # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")
    elif not skip_gtid and src_gtid:
        # Check to see if this is a full copy (complete backup)
        all_dbs = source.exec_query("SHOW DATABASES")
        dbs = [db[0] for db in db_list]
        for db in all_dbs:
            if db[0].upper() in [
                    "MYSQL", "INFORMATION_SCHEMA", "PERFORMANCE_SCHEMA"
            ]:
                continue
            if not db[0] in dbs:
                print(_GTID_BACKUP_WARNING)
                break

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)

        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
            'skip_triggers': skip_triggers,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        # Make a dictionary containing the list of objects from source db
        source_objects = {
            "views": source_db.get_db_objects("VIEW", columns="full"),
            "procs": source_db.get_db_objects("PROCEDURE", columns="full"),
            "funcs": source_db.get_db_objects("FUNCTION", columns="full"),
            "events": source_db.get_db_objects("EVENT", columns="full"),
            "triggers": source_db.get_db_objects("TRIGGER", columns="full"),
        }

        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options, source_objects, do_drop)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])

        # Check storage engines
        check_engine_options(destination, options.get("new_engine", None),
                             options.get("def_engine", None), False,
                             options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    rpl_info = None

    # Turn off foreign keys if they were on at the start
    destination.disable_foreign_key_checks(True)

    # Get GTID commands
    if not skip_gtid:
        gtid_info = get_gtid_commands(source)
        if src_gtid and not dest_gtid:
            print(_NON_GTID_WARNING % ("destination", "source", "to"))
        elif not src_gtid and dest_gtid:
            print(_NON_GTID_WARNING % ("source", "destination", "from"))
    else:
        gtid_info = None
        if src_gtid and not cloning:
            print(_GTID_WARNING)

    # If cloning, turn off gtid generation
    if gtid_info and cloning:
        gtid_info = None
    # if GTIDs enabled, write the GTID commands
    if gtid_info and dest_gtid:
        # Check GTID version for complete feature support
        destination.check_gtid_version()
        # Check the gtid_purged value too
        destination.check_gtid_executed()
        for cmd in gtid_info[0]:
            print "# GTID operation:", cmd
            destination.exec_query(cmd, {'fetch': False, 'commit': False})

    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE", {'fetch': False, 'commit': False})

    # Copy (create) objects.
    # We need to delay trigger and events to after data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True

    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy tables data
    if not skip_data and not skip_tables:

        # Copy tables
        for db_name in db_list:

            # Get a Database class instance
            db = Database(source, db_name[0], options)

            # Perform the copy
            # Note: No longer use threads, use multiprocessing instead.
            db.init()
            db.copy_data(db_name[1],
                         options,
                         destination,
                         connections=1,
                         src_con_val=src_val,
                         dest_con_val=dest_val)

    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info and dest_gtid:
        print "# GTID operation:", gtid_info[1]
        destination.exec_query(gtid_info[1])

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print(cmd)
            else:
                if verbose:
                    print(cmd)
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    # Turn on foreign keys if they were on at the start
    destination.disable_foreign_key_checks(False)

    if not quiet:
        print "#...done."
    return True
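
# Usage sketch: a minimal, hypothetical call to copy_db() as defined above
# (in MySQL Utilities it is expected to be importable from
# mysql.utilities.command.dbcopy). Connection values, database names, and
# option values below are placeholders.
src_val = {"user": "copy_user", "password": "secret", "host": "localhost",
           "port": 3306, "socket": None}
dest_val = {"user": "copy_user", "password": "secret", "host": "other-host",
            "port": 3306, "socket": None}
# Each db_list entry is (source_db, destination_db); None keeps the same name.
db_list = [("sales", "sales_copy")]
options = {"do_drop": True, "quiet": False, "locking": "snapshot"}
copy_db(src_val, dest_val, db_list, options)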
Example #36
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The force parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, force, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        force    - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """
    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    skip_gtid = options.get("skip_gtid", False)
    locking = options.get("locking", "snapshot")

    conn_options = {
        'quiet': quiet,
        'version': "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)
    cloning = (src_val == dest_val) or dest_val is None

    source = servers[0]
    if cloning:
        destination = servers[0]
    else:
        destination = servers[1]

    src_gtid = source.supports_gtid() == 'ON'
    dest_gtid = destination.supports_gtid() == 'ON' if destination else False

    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None))  # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")
    elif not skip_gtid and src_gtid:
        # Check to see if this is a full copy (complete backup)
        all_dbs = source.exec_query("SHOW DATABASES")
        dbs = [db[0] for db in db_list]
        for db in all_dbs:
            if db[0].upper() in ["MYSQL", "INFORMATION_SCHEMA",
                                 "PERFORMANCE_SCHEMA"]:
                continue
            if not db[0] in dbs:
                print _GTID_BACKUP_WARNING
                break

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)

        # Make a dictionary of the options
        access_options = {
            'skip_views': skip_views,
            'skip_procs': skip_procs,
            'skip_funcs': skip_funcs,
            'skip_grants': skip_grants,
            'skip_events': skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                            "source - source = %s, destination = %s" %
                            (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])

        # Check storage engines
        check_engine_options(destination,
                             options.get("new_engine", None),
                             options.get("def_engine", None),
                             False, options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    rpl_info = None

    # Turn off foreign keys if they were on at the start
    destination.disable_foreign_key_checks(True)

    # Get GTID commands
    if not skip_gtid:
        gtid_info = get_gtid_commands(source)
        if src_gtid and not dest_gtid:
            print _NON_GTID_WARNING % ("destination", "source", "to")
        elif not src_gtid and dest_gtid:
            print _NON_GTID_WARNING % ("source", "destination", "from")
    else:
        gtid_info = None
        if src_gtid and not cloning:
            print _GTID_WARNING

    # If cloning, turn off gtid generation
    if gtid_info and cloning:
        gtid_info = None
    # if GTIDs enabled, write the GTID commands
    if gtid_info and dest_gtid:
        # Check GTID version for complete feature support
        destination.check_gtid_version()
        # Check the gtid_purged value too
        destination.check_gtid_executed()
        for cmd in gtid_info[0]:
            print "# GTID operation:", cmd
            destination.exec_query(cmd, {'fetch': False, 'commit': False})

    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE", {'fetch': False, 'commit': False})

    # Copy (create) objects.
    # We need to delay trigger and events to after data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True

    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy tables data
    if not skip_data and not skip_tables:

        # Copy tables
        for db_name in db_list:

            # Get a Database class instance
            db = Database(source, db_name[0], options)

            # Perform the copy
            # Note: No longer use threads, use multiprocessing instead.
            db.init()
            db.copy_data(db_name[1], options, destination, connections=1,
                         src_con_val=src_val, dest_con_val=dest_val)

    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    # if GTIDs enabled, write the GTID-related commands
    if gtid_info and dest_gtid:
        print "# GTID operation:", gtid_info[1]
        destination.exec_query(gtid_info[1])

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    # Turn on foreign keys if they were on at the start
    destination.disable_foreign_key_checks(False)

    if not quiet:
        print "#...done."
    return True
Example #37
def object_diff(server1_val, server2_val, object1, object2, options,
                object_type=None):
    """diff the definition of two objects

    Find the difference between two object definitions.

    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    object1[in]        the first object in the compare in the form: (db.name)
    object2[in]        the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype)
    object_type[in]    type of the objects to be compared (e.g., TABLE,
                       PROCEDURE, etc.). By default None (not defined).

    Returns None = objects are the same, diff[] = tables differ
    """
    server1, server2 = server_connect(server1_val, server2_val,
                                      object1, object2, options)

    force = options.get("force", None)
    # Get the object type if unknown considering that objects of different
    # types can be found with the same name.
    if not object_type:
        # Get object types of object1
        sql_mode = server1.select_variable("SQL_MODE")
        db_name, obj_name = parse_object_name(object1, sql_mode)
        db = Database(server1, db_name, options)
        obj1_types = db.get_object_type(obj_name)
        if not obj1_types:
            msg = "The object {0} does not exist.".format(object1)
            if not force:
                raise UtilDBError(msg)
            print("ERROR: {0}".format(msg))
            return []

        # Get object types of object2
        sql_mode = server2.select_variable("SQL_MODE")
        db_name, obj_name = parse_object_name(object2, sql_mode)
        db = Database(server2, db_name, options)
        obj2_types = db.get_object_type(obj_name)
        if not obj2_types:
            msg = "The object {0} does not exist.".format(object2)
            if not force:
                raise UtilDBError(msg)
            print("ERROR: {0}".format(msg))
            return []

        # Merge types found for both objects
        obj_types = set(obj1_types + obj2_types)

        # Diff objects considering all types found
        result = []
        for obj_type in obj_types:
            res = diff_objects(server1, server2, object1, object2, options,
                               obj_type)
            if res:
                result.append(res)
        return result if len(result) > 0 else None
    else:
        # Diff objects of known type
        return diff_objects(server1, server2, object1, object2, options,
                            object_type)
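
# Usage sketch: a hypothetical call to object_diff() as defined above,
# comparing the definition of db1.t1 on the first server with db2.t1 on the
# second using a unified diff. Connection and option values are placeholders;
# additional option keys may be required depending on the diff settings used.
server1_val = {"user": "root", "password": "", "host": "localhost",
               "port": 3306, "socket": None}
server2_val = {"user": "root", "password": "", "host": "localhost",
               "port": 3307, "socket": None}
options = {"quiet": False, "verbosity": 0, "difftype": "unified",
           "force": False}
result = object_diff(server1_val, server2_val, "db1.t1", "db2.t1", options)
if result is None:
    print("Objects have the same definition.")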
Example #38
def _export_metadata(source, db_list, output_file, options):
    """Export metadata from the specified list of databases.

    This private method retrieves the object metadata for each database
    listed and writes it to the specified file, either as CREATE (SQL)
    statements or in a tabular form (GRID, TAB, CSV, VERTICAL).

    This private method does not check permissions.

    source[in]         Server instance.
    db_list[in]        List of databases to export.
    output_file[in]    Output file to store the metadata information.
    options[in]        Dictionary containing the options for the export:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format,
                       debug, exclude_names, exclude_patterns)
    """
    frmt = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    column_type = options.get("display", "brief")
    quiet = options.get("quiet", False)
    skip_create = options.get("skip_create", False)
    skip_tables = options.get("skip_tables", False)
    skip_views = options.get("skip_views", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    sql_mode = source.select_variable("SQL_MODE")

    for db_name in db_list:

        # Get a Database class instance
        db = Database(source, db_name, options)

        # Export database metadata
        if not quiet:
            output_file.write("# Exporting metadata from {0}\n".format(
                db.q_db_name))

        # Perform the extraction
        if frmt == "sql":
            db.init()
            if not skip_create:
                output_file.write("DROP DATABASE IF EXISTS {0};\n".format(
                    db.q_db_name))
                output_file.write("CREATE DATABASE {0};\n".format(
                    db.q_db_name))
            output_file.write("USE {0};\n".format(db.q_db_name))
            for dbobj in db.get_next_object():
                if dbobj[0] == "GRANT" and not skip_grants:
                    if not quiet:
                        output_file.write("# Grant:\n")
                    if dbobj[1][3]:
                        create_str = "GRANT {0} ON {1}.{2} TO {3};\n".format(
                            dbobj[1][1], db.q_db_name,
                            quote_with_backticks(dbobj[1][3], sql_mode),
                            dbobj[1][0])
                    else:
                        create_str = "GRANT {0} ON {1}.* TO {2};\n".format(
                            dbobj[1][1], db.q_db_name, dbobj[1][0])
                    output_file.write(create_str)
                else:
                    if not quiet:
                        output_file.write("# {0}: {1}.{2}\n".format(
                            dbobj[0], db.q_db_name,
                            quote_with_backticks(dbobj[1][0], sql_mode)))
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        output_file.write("DELIMITER ||\n")
                    output_file.write("{0};\n".format(
                        db.get_create_statement(db.db_name, dbobj[1][0],
                                                dbobj[0])))
                    if (dbobj[0] == "PROCEDURE" and not skip_procs) or \
                       (dbobj[0] == "FUNCTION" and not skip_funcs) or \
                       (dbobj[0] == "EVENT" and not skip_events) or \
                       (dbobj[0] == "TRIGGER" and not skip_triggers):
                        output_file.write("||\n")
                        output_file.write("DELIMITER ;\n")
        else:
            objects = []
            if not skip_tables:
                objects.append("TABLE")
            if not skip_funcs:
                objects.append("FUNCTION")
            if not skip_procs:
                objects.append("PROCEDURE")
            if not skip_views:
                objects.append("VIEW")
            if not skip_triggers:
                objects.append("TRIGGER")
            if not skip_events:
                objects.append("EVENT")
            if not skip_grants:
                objects.append("GRANT")
            for obj_type in objects:
                output_file.write("# {0}S in {1}:".format(
                    obj_type, db.q_db_name))
                if frmt in ('grid', 'vertical'):
                    rows = db.get_db_objects(obj_type, column_type, True)
                else:
                    rows = db.get_db_objects(obj_type, column_type, True, True)
                if len(rows[1]) < 1:
                    output_file.write(" (none found)\n")
                else:
                    output_file.write("\n")
                    # Cannot use print_list here because we must manipulate
                    # the behavior of format_tabular_list.
                    list_options = {}
                    if frmt == "vertical":
                        format_vertical_list(output_file, rows[0], rows[1])
                    elif frmt == "tab":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = '\t'
                        format_tabular_list(output_file, rows[0], rows[1],
                                            list_options)
                    elif frmt == "csv":
                        list_options['print_header'] = not no_headers
                        list_options['separator'] = ','
                        format_tabular_list(output_file, rows[0], rows[1],
                                            list_options)
                    else:  # default to table format
                        format_tabular_list(output_file, rows[0], rows[1])

    if not quiet:
        output_file.write("#...done.\n")
Example #39
def database_compare(server1_val, server2_val, db1, db2, options):
    """Perform a consistency check among two databases

    This method performs a database consistency check among two databases which
    ensures the databases exist, the objects match in number and type, the row
    counts match for all tables, and the data for each matching tables is
    consistent.

    If any errors or differences are found, the operation stops and the
    difference is printed.

    The following steps are therefore performed:

    1) check to make sure the databases exist and are the same definition
    2) check to make sure the same objects exist in each database
    3) for each object, ensure the object definitions match among the databases
    4) for each table, ensure the row counts are the same
    5) for each table, ensure the data is the same

    By default, the operation stops on any failure of any test. The caller can
    override this behavior by specifying run_all_tests = True in the options
    dictionary.

    TODO:   allow the user to skip object types (e.g. --skip-triggers, et. al.)

    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    db1[in]            the first database in the compare
    db2[in]            the second database in the compare
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype, run_all_tests)

    Returns bool True if all object match, False if partial match
    """

    _check_option_defaults(options)

    # Connect to servers
    server1, server2 = server_connect(server1_val, server2_val,
                                      db1, db2, options)

    # Check to see if databases exist
    db1_conn = Database(server1, db1, options)
    if not db1_conn.exists():
        raise UtilDBError(_ERROR_DB_MISSING.format(db1))

    db2_conn = Database(server2, db2, options)
    if not db2_conn.exists():
        raise UtilDBError(_ERROR_DB_MISSING.format(db2))

    # Print a different message if server2 is not defined
    if not server2_val:
        message = "# Checking databases {0} and {1} on server1\n#"
    else:
        message = "# Checking databases {0} on server1 and {1} on server2\n#"
    print(message.format(db1_conn.db_name, db2_conn.db_name))

    # Check for database existence and CREATE differences
    _check_databases(server1, server2, db1_conn.q_db_name, db2_conn.q_db_name,
                     options)

    # Get common objects and report discrepancies
    (in_both, differs) = _check_objects(server1, server2, db1, db2,
                                        db1_conn, db2_conn, options)
    success = not differs

    reporter = _CompareDBReport(options)
    reporter.print_heading()

    # Remaining operations can occur in a loop, one iteration per object.
    for item in in_both:
        error_list = []

        # Set the object type
        obj_type = item[0]

        obj1 = "{0}.{1}".format(db1, item[1][0])
        obj2 = "{0}.{1}".format(db2, item[1][0])
        q_obj1 = "{0}.{1}".format(quote_with_backticks(db1),
                                  quote_with_backticks(item[1][0]))
        q_obj2 = "{0}.{1}".format(quote_with_backticks(db2),
                                  quote_with_backticks(item[1][0]))

        reporter.report_object(obj_type, item[1][0])

        # Check for differences in CREATE
        errors = _compare_objects(server1, server2, q_obj1, q_obj2,
                                  reporter, options, obj_type)
        error_list.extend(errors)

        # Check row counts
        if obj_type == 'TABLE':
            errors = _check_row_counts(server1, server2, q_obj1, q_obj2,
                                       reporter, options)
            if len(errors) != 0:
                error_list.extend(errors)
        else:
            reporter.report_state("-")

        # Check data consistency for tables
        if obj_type == 'TABLE':
            errors = _check_data_consistency(server1, server2, q_obj1, q_obj2,
                                             reporter, options)
            if len(errors) != 0:
                error_list.extend(errors)
        else:
            reporter.report_state("-")

        if options['verbosity'] > 0:
            print()
            get_create_object(server1, obj1, options, obj_type)
            get_create_object(server2, obj2, options, obj_type)

        reporter.report_errors(error_list)

        # Fail if errors are found
        if error_list:
            success = False

    return success
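
# Usage sketch: a hypothetical call to database_compare() as defined above,
# comparing 'sales' on server1 with 'sales_backup' on server2 and running all
# tests instead of stopping at the first failure. Connection and option
# values are placeholders.
server1_val = {"user": "root", "password": "", "host": "prod-db",
               "port": 3306, "socket": None}
server2_val = {"user": "root", "password": "", "host": "backup-db",
               "port": 3306, "socket": None}
options = {"quiet": False, "verbosity": 0, "difftype": "differ",
           "run_all_tests": True}
if database_compare(server1_val, server2_val, "sales", "sales_backup",
                    options):
    print("Databases are consistent.")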
Example #40
def get_copy_lock(server,
                  db_list,
                  options,
                  include_mysql=False,
                  cloning=False):
    """Get an instance of the Lock class with a standard copy (read) lock

    This method creates an instance of the Lock class using the lock type
    specified in the options. It is used to initiate the locks for the copy
    and related operations.

    server[in]             Server instance for locking calls
    db_list[in]            list of database names
    options[in]            option dictionary
                           Must include the skip_* options for copy and export
    include_mysql[in]      if True, include the mysql tables for copy operation
    cloning[in]            if True, create lock tables with WRITE on dest db
                           Default = False

    Returns Lock - Lock class instance
    """
    rpl_mode = options.get("rpl_mode", None)
    locking = options.get('locking', 'snapshot')

    # Determine if we need to use FTWRL. There are two conditions:
    #  - running on master (rpl_mode = 'master')
    #  - using locking = 'lock-all' and rpl_mode present
    if (rpl_mode in ["master", "both"]) or \
            (rpl_mode and locking == 'lock-all'):
        new_opts = options.copy()
        new_opts['locking'] = 'flush'
        lock = Lock(server, [], new_opts)

    # if this is a lock-all type and not replication operation,
    # find all tables and lock them
    # pylint: disable=R0101
    elif locking == 'lock-all':
        table_lock_list = []

        # Build table lock list
        for db_name in db_list:
            db = db_name[0] if isinstance(db_name, tuple) else db_name
            source_db = Database(server, db)
            tables = source_db.get_db_objects("TABLE")
            for table in tables:
                table_lock_list.append(("{0}.{1}".format(db,
                                                         table[0]), 'READ'))
                # Cloning requires issuing WRITE locks because we use same
                # conn.
                # Non-cloning will issue WRITE lock on a new destination conn.
                if cloning:
                    if db_name[1] is None:
                        db_clone = db_name[0]
                    else:
                        db_clone = db_name[1]
                    # For cloning, we use the same connection so we need to
                    # lock the destination tables with WRITE.
                    table_lock_list.append(
                        ("{0}.{1}".format(db_clone, table[0]), 'WRITE'))
            # We must include views for server version 5.5.3 and higher
            if server.check_version_compat(5, 5, 3):
                tables = source_db.get_db_objects("VIEW")
                for table in tables:
                    table_lock_list.append(
                        ("{0}.{1}".format(db, table[0]), 'READ'))
                    # Cloning requires issuing WRITE locks because we use same
                    # conn.
                    # Non-cloning will issue WRITE lock on a new destination
                    # conn.
                    if cloning:
                        if db_name[1] is None:
                            db_clone = db_name[0]
                        else:
                            db_clone = db_name[1]
                        # For cloning, we use the same connection so we need to
                        # lock the destination tables with WRITE.
                        table_lock_list.append(
                            ("{0}.{1}".format(db_clone, table[0]), 'WRITE'))

        # Now add mysql tables
        if include_mysql:
            # Don't lock proc tables if no procs of funcs are being read
            if not options.get('skip_procs', False) and \
               not options.get('skip_funcs', False):
                table_lock_list.append(("mysql.proc", 'READ'))
                table_lock_list.append(("mysql.procs_priv", 'READ'))
            # Don't lock event table if events are skipped
            if not options.get('skip_events', False):
                table_lock_list.append(("mysql.event", 'READ'))
        lock = Lock(server, table_lock_list, options)

    # Use default or no locking option
    else:
        lock = Lock(server, [], options)

    return lock
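
# Usage sketch: a hypothetical lock/unlock pattern around a copy or export,
# mirroring how copy_db() above uses get_copy_lock(). 'source' is assumed to
# be a connected Server instance; the database name and option values are
# placeholders.
options = {"locking": "lock-all", "rpl_mode": None}
my_lock = get_copy_lock(source, ["sales"], options, include_mysql=True)
try:
    pass  # ... copy or export the table data while the locks are held ...
finally:
    my_lock.unlock()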
Example #41
def export_data(source, src_val, db_list, options):
    """Produce data for the tables in a database.

    This method retrieves the data for each table in the databases listed in
    the form of BULK INSERT (SQL) statements or in a tabular form to the file
    specified. The valid values for the format parameter are SQL, CSV, TSV,
    VERTICAL, or GRID.

    source[in]         Server instance
    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    db_list[in]        list of database names for which to export data
    options[in]        a dictionary containing the options for the export:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, file_per_tbl,
                       and debug)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.table import Table

    format = options.get("format", "sql")
    no_headers = options.get("no_headers", True)
    column_type = options.get("display", "brief")
    single = options.get("single", False)
    skip_blobs = options.get("skip_blobs", False)
    quiet = options.get("quiet", False)
    file_per_table = options.get("file_per_tbl", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)

    if options.get("all", False):
        rows = source.get_all_databases()
        for row in rows:
            if row[0] not in db_list:
                db_list.append(row[0])
                
    # Check if database exists and user permissions on source for all databases
    table_lock_list = []
    table_list = []
    for db_name in db_list:
        source_db = Database(source, db_name)

        # Make a dictionary of the options
        access_options = {
            'skip_views'  : skip_views,
            'skip_procs'  : skip_procs,
            'skip_funcs'  : skip_funcs,
            'skip_grants' : skip_grants,
            'skip_events' : skip_events,
        }

        # Error is source database does not exist
        if not source_db.exists():
            raise UtilDBError("Source database does not exist - %s" % db_name,
                              -1, db_name)
            
        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)

        # Build table list
        tables = source_db.get_db_objects("TABLE")
        for table in tables:
            table_list.append((db_name, table[0]))
        
    old_db = ""
    for table in table_list:
        db_name = table[0]
        tbl_name = "%s.%s" % (db_name, table[1])
        # quote database and table name with backticks
        q_db_name = quote_with_backticks(db_name)
        q_tbl_name = "%s.%s" % (q_db_name, quote_with_backticks(table[1]))
        if not quiet and old_db != db_name:
            old_db = db_name
            if format == "sql":
               print "USE %s;" % q_db_name
            print "# Exporting data from %s" % db_name
            if file_per_table:
                print "# Writing table data to files."

        tbl_options = {
            'verbose'  : False,
            'get_cols' : True,
            'quiet'    : quiet
        }
        cur_table = Table(source, q_tbl_name, tbl_options)
        if single and format not in ("sql", "grid", "vertical"):
            retrieval_mode = -1
            first = True
        else:
            retrieval_mode = 1
            first = False

        message = "# Data for table %s: " % q_tbl_name

        # switch for writing to files
        if file_per_table:
            if format == 'sql':
               file_name = tbl_name + ".sql"
            else:
                file_name = tbl_name + ".%s" % format.lower()
            outfile = open(file_name, "w")
            outfile.write(message + "\n")
        else:
            outfile = None
            print message

        for data_rows in cur_table.retrieve_rows(retrieval_mode):
            _export_row(data_rows, cur_table, format, single,
                        skip_blobs, first, no_headers, outfile)
            if first:
                first = False
 
        if file_per_table:
            outfile.close()
  
    if not quiet:
        print "#...done."

    return True
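
# Usage sketch: a hypothetical call to export_data() as defined above,
# writing CSV data for every table in 'sales' to one file per table. The
# connection values and option values are placeholders.
from mysql.utilities.common.server import connect_servers

src_val = {"user": "root", "password": "", "host": "localhost",
           "port": 3306, "socket": None}
source = connect_servers(src_val, None, {"quiet": False,
                                         "version": "5.1.30"})[0]
options = {"format": "csv", "file_per_tbl": True, "quiet": False,
           "skip_blobs": True}
export_data(source, src_val, ["sales"], options)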
Example #42
def validate_obj_type_dict(server, obj_type_dict):
    """Validates the dictionary of objects against the specified server

    This function builds a dict with the types of the objects in
    obj_type_dict, filtering out non-existing databases and objects.

    Returns a dictionary with only the existing objects, using object types
    as keys and lists of (<DB NAME>, <OBJ_NAME>) tuples as values.
    """
    valid_obj_dict = defaultdict(list)
    server_dbs = set(row[0] for row in
                     server.get_all_databases(
                         ignore_internal_dbs=False))
    argument_dbs = set(obj_type_dict.keys())

    # Get non existing_databases and dbs to check
    non_existing_dbs = argument_dbs.difference(server_dbs)
    dbs_to_check = server_dbs.intersection(argument_dbs)

    if non_existing_dbs:
        if len(non_existing_dbs) > 1:
            plurals = ('s', '', 'them')
        else:
            plurals = ('', 'es', 'it')
        print('# WARNING: specified database{0} do{1} not '
              'exist on the base server and will be skipped along with '
              'any tables and routines belonging to {2}: '
              '{3}.'.format(plurals[0], plurals[1], plurals[2],
                            ", ".join(non_existing_dbs)))

    # Now for each db that actually exists, get the type of the specified
    # objects
    for db_name in dbs_to_check:
        db = Database(server, db_name)
        # quote database name if necessary
        quoted_db_name = db_name
        if not is_quoted_with_backticks(db_name):
            quoted_db_name = quote_with_backticks(db_name)
        for obj_name in obj_type_dict[db_name]:
            if obj_name is None:
                # We must consider the database itself
                valid_obj_dict[DATABASE_TYPE].append((quoted_db_name,
                                                      quoted_db_name))
            else:
                # get quoted name for obj_name
                quoted_obj_name = obj_name
                if not is_quoted_with_backticks(obj_name):
                    quoted_obj_name = quote_with_backticks(obj_name)

                # Test if the object exists and if it does, test if it
                # is one of the supported object types, else
                # print a warning and skip the object
                obj_type = db.get_object_type(obj_name)
                if obj_type is None:
                    print("# WARNING: specified object does not exist. "
                          "{0}.{1} will be skipped."
                          "".format(quoted_db_name, quoted_obj_name))
                elif 'PROCEDURE' in obj_type or 'FUNCTION' in obj_type:
                    valid_obj_dict[ROUTINE_TYPE].append((quoted_db_name,
                                                         quoted_obj_name))
                elif 'TABLE' in obj_type:
                    valid_obj_dict[TABLE_TYPE].append((quoted_db_name,
                                                       quoted_obj_name))
                else:
                    print('# WARNING: specified object is not supported '
                          '(not a DATABASE, FUNCTION, PROCEDURE or TABLE),'
                          ' as such it will be skipped: {0}.{1}.'
                          ''.format(quoted_db_name, quoted_obj_name))
    return valid_obj_dict
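
# Usage sketch: a hypothetical call to validate_obj_type_dict() as defined
# above. Keys of obj_type_dict are database names; each value lists object
# names, with None standing for the database itself. 'server' is assumed to
# be a connected Server instance; the names below are placeholders.
obj_type_dict = {
    "sales": [None, "orders", "refresh_totals"],
    "no_such_db": ["whatever"],  # reported as non-existing and skipped
}
valid_objects = validate_obj_type_dict(server, obj_type_dict)
for obj_type, name_pairs in valid_objects.items():
    print("{0}: {1}".format(obj_type, name_pairs))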
Example #43
def copy_db(src_val, dest_val, db_list, options):
    """Copy a database

    This method will copy a database and all of its objects and data from
    one server (source) to another (destination). Options are available to
    selectively ignore each type of object. The force parameter is
    used to permit the copy to overwrite an existing destination database
    (default is to not overwrite).

    src_val[in]        a dictionary containing connection information for the
                       source including:
                       (user, password, host, port, socket)
    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    options[in]        a dictionary containing the options for the copy:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, verbose, force, quiet,
                       connections, debug, exclude_names, exclude_patterns)

    Notes:
        force    - if True, the database on the destination will be dropped
                   if it exists (default is False)
        quiet    - do not print any information during operation
                   (default is False)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.server import connect_servers
    from mysql.utilities.command.dbexport import get_change_master_command

    verbose = options.get("verbose", False)
    quiet = options.get("quiet", False)
    skip_views = options.get("skip_views", False)
    skip_procs = options.get("skip_procs", False)
    skip_funcs = options.get("skip_funcs", False)
    skip_events = options.get("skip_events", False)
    skip_grants = options.get("skip_grants", False)
    skip_data = options.get("skip_data", False)
    skip_triggers = options.get("skip_triggers", False)
    skip_tables = options.get("skip_tables", False)
    locking = options.get("locking", "snapshot")

    rpl_info = ([], None)

    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(src_val, dest_val, conn_options)

    source = servers[0]
    destination = servers[1]

    cloning = (src_val == dest_val) or dest_val is None
    
    # Get list of all databases from source if --all is specified.
    # Ignore system databases.
    if options.get("all", False):
        # The --all option is valid only if not cloning.
        if not cloning:
            if not quiet:
                print "# Including all databases."
            rows = source.get_all_databases()
            for row in rows:
                db_list.append((row[0], None)) # Keep same name
        else:
            raise UtilError("Cannot copy all databases on the same server.")

    # Do error checking and preliminary work:
    #  - Check user permissions on source and destination for all databases
    #  - Check to see if executing on same server but same db name (error)
    #  - Build list of tables to lock for copying data (if no skipping data)
    #  - Check storage engine compatibility
    for db_name in db_list:
        source_db = Database(source, db_name[0])
        if destination is None:
            destination = source
        if db_name[1] is None:
            db = db_name[0]
        else:
            db = db_name[1]
        dest_db = Database(destination, db)
        
        # Make a dictionary of the options
        access_options = {
            'skip_views'  : skip_views,
            'skip_procs'  : skip_procs,
            'skip_funcs'  : skip_funcs,
            'skip_grants' : skip_grants,
            'skip_events' : skip_events,
        }

        source_db.check_read_access(src_val["user"], src_val["host"],
                                    access_options)
        
        dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                   access_options)

        # Error if source db and destination db are the same and we're cloning
        if destination == source and db_name[0] == db_name[1]:
            raise UtilError("Destination database name is same as "
                                 "source - source = %s, destination = %s" %
                                 (db_name[0], db_name[1]))

        # Error if the source database does not exist
        if not source_db.exists():
            raise UtilError("Source database does not exist - %s" % db_name[0])
        
        # Check storage engines
        check_engine_options(destination,
                             options.get("new_engine", None),
                             options.get("def_engine", None),
                             False, options.get("quiet", False))

    # Get replication commands if rpl_mode specified.
    # if --rpl specified, dump replication initial commands
    if options.get("rpl_mode", None):
        new_opts = options.copy()
        new_opts['multiline'] = False
        new_opts['strict'] = True
        rpl_info = get_change_master_command(src_val, new_opts)
        destination.exec_query("STOP SLAVE;")

    # Copy objects
    # We need to delay trigger and events to after data is loaded
    new_opts = options.copy()
    new_opts['skip_triggers'] = True
    new_opts['skip_events'] = True
    
    # Get the table locks unless we are cloning with lock-all
    if not (cloning and locking == 'lock-all'):
        my_lock = get_copy_lock(source, db_list, options, True)

    _copy_objects(source, destination, db_list, new_opts)

    # If we are cloning, take the write locks prior to copying data
    if cloning and locking == 'lock-all':
        my_lock = get_copy_lock(source, db_list, options, True, cloning)

    # Copy data
    if not skip_data and not skip_tables:
    
        # Copy tables
        for db_name in db_list:
    
            # Get a Database class instance
            db = Database(source, db_name[0], options)
    
            # Perform the copy
            db.init()
            db.copy_data(db_name[1], options, destination,
                         options.get("threads", False))
            
    # if cloning with lock-all unlock here to avoid system table lock conflicts
    if cloning and locking == 'lock-all':
        my_lock.unlock()

    # Create triggers for all databases
    if not skip_triggers:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_events'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    # Create events for all databases
    if not skip_events:
        new_opts = options.copy()
        new_opts['skip_tables'] = True
        new_opts['skip_views'] = True
        new_opts['skip_procs'] = True
        new_opts['skip_funcs'] = True
        new_opts['skip_triggers'] = True
        new_opts['skip_grants'] = True
        new_opts['skip_create'] = True
        _copy_objects(source, destination, db_list, new_opts, False, False)

    if not (cloning and locking == 'lock-all'):
        my_lock.unlock()

    if options.get("rpl_mode", None):
        for cmd in rpl_info[_RPL_COMMANDS]:
            if cmd[0] == '#' and not quiet:
                print cmd
            else:
                if verbose:
                    print cmd
                destination.exec_query(cmd)
        destination.exec_query("START SLAVE;")

    if not quiet:
        print "#...done."
    return True
Example #44
def object_diff(server1_val,
                server2_val,
                object1,
                object2,
                options,
                object_type=None):
    """diff the definition of two objects

    Find the difference between two object definitions.

    server1_val[in]    a dictionary containing connection information for the
                       first server including:
                       (user, password, host, port, socket)
    server2_val[in]    a dictionary containing connection information for the
                       second server including:
                       (user, password, host, port, socket)
    object1[in]        the first object in the compare in the form: (db.name)
    object2[in]        the second object in the compare in the form: (db.name)
    options[in]        a dictionary containing the options for the operation:
                       (quiet, verbosity, difftype)
    object_type[in]    type of the objects to be compared (e.g., TABLE,
                       PROCEDURE, etc.). By default None (not defined).

    Returns None = objects are the same, diff[] = tables differ
    """
    server1, server2 = server_connect(server1_val, server2_val, object1,
                                      object2, options)

    # Get the object type if unknown considering that objects of different
    # types can be found with the same name.
    if not object_type:
        # Get object types of object1
        regexp_obj = re.compile(REGEXP_QUALIFIED_OBJ_NAME)
        m_obj = regexp_obj.match(object1)
        db_name, obj_name = m_obj.groups()
        db = Database(server1, db_name, options)
        obj1_types = db.get_object_type(obj_name)
        if not obj1_types:
            raise UtilDBError("The object {0} does not exist.".format(object1))

        # Get object types of object2
        m_obj = regexp_obj.match(object2)
        db_name, obj_name = m_obj.groups()
        db = Database(server2, db_name, options)
        obj2_types = db.get_object_type(obj_name)
        if not obj2_types:
            raise UtilDBError("The object {0} does not exist.".format(object2))

        # Merge types found for both objects
        obj_types = set(obj1_types + obj2_types)

        # Diff objects considering all types found
        result = []
        for obj_type in obj_types:
            res = diff_objects(server1, server2, object1, object2, options,
                               obj_type)
            if res:
                result.append(res)
        return result if len(result) > 0 else None
    else:
        # Diff objects of known type
        return diff_objects(server1, server2, object1, object2, options,
                            object_type)
Example #45
def import_file(dest_val, file_name, options):
    """Import a file

    This method reads a file and, if needed, transforms the file into
    discrete SQL statements for execution on the destination server.

    It accepts any of the formal structured files produced by the
    mysqlexport utility including formats SQL, CSV, TAB, GRID, and
    VERTICAL.

    It will read these files and skip or include the definitions or data
    as specified in the options. An error is raised for any conversion
    errors or errors while executing the statements.

    Users are highly encouraged to use the --dryrun option which will
    print the SQL statements without executing them.

    dest_val[in]       a dictionary containing connection information for the
                       destination including:
                       (user, password, host, port, socket)
    file_name[in]      name (and path) of the file to import
    options[in]        a dictionary containing the options for the import:
                       (skip_tables, skip_views, skip_triggers, skip_procs,
                       skip_funcs, skip_events, skip_grants, skip_create,
                       skip_data, no_header, display, format, and debug)

    Returns bool True = success, False = error
    """

    from mysql.utilities.common.database import Database
    from mysql.utilities.common.options import check_engine_options
    from mysql.utilities.common.table import Table
    from mysql.utilities.common.server import connect_servers

    # Helper method to dig through the definitions for create statements
    def _process_definitions(statements, table_col_list, db_name):
        # First, get the SQL strings
        sql_strs = _build_create_objects(obj_type, db_name, definitions)
        statements.extend(sql_strs)
        # Now, save the column list
        col_list = _build_col_metadata(obj_type, definitions)
        if len(col_list) > 0:
            table_col_list.extend(col_list)

    def _process_data(tbl_name, statements, columns,
                      table_col_list, table_rows, skip_blobs):
        # if there is data here, build bulk inserts
        # First, create table reference, then call insert_rows()
        tbl = Table(destination, tbl_name)
        # Need to check to see if table exists!
        if tbl.exists():
            tbl.get_column_metadata()
            col_meta = True
        elif len(table_col_list) > 0:
            col_meta = _get_column_metadata(tbl, table_col_list)
        else:
            fix_cols = []
            fix_cols.append((tbl.tbl_name, columns))
            col_meta = _get_column_metadata(tbl, fix_cols)
        if not col_meta:
            raise UtilError("Cannot build bulk insert statements without "
                                 "the table definition.")
        ins_strs = tbl.make_bulk_insert(table_rows, tbl.q_db_name)
        if len(ins_strs[0]) > 0:
            statements.extend(ins_strs[0])
        if len(ins_strs[1]) > 0 and not skip_blobs:
            for update in ins_strs[1]:
                statements.append(update)

    # Gather options
    format = options.get("format", "sql")
    no_headers = options.get("no_headers", False)
    quiet = options.get("quiet", False)
    import_type = options.get("import_type", "definitions")
    single = options.get("single", True)
    dryrun = options.get("dryrun", False)
    do_drop = options.get("do_drop", False)
    skip_blobs = options.get("skip_blobs", False)
    skip_gtid = options.get("skip_gtid", False)

    # Attempt to connect to the destination server
    conn_options = {
        'quiet'     : quiet,
        'version'   : "5.1.30",
    }
    servers = connect_servers(dest_val, None, conn_options)

    destination = servers[0]

    # Check storage engines
    check_engine_options(destination,
                         options.get("new_engine", None),
                         options.get("def_engine", None),
                         False, options.get("quiet", False))

    if not quiet:
        if import_type == "both":
            import_str = "definitions and data"
        else:
            import_str = import_type
        print "# Importing %s from %s." % (import_str, file_name)

    # Setup variables we will need
    skip_header = not no_headers
    if format == "sql":
        skip_header = False
    get_db = True
    check_privileges = False
    db_name = None
    file = open(file_name)
    columns = []
    read_columns = False
    table_rows = []
    obj_type = ""
    definitions = []
    statements = []
    table_col_list = []
    tbl_name = ""
    skip_rpl = options.get("skip_rpl", False)
    gtid_command_found = False
    supports_gtid = servers[0].supports_gtid() == 'ON'
    skip_gtid_warning_printed = False
    gtid_version_checked = False

    # Read the file one object/definition group at a time
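    # Each row returned by read_next() is a (type, value) pair: row[0]
    # identifies replication/GTID commands, definition types, or data
    # markers such as BEGIN_DATA, and row[1] carries the corresponding
    # statement, definition, column list, or data values.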
    for row in read_next(file, format):
        # Check for replication command
        if row[0] == "RPL_COMMAND":
            if not skip_rpl:
                statements.append(row[1])
            continue
        if row[0] == "GTID_COMMAND":
            gtid_command_found = True
            if not supports_gtid:
                # only display warning once
                if not skip_gtid_warning_printed:
                    print _GTID_SKIP_WARNING
                    skip_gtid_warning_printed = True
            elif not skip_gtid:
                if not gtid_version_checked:
                    gtid_version_checked = True
                    # Check GTID version for complete feature support
                    servers[0].check_gtid_version()
                    # Check the gtid_purged value too
                    servers[0].check_gtid_executed("import")
                statements.append(row[1])
            continue
        # If this is the first pass, get the database name from the file
        if get_db:
            if skip_header:
                skip_header = False
            else:
                db_name = _get_db(row)
                # quote db_name with backticks if needed
                if db_name and not is_quoted_with_backticks(db_name):
                    db_name = quote_with_backticks(db_name)
                get_db = False
                if do_drop and import_type != "data":
                    statements.append("DROP DATABASE IF EXISTS %s;" % db_name)
                if import_type != "data":
                    if not _skip_object("CREATE_DB", options) and \
                       not format == 'sql':
                        statements.append("CREATE DATABASE %s;" % db_name)

        # Once the database name is known, verify that the user has the
        # required write access on the destination for that database.
        if db_name is not None:
            dest_db = Database(destination, db_name)

            # Copy the options for the access check
            access_options = options.copy()

            dest_db.check_write_access(dest_val['user'], dest_val['host'],
                                       access_options)
            
        # Now check to see if we want definitions, data, or both:
        if row[0] == "sql" or row[0] in _DEFINITION_LIST:
            if format != "sql" and len(row[1]) == 1:
                raise UtilError("Cannot read an import file generated with "
                                "--display=NAMES")

            if import_type in ("definitions", "both"):
                if format == "sql":
                    statements.append(row[1])
                else:
                    if obj_type == "":
                        obj_type = row[0]
                    if obj_type != row[0]:
                        if len(definitions) > 0:
                            _process_definitions(statements, table_col_list,
                                                 db_name)
                        obj_type = row[0]
                        definitions = []
                    if not _skip_object(row[0], options):
                        definitions.append(row[1])
        else:
            # see if there are any definitions to process
            if len(definitions) > 0:
                _process_definitions(statements, table_col_list, db_name)
                definitions = []

            if import_type in ("data", "both"):
                if _skip_object("DATA", options):
                    continue  # skip data
                elif format == "sql":
                    statements.append(row[1])
                else:
                    if row[0] == "BEGIN_DATA":
                        # Start of table so first row is columns.
                        if len(table_rows) > 0:
                            _process_data(tbl_name, statements, columns,
                                          table_col_list, table_rows,
                                          skip_blobs)
                            table_rows = []
                        read_columns = True
                        tbl_name = row[1]
                        if not is_quoted_with_backticks(tbl_name):
                            db, sep, tbl = tbl_name.partition('.')
                            q_db = quote_with_backticks(db)
                            q_tbl = quote_with_backticks(tbl)
                            tbl_name = ".".join([q_db, q_tbl])
                    else:
                        if read_columns:
                            columns = row[1]
                            read_columns = False
                        else:
                            if not single:
                                table_rows.append(row[1])
                            else:
                                ins_str = _build_insert_data(columns, tbl_name,
                                                             row[1])
                                statements.append(ins_str)

    # Process remaining definitions
    if len(definitions) > 0:
        _process_definitions(statements, table_col_list, db_name)
        definitions = []

    # Process remaining data rows
    if len(table_rows) > 0:
        _process_data(tbl_name, statements, columns,
                      table_col_list, table_rows, skip_blobs)
        table_rows = []

    # Now process the statements
    _exec_statements(statements, destination, format, options, dryrun)

    file.close()
    
    # Check gtid process
    if supports_gtid and not gtid_command_found:
        print _GTID_MISSING_WARNING

    if not quiet:
        print "#...done."
    return True
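A minimal usage sketch (not part of the original source): the dest_val keys follow the docstring above and the option names mirror the keys this function reads via options.get(); the file name and all concrete values are placeholders.

# Hypothetical call to import_file(); all values below are placeholders.
dest_val = {'user': 'root', 'password': 'secret', 'host': 'localhost',
            'port': 3306, 'socket': None}
options = {'format': 'sql', 'import_type': 'both', 'dryrun': True,
           'quiet': False, 'do_drop': False, 'single': False,
           'no_headers': False, 'skip_blobs': False, 'skip_rpl': False,
           'skip_gtid': False}
# With dryrun=True the generated statements are printed, not executed.
if import_file(dest_val, "db1_export.sql", options):
    print "# Import completed."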