Example #1
def get_linux_disk_info(datadirs):
    # get the info related to @@datadir and its physical disk
    datadir = datadirs['@@datadir']
    output = util.output("Datadir", datadir)
    output2, datadir_physical = _get_physical_info(datadir)
    output += output2
    for key in datadirs:
        if key == "@@datadir":
            continue
        output += util.output(key[2:], datadirs[key])
        if datadirs[key] == "./" or datadirs[
                key] == datadir or not datadirs[key]:
            continue
        else:
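            # paths stored as "./xxx" are relative to the datadir, so resolve them against it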
            if datadirs[key].startswith("./"):
                path_to_check = datadir + datadirs[key][1:]
            else:
                path_to_check = datadirs[key]

            output2, physical_disk2 = _get_physical_info(path_to_check)

            if datadir_physical != physical_disk2:
                output += output2

    return output
Example #2
def get_tables_without_pk(session, advices=False, details=False):
    tbl_no_pk = 0
    title = "Tables without PK"
    stmt = """SELECT concat(tables.table_schema, '/' , tables.table_name) as `Table Name`, tables.engine as `Engine`,
       tables.table_rows as `Rows` FROM information_schema.tables  LEFT JOIN (
          SELECT table_schema , table_name
          FROM information_schema.statistics
          GROUP BY table_schema, table_name, index_name HAVING
           SUM( case when non_unique = 0 and nullable != 'YES' then 1 else 0 end ) = count(*) ) puks
           ON tables.table_schema = puks.table_schema and tables.table_name = puks.table_name
           WHERE puks.table_name is null
            AND tables.table_type = 'BASE TABLE' AND Engine='InnoDB'"""
    result = session.run_sql(stmt)
    output = util.output(title, "")
    for row in result.fetch_all():
        tbl_no_pk += 1

    output = util.output(title, tbl_no_pk)

    if advices:
        if tbl_no_pk > 0:
            output += util.print_red(
                "It's not recommended to have tables without Primary Key")

    if details and tbl_no_pk > 0:
        output2, nbrows = util.run_and_print("Details", stmt, session)
        output += output2

    return output
Example #3
def get_engines(session, advices=False, details=False):
    got_inno = False
    other = False
    title = "Engines Used"
    stmt = """select count(*) as '# TABLES', CONCAT(ROUND(sum(data_length) / ( 1024 * 1024 * 1024 ), 2), 'G') DATA, 
                     CONCAT(ROUND(sum(index_length) / ( 1024 * 1024 * 1024 ), 2), 'G') INDEXES,
                     CONCAT(sum(ROUND(( data_length + index_length ) / ( 1024 * 1024 * 1024 ), 2)), 'G') 'TOTAL SIZE', 
                     engine as ENGINE from information_schema.TABLES  
              where TABLE_SCHEMA NOT IN ('mysql', 'information_schema', 'performance_schema', 'sys') 
                and data_length group by engine;"""
    result = session.run_sql(stmt)
    output = util.output(title, "")
    for row in result.fetch_all():
        output += util.output(row[4],
                              "{} table(s) ({})".format(row[0], row[3]), 1)
        if row[4] == "InnoDB":
            got_inno = True
        else:
            other = True

    if advices:
        if not got_inno and other:
            output += util.print_red("It's recommended to only use InnoDB")

    if details:
        output2, nbrows = util.run_and_print("Details".format(title), stmt,
                                             session)
        output += output2

    return output
Example #4
def _get_innodb_log_details_57(session):
    title = "InnoDB Logs"
    stmt = """SELECT sys.format_bytes(Log_File_Size) Log_File_Size, 
              sys.format_bytes(Log_File_Size * Nb_files) Tot_Log_Size, Nb_Files, Flush_at_trx_commit 
  FROM (	
	SELECT variable_value as Log_File_Size
	FROM performance_schema.global_variables
	WHERE variable_name = 'innodb_log_file_size') A,
     (
	SELECT variable_value as Flush_at_Trx_commit
	FROM performance_schema.global_variables
	WHERE variable_name = 'innodb_flush_log_at_trx_commit') B,
     (     
	SELECT variable_value as Nb_Files
	FROM performance_schema.global_variables
	WHERE variable_name = 'innodb_log_files_in_group') C
    """
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, "")
    output += util.output("File Size", row[0], 1)
    output += util.output("Nb of Files", row[2], 1)
    output += util.output("Total Size", row[1], 1)
    output += util.output("Flush at Commit", row[3], 1)

    return output
Example #5
def get_dataset(session, branch):
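    # format_bytes() is a native function in 8.0, lives in the sys schema in 5.7,
    # and is not available in 5.6 where the size is formatted manually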
    stmt = """SELECT format_bytes(SUM(data_length)) Data,
                     format_bytes(SUM(index_length)) Indexes,
                     format_bytes(SUM(data_length)+sum(index_length)) 'Total Size'
              FROM information_schema.TABLES GROUP BY NULL"""
    if branch == "56":
        stmt = """SELECT CONCAT(ROUND(SUM(data_length) / ( 1024 * 1024 * 1024 ), 2), 'G') Data,
                     CONCAT(ROUND(SUM(index_length) / ( 1024 * 1024 * 1024 ), 2), 'G') Indexes,
                     CONCAT(ROUND(SUM(data_length+index_length)/( 1024 * 1024 * 1024 ), 2), 'G') 'Total Size'
              FROM information_schema.TABLES GROUP BY NULL"""
    else:
        if branch == "57":
            stmt = """SELECT sys.format_bytes(SUM(data_length)) Data,
                     sys.format_bytes(SUM(index_length)) Indexes,
                     sys.format_bytes(SUM(data_length)+sum(index_length)) 'Total Size'
              FROM information_schema.TABLES GROUP BY NULL"""

    result = session.run_sql(stmt)
    output = util.output("Dataset", "")
    while result.has_data():
        object_res = result.fetch_one_object()
        if object_res:
            for key in object_res:
                output += util.output(key, object_res[key], 1)
            result.next_result()
        else:
            break

    return output
Example #6
def _get_blocked_hosts(session, advices, details):
    title = "Blocked Hosts"
    stmt = """select host, ip, COUNT_HOST_BLOCKED_ERRORS, @@max_connect_errors 
    from performance_schema.host_cache where COUNT_HOST_BLOCKED_ERRORS>0;
    """
    result = session.run_sql(stmt)
    if result.has_data():
        rows = result.fetch_all()
        if len(rows) > 0:
            output = util.output(title, "")
            for row in rows:
                output += util.output("{} ({})".format(row[0], row[1]), row[2],
                                      1)
            if advices:
                output += util.print_red(
                    "You have blocked host(s), please use mysqladmin flush-hosti and maybe increase max_connect_errors ({})."
                    .format(row[3]))
            if details:
                stmt = """select HOST, IP, COUNT_HOST_BLOCKED_ERRORS, FIRST_SEEN, LAST_ERROR_SEEN 
                      from performance_schema.host_cache where COUNT_HOST_BLOCKED_ERRORS>0
                    """
                stmt = """SELECT HOST, IP, COUNT_HOST_BLOCKED_ERRORS, concat("'", FIRST_SEEN, "'") FIRST_SEEN,
                          concat("'", LAST_SEEN, "'") LAST_SEEN
                          FROM performance_schema.host_cache where COUNT_HOST_BLOCKED_ERRORS>0"""
                output2, nbrows = util.run_and_print("Blocked Hosts", stmt,
                                                     session)
                output += output2
        else:
            output = util.output(title, "none")
    else:
        output = util.output(title, "none")

    return output
Example #7
def _get_ahi_details(session, advices):
    title = "Adaptive Hash Index"
    stmt = """SELECT ROUND(
            (
              SELECT Variable_value FROM sys.metrics
              WHERE Variable_name = 'adaptive_hash_searches'
            ) /
            (
              (
               SELECT Variable_value FROM sys.metrics
               WHERE Variable_name = 'adaptive_hash_searches_btree'
              )  + (
               SELECT Variable_value FROM sys.metrics
               WHERE Variable_name = 'adaptive_hash_searches'
              )
            ) * 100,2
          ) 'AHIRatio',
		  ROUND(
            (
              SELECT Variable_value FROM sys.metrics
              WHERE Variable_name = 'adaptive_hash_searches'
            ) /
            (
              (
               SELECT Variable_value FROM sys.metrics
               WHERE Variable_name = 'adaptive_hash_searches_btree'
              )  + (
               SELECT Variable_value FROM sys.metrics
               WHERE Variable_name = 'adaptive_hash_searches'
              )
            ) * 100
          ) 'AHIRatioInt',
		  (
					SELECT variable_value
					FROM performance_schema.global_variables
					WHERE variable_name = 'innodb_adaptive_hash_index'
		  ) AHIEnabled,
		  (
					SELECT variable_value
					FROM performance_schema.global_variables
					WHERE variable_name = 'innodb_adaptive_hash_index_parts'
		  ) AHIParts"""
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, "")
    output += util.output("AHI Enabled", row[2], 1)
    output += util.output("AHI Parts", row[3], 1)
    output += util.output("AHI Ratio", row[0], 1)

    if advices and row[2] == "ON":
        output += util.print_orange("AHI is not recommended for all workloads")
    return output
Example #8
def get_users_auth_plugins(session, advices=False):
    title = "Authentication Plugins"
    stmt = "select plugin, count(*) 'nb of users' from mysql.user group by plugin"
    result = session.run_sql(stmt)
    output = util.output("Authentication Plugins", "")
    for row in result.fetch_all():
        output += util.output(row[0], "{} user(s)".format(row[1]), 1)
        if row[0] not in mds_allowed_auth_plugins:
            if advices:
                output += util.print_red("{} is not supported in MDS".format(
                    row[0]))

    return output
Example #9
def get_dataset(session):
    stmt = """SELECT format_bytes(SUM(data_length)) Data,
                     format_bytes(SUM(index_length)) Indexes,
                     format_bytes(SUM(data_length)+sum(index_length)) 'Total Size'
              FROM information_schema.TABLES GROUP BY NULL"""
    result = session.run_sql(stmt)
    output = util.output("Dataset", "")
    while result.has_data():
        object_res = result.fetch_one_object()
        if object_res:
            for key in object_res:
                output += util.output(key, object_res[key], 1)
            result.next_result()
        else:
            break

    return output
Example #10
def get_linux_memory_usage(advices):
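    # parse the output of `free -h` to report memory, swap usage and the swappiness setting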
    command = "free -h"
    all_info = subprocess.check_output(command, shell=True).strip()                  
    for line in all_info.decode("utf-8").split("\n"): 
        if "Mem:" in line:
            line_sp = line.split()
            memory = util.output("Total Memory", line_sp[1])
            memory += util.output("Memory Used", line_sp[2])
            memory += util.output("Memory Free", line_sp[3])
            memory += util.output("Filesystem Cache", line_sp[5])
        if "Swap:" in line:
            line_sp = line.split()
            memory += util.output("Total Swap", line_sp[1])
            memory += util.output("Swap Used", line_sp[2])
            memory += util.output("Swap Free", line_sp[3])
            swappiness = util.get_sysctl_value('vm.swappiness')
            memory += util.output("Swappiness", swappiness)
            if advices:
                if int(swappiness) <= 10 and int(swappiness) > 0:
                    memory += util.print_green("\nYour swappiness value is good")
                elif int(swappiness) == 0:
                    memory += util.print_red("0 as swappiness is dangerous, you should set it to 5")
                else:
                    memory += util.print_red("Your swappiness value is to high, you should set it to a value between 1 and 10")
    return memory
Example #11
def _get_innodb_log_details_56(session):
    title = "InnoDB Logs"
    stmt = """SELECT concat(round(@@innodb_log_file_size/1024/1024), " MiB") Log_File_Size, 
              concat(round((@@innodb_log_file_size * @@innodb_log_files_in_group)/1024/1024), " MiB") Tot_Log_Size, 
              @@innodb_log_files_in_group Nb_files, @@innodb_flush_log_at_trx_commit Flush_at_trx_commit"""
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, "")
    output += util.output("File Size", row[0], 1)
    output += util.output("Nb of Files", row[2], 1)
    output += util.output("Total Size", row[1], 1)
    output += util.output("Flush at Commit", row[3], 1)

    return output
Example #12
def get_largest_innodb_tables(session, limit=10):
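    # join information_schema.TABLES with INNODB_TABLESPACES to add the on-disk
    # file size and an estimate of the space wasted inside each tablespace file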
    try:
        from prettytable import PrettyTable
    except ImportError:
        return (util.print_red(
            "Error importing module prettytable, check if it's installed"))

    stmt = """SELECT NAME as `Table Name`, TABLE_ROWS as `Rows`, format_bytes(data_length) `Data Size`,
                     format_bytes(PAGE_SIZE) `Page Size`, SPACE_TYPE `Space Type`,
                     format_bytes(index_length) `Index Size`,
                     format_bytes(data_length+index_length) `Total Size`,
                     format_bytes(data_free) `Data Free`,
                     format_bytes(FILE_SIZE) `File Size`,
                     format_bytes((FILE_SIZE/10 - (data_length/10 +
                           index_length/10))*10) `Wasted Size`
                FROM information_schema.TABLES as t
                JOIN information_schema.INNODB_TABLESPACES as it
                ON it.name = concat(table_schema,"/",table_name)
                ORDER BY (data_length + index_length) desc limit %s""" % str(
        limit)
    result = session.run_sql(stmt)
    headers = []
    for col in result.get_columns():
        headers.append(col.get_column_label())

    tab = PrettyTable(headers)
    tab.align = 'r'

    output = util.output("Top %s largest InnoDB Tables" % str(limit), "")
    for row in result.fetch_all():
        tab.add_row(row)
    tab.align[result.get_columns()[0].get_column_label()] = 'l'
    output += str(tab) + "\n"
    return output
Example #13
def get_tables_without_pk(session):
    try:
        from prettytable import PrettyTable
    except ImportError:
        return (util.print_red(
            "Error importing module prettytable, check if it's installed"))

    stmt = """SELECT concat(tables.table_schema, '/' , tables.table_name) as `Table Name`, tables.engine as `Engine`,
       tables.table_rows as `Row` FROM information_schema.tables  LEFT JOIN (
          SELECT table_schema , table_name
          FROM information_schema.statistics
          GROUP BY table_schema, table_name, index_name HAVING
           SUM( case when non_unique = 0 and nullable != 'YES' then 1 else 0 end ) = count(*) ) puks
           ON tables.table_schema = puks.table_schema and tables.table_name = puks.table_name
           WHERE puks.table_name is null
            AND tables.table_type = 'BASE TABLE' AND Engine='InnoDB'"""

    result = session.run_sql(stmt)
    headers = []
    for col in result.get_columns():
        headers.append(col.get_column_label())

    tab = PrettyTable(headers)
    tab.align = 'r'

    output = util.output("Tables without PK", "")
    for row in result.fetch_all():
        tab.add_row(row)
    tab.align[result.get_columns()[0].get_column_label()] = 'l'
    output += str(tab) + "\n"
    return output
Example #14
def get_users_privileges(session, advices=False):
    output = ""
    output_err = ""
    bad_users = 0
    sw_user = False
    title = "MDS Incompatible Privileges"
    # get all users
    stmt = "select user, host from mysql.user"
    result = session.run_sql(stmt)
    for row in result.fetch_all():
        stmt = "show grants for `{}`@`{}`".format(row[0], row[1])
        result2 = session.run_sql(stmt)
        sw_user = True
        for row2 in result2.fetch_all():
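            # the privilege list sits between GRANT and ON in each SHOW GRANTS line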
            m = re.match('GRANT (.*) ON', row2[0])
            if m:
                tab_grants = m.group(1).split(',')
                for priv in tab_grants:
                    if priv.strip() not in mds_allowed_privileges:
                        if sw_user:
                            bad_users += 1
                            sw_user = False
                        output_err += util.print_red(
                            "{}@{} has a privilege not supported in MDS: {}".
                            format(row[0], row[1], priv.strip()))

    output += util.output(title, "{} user(s)".format(bad_users))
    if advices:
        output += output_err

    return output
Example #15
def check_reserved_keywords(session, advices=False, details=False):
    title = "Use of 8.0 Reserved Keywords"
    output = util.output(title, "")
    output += _check_reserved_keywords_schema(session, advices, details)
    output += _check_reserved_keywords_table(session, advices, details)
    output += _check_reserved_keywords_column(session, advices, details)

    return output
Example #16
def _get_buffer_pool_details_56(session):
    title = "InnoDB Buffer Pool"
    stmt = """SELECT FORMAT(A.num * 100.0 / B.num,2) BufferPoolFullPct, 
        concat(round(@@innodb_buffer_pool_size/1024/1024)," MiB") BP_Size, 
        @@innodb_buffer_pool_instances BP_instances FROM
	(SELECT variable_value num FROM information_schema.global_status
	WHERE variable_name = 'Innodb_buffer_pool_pages_data') A,
	(SELECT variable_value num FROM information_schema.global_status
	WHERE variable_name = 'Innodb_buffer_pool_pages_total') B"""
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, "")
    output += util.output("BP Size", row[1], 1)
    output += util.output("BP Instance(s)", row[2], 1)
    output += util.output("BP filled at ", "{}%".format(row[0]), 1)

    return output
Example #17
def version_info(session):
    supported = False
    stmt = "select @@version_comment, @@version, @@version_compile_machine"
    result = session.run_sql(stmt)
    row = result.fetch_one()
    server_type = _get_type_by_pid(session)
    output = util.output(
        "MySQL Version", "{} ({}) - {} {}".format(row[0], row[1], row[2],
                                                  server_type))
    if int(row[1][0]) >= 8 and row[0].startswith('MySQL'):
        supported = True
    version = row[1]
    stmt = "show variables like 'aurora_version'"
    result = session.run_sql(stmt)
    row = result.fetch_one()
    if row:
        output += util.output("Aurora", "{}".format(row[1]))
    return supported, output, version
Example #18
def _get_ahi_details_56(session, advices):
    title = "Adaptive Hash Index"
    stmt = "select @@innodb_adaptive_hash_index"
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, row[0])
    if advices and row[0] == 1:
        output += util.print_orange("AHI is not recommended for all workloads")
    return output
Example #19
def _check_reserved_keywords_column(session, advices, details):
    bad_column = []
    stmt = """select column_name from information_schema.columns
              where TABLE_SCHEMA not in ('mysql', 'performance_schema', 'sys', 'information_schema')"""
    result = session.run_sql(stmt)
    for row in result.fetch_all():
        if row[0].upper() in mysql_80_keywords:
            bad_column.append(row[0])
    output = util.output("in column name", len(bad_column), 1)
    if advices and len(bad_column) > 0:
        output += util.print_orange(
            "It's recommended not to use reserved keywords; otherwise they must be quoted with `backticks`"
        )
    if details and len(bad_column) > 0:
        for el in bad_column:
            output += util.output(el.upper(),
                                  util.print_red_inline("RESERVED"), 1)
    return output
Example #20
def version_info(session):
    supported = False
    stmt = "select @@version_comment, @@version, @@version_compile_machine"
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output("MySQL Version",
                         "%s (%s) - %s" % (row[0], row[1], row[2]))
    if int(row[1][0]) >= 8 and row[0].startswith('MySQL'):
        supported = True
    return supported, output
Example #21
def get_routines(session, advices=False, details=False):
    title = "User Defined Routines"
    stmt = "select routine_schema, count(*) `amount` from information_schema.routines where definer not in ('mysql.sys@localhost') group by routine_schema"
    result = session.run_sql(stmt)
    output = util.output(title, "")
    for row in result.fetch_all():
        output += util.output(row[0], row[1], 1)
        if advices and row[0] in "mysql":
            output += util.print_red(
                "Custom routines in {} schema are not supported in MDS".format(
                    row[0]))
        if details:
            stmt = """select routine_schema, routine_name, definer from information_schema.routines 
                      where routine_schema = '{}' and definer not in ('mysql.sys@localhost')""".format(
                row[0])
            output2, nbrows = util.run_and_print(
                "Routines in {}".format(row[0]), stmt, session)
            output += output2

    return output
Example #22
def _get_buffer_pool_details(session, branch):
    title = "InnoDB Buffer Pool"
    usesys = ""
    if branch == "57":
        usesys = "sys."
    stmt = """SELECT ROUND(A.num * 100.0 / B.num)  BufferPoolFull, BP_Size, BP_instances,
FORMAT(F.num * 100.0 / E.num,2) DiskReadRatio, 
ROUND(F.num*100/E.num) DiskReadRatioInt
FROM (
	SELECT variable_value num FROM performance_schema.global_status
	WHERE variable_name = 'Innodb_buffer_pool_pages_data') A,
     (
	SELECT variable_value num FROM performance_schema.global_status
	WHERE variable_name = 'Innodb_buffer_pool_pages_total') B,
     (
	SELECT {}format_bytes(variable_value) as BP_Size
	FROM performance_schema.global_variables
	WHERE variable_name = 'innodb_buffer_pool_size') C,
     (
	SELECT variable_value as BP_instances
	FROM performance_schema.global_variables
	WHERE variable_name = 'innodb_buffer_pool_instances') D,
     (     
   	SELECT variable_value num 
	FROM performance_schema.global_status       
	WHERE variable_name = 'Innodb_buffer_pool_read_requests') E,       
     (
   	SELECT variable_value num 
	FROM performance_schema.global_status        
	WHERE variable_name = 'Innodb_buffer_pool_reads') F
    """.format(usesys)
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, "")
    output += util.output("BP Size", row[1], 1)
    output += util.output("BP Instance(s)", row[2], 1)
    output += util.output("BP filled at ", "{}%".format(row[0]), 1)
    output += util.output("Disk Read Ratio", "{}%".format(row[3]), 1)

    return output
Example #23
def get_configured_variables(session, details=False):
    title = "Global Variables Configured"
    stmt = """SELECT count(*), VARIABLE_SOURCE
               FROM performance_schema.variables_info t1
               JOIN performance_schema.global_variables t2
               ON t2.VARIABLE_NAME=t1.VARIABLE_NAME
               WHERE t1.VARIABLE_SOURCE != 'COMPILED' GROUP BY VARIABLE_SOURCE"""
    result = session.run_sql(stmt)
    output = util.output(title, "")
    for row in result.fetch_all():
        output += util.output(row[1], row[0], 1)

    if details:
        stmt = """SELECT t1.VARIABLE_NAME, VARIABLE_VALUE, VARIABLE_SOURCE 
               FROM performance_schema.variables_info t1 
               JOIN performance_schema.global_variables t2 
               ON t2.VARIABLE_NAME=t1.VARIABLE_NAME 
               WHERE t1.VARIABLE_SOURCE != 'COMPILED'"""
        output2, nbrows = util.run_and_print("Details".format(title), stmt,
                                             session)
        output += output2
    return output
Example #24
def get_flush_commands(session, advices):
    title = "Flush Commands"
    stmt = "show global status like 'Com_flush'"
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, "{}".format(row[1]))
    if advices:
        if int(row[1]) > 0:
            output += util.print_orange(
                "Pay attention that FLUSH commands like FLUSH PRIVILEGES will break MDS Inbound Replication"
            )
            output += util.print_orange("Consider using FLUSH LOCAL instead")
    return output
Example #25
def _get_physical_info(path):
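    # use `df -h` to find the device, size and mount point of the filesystem holding the path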
    command = "df -h %s | tail -n 1" % path
    all_info = subprocess.check_output(command, shell=True).strip()
    physical_disk = ""
    for line in all_info.decode("utf-8").split("\n"):
        line_sp = line.split()
        physical_disk = line_sp[0]
        output = util.output("Physical drive", physical_disk, 1)
        output += util.output("Size", line_sp[1], 1)
        output += util.output("Used (%s)" % line_sp[4], line_sp[2], 1)
        output += util.output("Free", line_sp[3], 1)
        output += util.output("Mount point", line_sp[5], 1)
        filesystem, attributes = _get_mount_options(line_sp[0])
        output += util.output("Filesystem", filesystem, 1)
        output += util.output("Mount attributes", attributes, 1)
    return output, physical_disk
Example #26
def get_tables_in_mysql(session, branch, advices=False, details=False):
    title = "Extra tables in mysql schema"
    stmt = "select count(*) from information_schema.tables where table_schema='mysql'"
    output = ""
    tot_tbl = 28
    if branch == "57":
        tot_tbl = 31
    if branch == "80":
        tot_tbl = 38
    result = session.run_sql(stmt)
    row = result.fetch_one()
    if row[0] > tot_tbl:
        output = util.output(title, int(row[0]) - tot_tbl)
        if advices:
            output += util.print_red(
                "Extra tables in mysql schema are not supported in MDS")
        # TODO add which one
    return output
Example #27
def get_replication_info(session, advices, branch, releasever):
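    # the variables that can be queried depend on the version branch and release,
    # e.g. binlog_transaction_compression only exists as of 8.0.20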
    supported = False
    if branch == "80":
        if int(releasever) > 19:
            stmt = """select @@server_id, @@binlog_checksum, @@binlog_encryption, @@binlog_format, @@binlog_row_image,
                     @@binlog_row_metadata, @@enforce_gtid_consistency, @@gtid_mode, @@log_bin, 
                     @@log_bin_basename, @@transaction_write_set_extraction, @@sync_binlog,
                     @@binlog_transaction_dependency_tracking, @@lower_case_table_names,  @@binlog_transaction_compression"""
        else:
            stmt = """select @@server_id, @@binlog_checksum, @@binlog_encryption, @@binlog_format, @@binlog_row_image,
                     @@binlog_row_metadata, @@enforce_gtid_consistency, @@gtid_mode, @@log_bin, 
                     @@log_bin_basename, @@transaction_write_set_extraction,  @@sync_binlog,
                     @@binlog_transaction_dependency_tracking, @@lower_case_table_names"""
    elif branch == "56":
        stmt = """select @@server_id, @@binlog_checksum, "n/a", @@binlog_format, @@binlog_row_image,
                     "n/a", @@enforce_gtid_consistency, @@gtid_mode, @@log_bin,  @@sync_binlog,
                     @@log_bin_basename, "n/a", "n/a", @@lower_case_table_names"""
    elif branch == "57":
        if int(releasever) > 21:
            stmt = """select @@server_id, @@binlog_checksum, @@binlog_format, @@binlog_row_image,
                     @@enforce_gtid_consistency, @@gtid_mode, @@log_bin,
                     @@log_bin_basename, @@transaction_write_set_extraction, @@sync_binlog,
                     @@binlog_transaction_dependency_tracking, @@lower_case_table_names"""
        else:
            stmt = """select @@server_id, @@binlog_checksum, @@binlog_format, @@binlog_row_image,
                     @@enforce_gtid_consistency, @@gtid_mode, @@log_bin,  @@sync_binlog,
                     @@log_bin_basename, @@transaction_write_set_extraction,
                     @@lower_case_table_names"""
    result = session.run_sql(stmt)
    output = ""
    object_res = result.fetch_one_object()
    if object_res:
        output = "\n"
        output += util.output("Replication Information", "")
        for key in object_res:
            if key == "@@server_id":
                output += util.output("Server Id",
                                      "{}".format(object_res[key]), 1)
                if advices and object_res[key] == 1:
                    output += util.print_orange(
                        "You should consider a unique server id")
                continue
            if key == "@@binlog_checksum":
                output += util.output("Binlog Checksum",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@binlog_encryption":
                output += util.output("Binlog Encryption",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@binlog_format":
                output += util.output("Binlog Format",
                                      "{}".format(object_res[key]), 1)
                if advices and object_res[key] != "ROW":
                    output += util.print_red(
                        "You should only use Binary Log format = ROW !")
                continue
            if key == "@@binlog_row_image":
                output += util.output("Binlog Row Image",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@binlog_row_metadata":
                output += util.output("Binlog Row Metadata",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@gtid_mode":
                output += util.output("GTID Mode",
                                      "{}".format(object_res[key]), 1)
                if advices and object_res[key] != "ON":
                    output += util.print_red("You should enable GTIDs !")
                continue
            if key == "@@enforce_gtid_consistency":
                output += util.output("Enforce GTID Consistency",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@log_bin":
                output += util.output("Log Bin", "{}".format(object_res[key]),
                                      1)
                if advices and object_res[key] != 1:
                    output += util.print_red("You should enable binary logs !")
                continue
            if key == "@@sync_binlog":
                output += util.output("Sync Binlog",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@log_bin_basename":
                output += util.output("Log Bin Basename",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@transaction_write_set_extraction":
                output += util.output("Trx Write Set Extraction",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@binlog_transaction_dependency_tracking":
                output += util.output("Binlog Trx Dependency Tracking",
                                      "{}".format(object_res[key]), 1)
                continue
            if key == "@@lower_case_table_names" and object_res[key] == 1:
                output += util.output("Lower Case Table Names",
                                      "{}".format(object_res[key]), 1)
                if advices:
                    output += util.print_orange(
                        "For MDS Inbound Replication, lower_case_table_names should be 0"
                    )

    return output
Example #28
def _get_innodb_log_details(session):
    title = "InnoDB Logs"
    stmt = """SELECT CONCAT(
		            (
						SELECT FORMAT_BYTES(
							STORAGE_ENGINES->>'$."InnoDB"."LSN"' - STORAGE_ENGINES->>'$."InnoDB"."LSN_checkpoint"'
							               )
								FROM performance_schema.log_status),
						" / ",
						format_bytes(
							(SELECT VARIABLE_VALUE
								FROM performance_schema.global_variables
								WHERE VARIABLE_NAME = 'innodb_log_file_size'
							)  * (
							 SELECT VARIABLE_VALUE
							 FROM performance_schema.global_variables
							 WHERE VARIABLE_NAME = 'innodb_log_files_in_group'))
					) CheckpointInfo,
					(
						SELECT ROUND(((
							SELECT STORAGE_ENGINES->>'$."InnoDB"."LSN"' - STORAGE_ENGINES->>'$."InnoDB"."LSN_checkpoint"'
							FROM performance_schema.log_status) / ((
								SELECT VARIABLE_VALUE
								FROM performance_schema.global_variables
								WHERE VARIABLE_NAME = 'innodb_log_file_size'
							) * (
							SELECT VARIABLE_VALUE
							FROM performance_schema.global_variables
							WHERE VARIABLE_NAME = 'innodb_log_files_in_group')) * 100),2)
					)  AS CheckpointAge,
					format_bytes( (
						SELECT VARIABLE_VALUE
						FROM performance_schema.global_variables
						WHERE variable_name = 'innodb_log_file_size')
					) AS InnoDBLogFileSize,
					(
						SELECT VARIABLE_VALUE
						FROM performance_schema.global_variables
						WHERE variable_name = 'innodb_log_files_in_group'
					) AS NbFiles,
					(
						SELECT VARIABLE_VALUE
						FROM performance_schema.global_status
						WHERE VARIABLE_NAME = 'Innodb_redo_log_enabled'
					) AS RedoEnabled, 
					(
						SELECT variable_value
						FROM performance_schema.global_variables
						WHERE variable_name = 'innodb_flush_log_at_trx_commit'
					) AS FlushAtCommit
    """
    result = session.run_sql(stmt)
    row = result.fetch_one()
    output = util.output(title, "")
    output += util.output("File Size", row[2], 1)
    output += util.output("Nb of Files", row[3], 1)
    output += util.output("Checkpoint Info", row[0], 1)
    output += util.output("CheckPointAge", "{}%".format(row[1]), 1)
    output += util.output("Flush at Commit", row[5], 1)
    output += util.output("Redo Log", row[4], 1)

    return output