Example 1
def insert_split_schema_version_v2(conn, cur, user, pwd):
    try:
        query_cur = actions.QueryCursor(cur)
        is_primary = actions.check_current_cluster_is_primary(query_cur)
        if not is_primary:
            logging.warn("should run in primary cluster")
            raise e

        # primary cluster
        dml_cur = actions.DMLCursor(cur)
        sql = """replace into __all_core_table(table_name, row_id, column_name, column_value)
              values ('__all_global_stat', 1, 'split_schema_version_v2', '-1');"""
        rowcount = dml_cur.exec_update(sql)
        if rowcount <= 0:
            logging.warn("invalid rowcount : {0}".format(rowcount))
            raise e

        # standby cluster
        standby_cluster_list = actions.fetch_standby_cluster_infos(
            conn, query_cur, user, pwd)
        for standby_cluster in standby_cluster_list:
            # connect
            logging.info(
                "create connection : cluster_id = {0}, ip = {1}, port = {2}".
                format(standby_cluster['cluster_id'], standby_cluster['ip'],
                       standby_cluster['port']))
            tmp_conn = mysql.connector.connect(user=standby_cluster['user'],
                                               password=standby_cluster['pwd'],
                                               host=standby_cluster['ip'],
                                               port=standby_cluster['port'],
                                               database='oceanbase')
            tmp_cur = tmp_conn.cursor(buffered=True)
            tmp_conn.autocommit = True
            tmp_query_cur = actions.QueryCursor(tmp_cur)
            # check that this is still a standby cluster
            is_primary = actions.check_current_cluster_is_primary(
                tmp_query_cur)
            if is_primary:
                logging.error(
                    "primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"
                    .format(standby_cluster['cluster_id'],
                            standby_cluster['ip'], standby_cluster['port']))
                raise Exception("standby cluster {0} became primary".format(
                    standby_cluster['cluster_id']))
            # replace
            tmp_dml_cur = actions.DMLCursor(tmp_cur)
            sql = """replace into __all_core_table(table_name, row_id, column_name, column_value)
                values ('__all_global_stat', 1, 'split_schema_version_v2', '-1');"""
            rowcount = tmp_dml_cur.exec_update(sql)
            if rowcount <= 0:
                logging.warn("invalid rowcount : {0}".format(rowcount))
                raise e
            # close
            tmp_cur.close()
            tmp_conn.close()
    except Exception:
        logging.exception("init split_schema_version_v2 failed")
        raise
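
These snippets are excerpted from a larger OceanBase upgrade script, so the module-level imports are not shown. A minimal sketch of what the examples assume is in scope (names taken from the code itself; the actions helper module and its DMLCursor/QueryCursor wrappers belong to that script, not to the standard library):

import logging
import mysql.connector                        # mysql-connector-python driver
import actions                                # upgrade helper module assumed to ship with the script
from actions import DMLCursor, QueryCursor    # thin cursor wrappers used in the examples below
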
Example 2
def trigger_schema_split_job(conn, cur, user, pwd):
  try:
    query_cur = actions.QueryCursor(cur)
    is_primary = actions.check_current_cluster_is_primary(query_cur)
    if not is_primary:
      logging.warn("current cluster should by primary")
      raise e

    # primary cluster
    trigger_schema_split_job_by_cluster(conn, cur)

    # standby clusters
    standby_cluster_list = actions.fetch_standby_cluster_infos(conn, query_cur, user, pwd)
    for standby_cluster in standby_cluster_list:
      # connect
      logging.info("start to trigger schema split by cluster: cluster_id = {0}"
                   .format(standby_cluster['cluster_id']))
      logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
                   .format(standby_cluster['cluster_id'],
                           standby_cluster['ip'],
                           standby_cluster['port']))
      tmp_conn = mysql.connector.connect(user=standby_cluster['user'],
                                         password=standby_cluster['pwd'],
                                         host=standby_cluster['ip'],
                                         port=standby_cluster['port'],
                                         database='oceanbase')
      tmp_cur = tmp_conn.cursor(buffered=True)
      tmp_conn.autocommit = True
      tmp_query_cur = actions.QueryCursor(tmp_cur)
      # check that this is still a standby cluster
      is_primary = actions.check_current_cluster_is_primary(tmp_query_cur)
      if is_primary:
        logging.error("primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"
                      .format(standby_cluster['cluster_id'],
                              standby_cluster['ip'],
                              standby_cluster['port']))
        raise Exception("standby cluster {0} became primary".format(
                        standby_cluster['cluster_id']))
      # trigger schema split
      trigger_schema_split_job_by_cluster(tmp_conn, tmp_cur)
      # close
      tmp_cur.close()
      tmp_conn.close()
      logging.info("""trigger schema split success : cluster_id = {0}, ip = {1}, port = {2}"""
                      .format(standby_cluster['cluster_id'],
                              standby_cluster['ip'],
                              standby_cluster['port']))

  except Exception:
    logging.exception("trigger schema split failed")
    raise
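
For context, trigger_schema_split_job expects an open connection and a buffered cursor against the primary cluster. A hypothetical calling sketch follows; the host, port, user and password are placeholders, not values from the original script:

# hypothetical driver code; connection parameters are placeholders
conn = mysql.connector.connect(user='root@sys', password='', host='127.0.0.1',
                               port=2881, database='oceanbase')
cur = conn.cursor(buffered=True)
conn.autocommit = True
try:
    trigger_schema_split_job(conn, cur, 'root@sys', '')
finally:
    cur.close()
    conn.close()
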
Example 3
def exec_sys_vars_upgrade_dml_by_cluster(standby_cluster_info):
  try:

    logging.info("exec_sys_vars_upgrade_dml_by_cluster : cluster_id = {0}, ip = {1}, port = {2}"
                 .format(standby_cluster_info['cluster_id'],
                         standby_cluster_info['ip'],
                         standby_cluster_info['port']))
    logging.info("create connection : cluster_id = {0}, ip = {1}, port = {2}"
                 .format(standby_cluster_info['cluster_id'],
                         standby_cluster_info['ip'],
                         standby_cluster_info['port']))
    conn = mysql.connector.connect(user=standby_cluster_info['user'],
                                   password=standby_cluster_info['pwd'],
                                   host=standby_cluster_info['ip'],
                                   port=standby_cluster_info['port'],
                                   database='oceanbase',
                                   raise_on_warnings=True)
    cur = conn.cursor(buffered=True)
    conn.autocommit = True
    dml_cur = DMLCursor(cur)
    query_cur = QueryCursor(cur)
    is_primary = actions.check_current_cluster_is_primary(query_cur)
    if is_primary:
      logging.error("primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"
                    .format(standby_cluster_info['cluster_id'],
                            standby_cluster_info['ip'],
                            standby_cluster_info['port']))
      raise Exception("standby cluster {0} became primary".format(
                      standby_cluster_info['cluster_id']))

    # only update sys tenant in standby cluster
    tenant_id = 1
    # calc diff
    (update_sys_var_list, update_sys_var_ori_list, add_sys_var_list) = calc_diff_sys_var(cur, tenant_id)
    logging.info('update system variables list: [%s]', ', '.join(str(sv) for sv in update_sys_var_list))
    logging.info('update system variables original list: [%s]', ', '.join(str(sv) for sv in update_sys_var_ori_list))
    logging.info('add system variables list: [%s]', ', '.join(str(sv) for sv in add_sys_var_list))
    # update
    update_sys_vars_for_tenant(dml_cur, tenant_id, update_sys_var_list)
    add_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list)
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_date_format', 'YYYY-MM-DD HH24:MI:SS')
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_timestamp_format', 'YYYY-MM-DD HH24:MI:SS.FF')
    special_update_sys_vars_for_tenant(dml_cur, tenant_id, add_sys_var_list, 'nls_timestamp_tz_format', 'YYYY-MM-DD HH24:MI:SS.FF TZR TZD')

    cur.close()
    conn.close()

  except Exception:
    logging.exception("""exec_sys_vars_upgrade_dml_in_standby_cluster failed :
                         cluster_id = {0}, ip = {1}, port = {2}"""
                         .format(standby_cluster_info['cluster_id'],
                                 standby_cluster_info['ip'],
                                 standby_cluster_info['port']))
    raise
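
The standby_cluster_info argument is one entry of the list returned by actions.fetch_standby_cluster_infos (see Examples 1 and 2): a dict with cluster_id, ip, port, user and pwd keys. A hypothetical call, with placeholder values, looks like:

# placeholder values, for illustration only
standby_cluster_info = {'cluster_id': 2, 'ip': '10.0.0.2', 'port': 2881,
                        'user': 'root@sys', 'pwd': ''}
exec_sys_vars_upgrade_dml_by_cluster(standby_cluster_info)
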
def do_each_tenant_dml_actions_by_standby_cluster(standby_cluster_infos):
    try:
        tenant_id_list = [1]
        for standby_cluster_info in standby_cluster_infos:
            logging.info(
                "do_each_tenant_dml_actions_by_standby_cluster: cluster_id = {0}, ip = {1}, port = {2}"
                .format(standby_cluster_info['cluster_id'],
                        standby_cluster_info['ip'],
                        standby_cluster_info['port']))
            logging.info(
                "create connection : cluster_id = {0}, ip = {1}, port = {2}".
                format(standby_cluster_info['cluster_id'],
                       standby_cluster_info['ip'],
                       standby_cluster_info['port']))
            conn = mysql.connector.connect(
                user=standby_cluster_info['user'],
                password=standby_cluster_info['pwd'],
                host=standby_cluster_info['ip'],
                port=standby_cluster_info['port'],
                database='oceanbase',
                raise_on_warnings=True)

            cur = conn.cursor(buffered=True)
            conn.autocommit = True
            query_cur = QueryCursor(cur)
            is_primary = check_current_cluster_is_primary(query_cur)
            if is_primary:
                logging.error(
                    "primary cluster changed : cluster_id = {0}, ip = {1}, port = {2}"
                    .format(standby_cluster_info['cluster_id'],
                            standby_cluster_info['ip'],
                            standby_cluster_info['port']))
                raise Exception("standby cluster {0} became primary".format(
                    standby_cluster_info['cluster_id']))

            ## process
            do_each_tenant_dml_actions(cur, tenant_id_list)

            cur.close()
            conn.close()
    except Exception:
        logging.exception(
            "do_each_tenant_dml_actions_by_standby_cluster failed")
        raise
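
Similarly, standby_cluster_infos has the same shape as the list returned by actions.fetch_standby_cluster_infos in the earlier examples, so a wiring sketch, assuming a connection to the primary cluster (conn, cur, user, pwd) is already open, could be:

# sketch only: build the standby list from an existing primary-cluster connection
query_cur = actions.QueryCursor(cur)
standby_cluster_infos = actions.fetch_standby_cluster_infos(conn, query_cur, user, pwd)
do_each_tenant_dml_actions_by_standby_cluster(standby_cluster_infos)
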