Example #1
def delete_object(info):
    """Delete object from DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # look up the object_id if the object already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id

    # delete object if already exists
    if object_id:
        url = """http://{0}/racktables/index.php?module=redirect&op=deleteObject&page=depot&tab=addmore&object_id={1}""".format(
            rt_server, object_id
        )
        req = requests.get(url, auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to delete the existing object: {0}".format(info["hostname"])
            return False
        else:
            print "OK - Deleted the existing object: {0}".format(info["hostname"])

    # close db
    db.close()

    return True
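Note that the lookup above interpolates the hostname directly into the SQL string. torndb's query() also accepts positional parameters (the delete_posts example further down uses this style), which sidesteps quoting and injection problems. A minimal sketch of the same lookup, assuming the same module-level rt_* connection globals:

    object_id = ""
    for item in db.query("select * from Object where name=%s", info["hostname"]):
        object_id = item.id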
Example #2
    def get_project_keys_from_mysql(self):
        from torndb import Connection
        db = Connection(
            "%s:%s" % (self.config.MYSQL_HOST, self.config.MYSQL_PORT),
            self.config.MYSQL_DB,
            user=self.config.MYSQL_USER,
            password=self.config.MYSQL_PASS
        )

        query = "select project_id, public_key, secret_key from sentry_projectkey"
        logging.info("Executing query %s in MySQL", query)

        project_keys = {}

        try:
            db_projects = db.query(query)

            if db_projects is None:
                return None

            for project in db_projects:
                logging.info("Updating information for project with id %s...", project.project_id)
                self.add_project(project_keys, project.project_id, project.public_key, project.secret_key)

        finally:
            db.close()
        return project_keys
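get_project_keys_from_mysql delegates to an add_project helper that is not shown in this listing. A plausible sketch, inferred from the equivalent accumulation in the update() method near the end of this listing (hypothetical, not the original implementation):

    def add_project(self, project_keys, project_id, public_key, secret_key):
        # hypothetical helper: group public/secret keys per project id
        if project_id not in project_keys:
            project_keys[project_id] = {"public_key": [], "secret_key": []}
        project_keys[project_id]["public_key"].append(public_key)
        project_keys[project_id]["secret_key"].append(secret_key)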
Example #3
def _connection(config):
    conn = None
    try:
        if config.DB_BACKEND == 'mysql':
            from torndb import Connection
            conn = Connection("%s:%s" % (config.DB_HOST, config.DB_PORT),
                              config.DB_NAME,
                              user=config.DB_USER,
                              password=config.DB_PASS)
        elif config.DB_BACKEND == 'sqlite':
            import sqlite3
            conn = sqlite3.connect(config.DB_NAME)
            conn.row_factory = _dict_factory
        elif config.DB_BACKEND == 'postgres':
            import psycopg2
            from psycopg2.extras import DictConnection

            conn = psycopg2.connect(
                database=config.DB_NAME,
                user=config.DB_USER,
                password=config.DB_PASS,
                host=config.DB_HOST,
                port=config.DB_PORT,
                connection_factory=DictConnection,
            )
        else:
            raise ValueError("Unknown backend %r" % config.DB_BACKEND)

        yield conn
    finally:
        if conn is not None:
            conn.close()
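_connection is a generator (it yields the connection and closes it in a finally block), so callers presumably wrap it as a context manager. A minimal usage sketch, assuming contextlib.contextmanager is the intended wrapper:

    import contextlib

    get_connection = contextlib.contextmanager(_connection)

    with get_connection(config) as conn:
        pass  # use conn here; its query API depends on config.DB_BACKEND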
Example #4
def get_env_iter():

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')


    v_show_env_type = config.SHOW_ENV_TYPE

    if v_show_env_type==1:

        v_more_sql = ''

    elif v_show_env_type==2:  # id=3 means the production environment

        v_more_sql = ' and id=3'

    elif v_show_env_type==3:

        v_more_sql = ' and id!=3'

    else:
        pass

    str_sql = "select id,name from resources_env where 1=1 %s order by id"  % (v_more_sql)
    

    env_list = list(db.iter(str_sql))  # materialize rows before closing the connection below

    db.close()

    return env_list
Example #5
def update_rack(info, object_id):
    """Automate server audit for rack info into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # update the rackspace
    if opts['rackspace']:
        rs_info = opts['rackspace'].split(':')
        colo = "".join(rs_info[0:1])
        row = "".join(rs_info[1:2])
        rack = "".join(rs_info[2:3])
        atom = "".join(rs_info[3:4])
        if not atom:
            print "The rackspace is not correct"
            return False

        # get rack_id
        rack_id = ""
        for item in db.query(
                "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'"
                .format(rack, colo, row)):
            rack_id = item.id
        if not rack_id:
            print "Failed to get rack_id"
            return False

        atom_list = atom.split(',')
        atom_data = []
        for i in atom_list:
            if opts['rackposition']:
                if opts['rackposition'] in ['left', 'front']:
                    atom_data.append("&atom_{0}_{1}_0=on".format(rack_id, i))
                if opts['rackposition'] in ['right', 'back']:
                    atom_data.append("&atom_{0}_{1}_2=on".format(rack_id, i))
                if opts['rackposition'] in ['interior']:
                    atom_data.append("&atom_{0}_{1}_1=on".format(rack_id, i))
            else:
                atom_data.append(
                    "&atom_{0}_{1}_0=on&atom_{0}_{1}_1=on&atom_{0}_{1}_2=on".
                    format(rack_id, i))
        atom_url = "".join(atom_data)

        url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=rackspace&op=updateObjectAllocation""".format(
            rt_server)
        payload = """object_id={0}&rackmulti%5B%5D={1}&comment=&got_atoms=Save{2}"""\
                  .format(object_id,rack_id,atom_url)
        req = requests.post(url,
                            data=payload,
                            headers=rt_headers,
                            auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to update rackspace"
            return False
        print "OK - Updated rackspace"

    # close db
    db.close()

    # end
    return True
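update_rack expects opts['rackspace'] in colo:row:rack:atoms form, where atoms is a comma-separated list of rack units, plus an optional opts['rackposition']. The values below are hypothetical examples of that format:

    opts['rackspace'] = 'DC1:ROW2:RACK03:12,13,14'  # colo:row:rack:unit[,unit...]
    opts['rackposition'] = 'front'                  # left/front, right/back, or interior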
Example #6
def delete_object(info):
    """Delete object from DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # look up the object_id if the object already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id

    # delete object if already exists
    if object_id:
        url = """http://{0}/racktables/index.php?module=redirect&op=deleteObject&page=depot&tab=addmore&object_id={1}"""\
              .format(rt_server,object_id)
        req = requests.get(url, auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to delete the existing object: {0}".format(
                info['hostname'])
            return False
        else:
            print "OK - Deleted the existing object: {0}".format(
                info['hostname'])

    # close db
    db.close()

    return True
Example #7
def _connection(config):
    conn = None
    try:
        if config.DB_BACKEND == 'mysql':
            from torndb import Connection
            conn = Connection(
                "%s:%s" % (config.DB_HOST, config.DB_PORT),
                config.DB_NAME,
                user=config.DB_USER,
                password=config.DB_PASS
            )
        elif config.DB_BACKEND == 'sqlite':
            import sqlite3
            conn = sqlite3.connect(config.DB_NAME)
            conn.row_factory = _dict_factory
        elif config.DB_BACKEND == 'postgres':
            import psycopg2
            from psycopg2.extras import DictConnection

            conn = psycopg2.connect(
                database=config.DB_NAME,
                user=config.DB_USER,
                password=config.DB_PASS,
                host=config.DB_HOST,
                port=config.DB_PORT,
                connection_factory=DictConnection,
            )
        else:
            raise ValueError("Unknown backend %r" % config.DB_BACKEND)

        yield conn
    finally:
        if conn is not None:
            conn.close()
Example #8
def update_rack(info, object_id):
    """Automate server audit for rack info into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # update the rackspace
    if opts["rackspace"]:
        rs_info = opts["rackspace"].split(":")
        colo = "".join(rs_info[0:1])
        row = "".join(rs_info[1:2])
        rack = "".join(rs_info[2:3])
        atom = "".join(rs_info[3:4])
        if not atom:
            print "The rackspace is not correct"
            return False

        # get rack_id
        rack_id = ""
        for item in db.query(
            "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'".format(
                rack, colo, row
            )
        ):
            rack_id = item.id
        if not rack_id:
            print "Failed to get rack_id"
            return False

        atom_list = atom.split(",")
        atom_data = []
        for i in atom_list:
            if opts["rackposition"]:
                if opts["rackposition"] in ["left", "front"]:
                    atom_data.append("&atom_{0}_{1}_0=on".format(rack_id, i))
                if opts["rackposition"] in ["right", "back"]:
                    atom_data.append("&atom_{0}_{1}_2=on".format(rack_id, i))
                if opts["rackposition"] in ["interior"]:
                    atom_data.append("&atom_{0}_{1}_1=on".format(rack_id, i))
            else:
                atom_data.append("&atom_{0}_{1}_0=on&atom_{0}_{1}_1=on&atom_{0}_{1}_2=on".format(rack_id, i))
        atom_url = "".join(atom_data)

        url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=rackspace&op=updateObjectAllocation""".format(
            rt_server
        )
        payload = """object_id={0}&rackmulti%5B%5D={1}&comment=&got_atoms=Save{2}""".format(
            object_id, rack_id, atom_url
        )
        req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to update rackspace"
            return False
        print "OK - Updated rackspace"

    # close db
    db.close()

    # end
    return True
Example #9
def execute_query(query, **kw_dict):
    """Execute query
    """
    conn = Connection(**MYSQL_SETTINGS)
    if not kw_dict:
        try:
            result = conn.execute(query)
            conn.close()
            return result
        except Exception, e:
            print e
Example #10
def get_query_result(query, **kw_dict):
    """get query result
    """
    conn = Connection(**MYSQL_SETTINGS)
    if not kw_dict:
        try:
            result = conn.query(query)
            conn.close()
            return result
        except Exception, e:
            print e
Example #11
def delete_posts(sharedfile_id=0, **kwargs):
    """
    This task will update the post table setting any post containing the shared file
    to deleted = 1
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    db.execute(
        """UPDATE post SET deleted=1 WHERE sharedfile_id = %s AND deleted=0""",
        sharedfile_id)
    db.close()
Example #12
def update_blank_switch_security_pdu_offline(info):
    """Automate server autodir for PatchPanel/NetworkSwitch/NetworkSecurity/PDU into Racktables or as offline mode"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(rt_server)
    if opts["blank"]:
        payload = """0_object_type_id=9&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["switch"]:
        payload = """0_object_type_id=8&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["security"]:
        payload = """0_object_type_id=798&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["pdu"]:
        payload = """0_object_type_id=2&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["offline"]:
        payload = """0_object_type_id=4&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info["hostname"])
        return False
    else:
        print "OK - Created object: {0}".format(info["hostname"])

    # get object_id
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # update rack info
    update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
Example #13
def read_db(info):
    """Get info from Racktables DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if object_id already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id
    if not object_id:
        print "Object:{0} does not exist".format(info['hostname'])
        return False

    # get the location info
    rack_id_list = []
    unit_no_list = []
    item = None
    for item in db.query(
            "select rack_id,unit_no from RackSpace where object_id=(select id from Object where name='{0}')"
            .format(info['hostname'])):
        rack_id_list.append(int(item.rack_id))
        unit_no_list.append(int(item.unit_no))
    if not item:
        print "Object:{0} does not have location info".format(info['hostname'])
        return False
    rack_id = ','.join(str(i) for i in list(set(rack_id_list)))
    unit_no = ','.join(str(i) for i in list(set(unit_no_list)))

    location_name = ""
    row_name = ""
    rack_name = ""
    for item in db.query(
            "select location_name,row_name,name from Rack where id='{0}'".
            format(rack_id)):
        location_name = item.location_name
        row_name = item.row_name
        rack_name = item.name
    if not location_name or not row_name or not rack_name:
        print "Object:{0} does not have location info".format(info['hostname'])
        return False
    print "RACKSPACE:   {0}:{1}:{2}:{3}".format(location_name, row_name,
                                                rack_name, unit_no)

    # close db
    db.close()

    return {
        'location_name': location_name,
        'row_name': row_name,
        'rack_name': rack_name,
        'unit_no': unit_no
    }
Example #14
def update_blank_switch_security_pdu_offline(info):
    """Automate server autodir for PatchPanel/NetworkSwitch/NetworkSecurity/PDU into Racktables or as offline mode"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(
        rt_server)
    if opts['blank']:
        payload = """0_object_type_id=9&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['switch']:
        payload = """0_object_type_id=8&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['security']:
        payload = """0_object_type_id=798&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['pdu']:
        payload = """0_object_type_id=2&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['offline']:
        payload = """0_object_type_id=4&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info['hostname'])
        return False
    else:
        print "OK - Created object: {0}".format(info['hostname'])

    # get object_id
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # update rack info
    update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
Example #15
def log_dba_jobs_progress(v_job_id,v_cur_prog_desc,v_cur_cum_prog_desc,v_cur_prog_com_rate):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_add_job_progress_sql='''insert into dba_job_progress(job_id,cur_prog_desc,cur_cum_prog_desc,cur_prog_com_rate) values(%d,'%s','%s',%d)''' % (
                v_job_id,v_cur_prog_desc,v_cur_cum_prog_desc,v_cur_prog_com_rate)

    db.execute(v_add_job_progress_sql.replace('%','%%'))

    db.close()
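The replace('%', '%%') call escapes literal percent signs, presumably because torndb still passes the statement through MySQL paramstyle formatting even when no arguments are given. A sketch of the same insert using torndb's positional parameters, which makes the escaping unnecessary:

    db.execute("insert into dba_job_progress(job_id,cur_prog_desc,cur_cum_prog_desc,cur_prog_com_rate) "
               "values(%s,%s,%s,%s)",
               v_job_id, v_cur_prog_desc, v_cur_cum_prog_desc, v_cur_prog_com_rate)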
Example #16
def list_object(info):
    """List the objects of the given rackspace"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if rackspace is correct
    rs_info = opts['hostname'].split(':')
    colo = "".join(rs_info[0:1])
    row = "".join(rs_info[1:2])
    rack = "".join(rs_info[2:3])
    if not rack:
        print "The rackspace is not correct"
        return False

    # get rack_id
    rack_id = ""
    for item in db.query(
            "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'"
            .format(rack, colo, row)):
        rack_id = item.id
    if not rack_id:
        print "Failed to get rack_id"
        return False

    # get object_id
    object_id_list = []
    for item in db.query(
            "select * from RackSpace where rack_id={0}".format(rack_id)):
        object_id_list.append(item.object_id)
    if len(object_id_list) == 0:
        print "Failed to get object_id"
        return False

    # get rid of the duplicated items then sort and read one by one
    for object_id in sorted(list(set(object_id_list))):
        for item in db.query(
                "select * from Object where id={0}".format(object_id)):
            object_name = item.name
            object_type_id = item.objtype_id
            for item in db.query(
                    "select * from Dictionary where dict_key={0}".format(
                        object_type_id)):
                object_type_name = item.dict_value
        print "{0}: {1}".format(object_type_name, object_name)

    # close db
    db.close()

    return True
Example #17
def calculate_likes(sharedfile_id, **kwargs):
    """
    This task will get all likes for a shared file, count them, then update the sharedfile.like_count
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    result = db.get(
        "SELECT count(id) as like_count from favorite where sharedfile_id = %s and deleted=0",
        (sharedfile_id))
    like_count = int(result['like_count'])
    db.execute("UPDATE sharedfile set like_count = %s WHERE id = %s",
               like_count, sharedfile_id)

    tweet_or_magic(db, sharedfile_id, like_count)
    db.close()
Example #18
def final_dba_job(v_job_id):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # mark the job as completed in the job queue
    v_update_job_sql = '''update dba_jobs set status=1 where job_id=%d''' % (
        v_job_id)


    db.execute(v_update_job_sql.replace('%','%%'))

    db.close()
Example #19
def initial_dba_job(v_op_user,v_op_comment):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_add_job_sql = '''insert into dba_jobs(op_user,job_desc) values('%s','%s')''' % (
            v_op_user,v_op_comment)

    v_job_id=db.execute(v_add_job_sql.replace('%','%%'))

    db.close()

    return v_job_id
Example #20
def upload_processlist():

    # connect to the config center DB
    # db = Connection('/home/apps/inception/inc.socket',
    #                 '',
    #                 '',
    #                 '',
    #                 time_zone='+8:00')

    db = Connection("127.0.0.1:6669", "", "", "", time_zone="+8:00")

    print "aa"

    v_sql = r"""/*--user=mysqladmin;--password=mysql;--host=172.26.137.125;
    --enable-check;--port=3306;*/  
    inception_magic_start;  
    use test;  
    CREATE TABLE adaptive_office23(id int);  
    inception_magic_commit;"""

    # print v_sql

    upload_server_list = db.iter(v_sql)

    if upload_server_list:  # loop over the instance list

        i = 0

        print upload_server_list

        for upload_server in upload_server_list:

            stage = upload_server["stage"]

            print stage

            stagestatus = upload_server["stagestatus"]

            print stagestatus

        #     mysql_port = upload_server['port']

        #     v_host =host_ip + ':' + str(mysql_port)

        #     i=i+1

    db.close()
Example #21
def read_db(info):
    """Get info from Racktables DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if object_id already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print "Object:{0} does not exist".format(info["hostname"])
        return False

    # get the location info
    rack_id_list = []
    unit_no_list = []
    item = None
    for item in db.query(
        "select rack_id,unit_no from RackSpace where object_id=(select id from Object where name='{0}')".format(
            info["hostname"]
        )
    ):
        rack_id_list.append(int(item.rack_id))
        unit_no_list.append(int(item.unit_no))
    if not item:
        print "Object:{0} does not have location info".format(info["hostname"])
        return False
    rack_id = ",".join(str(i) for i in list(set(rack_id_list)))
    unit_no = ",".join(str(i) for i in list(set(unit_no_list)))

    location_name = ""
    row_name = ""
    rack_name = ""
    for item in db.query("select location_name,row_name,name from Rack where id='{0}'".format(rack_id)):
        location_name = item.location_name
        row_name = item.row_name
        rack_name = item.name
    if not location_name or not row_name or not rack_name:
        print "Object:{0} does not have location info".format(info["hostname"])
        return False
    print "RACKSPACE:   {0}:{1}:{2}:{3}".format(location_name, row_name, rack_name, unit_no)

    # close db
    db.close()

    return {"location_name": location_name, "row_name": row_name, "rack_name": rack_name, "unit_no": unit_no}
Example #22
def get_job_status(v_job_id):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_get_sql = '''SELECT cur_cum_prog_desc,cur_prog_com_rate,cur_prog_shell_cmd from dba_job_progress where id=(select max(id) from dba_job_progress where job_id=%d)''' % (
        v_job_id)

    job_list = db.query(v_get_sql)
            

    db.close()

    return job_list
Example #23
def list_object(info):
    """List the objects of the given rackspace"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if rackspace is correct
    rs_info = opts["hostname"].split(":")
    colo = "".join(rs_info[0:1])
    row = "".join(rs_info[1:2])
    rack = "".join(rs_info[2:3])
    if not rack:
        print "The rackspace is not correct"
        return False

    # get rack_id
    rack_id = ""
    for item in db.query(
        "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'".format(rack, colo, row)
    ):
        rack_id = item.id
    if not rack_id:
        print "Failed to get rack_id"
        return False

    # get object_id
    object_id_list = []
    for item in db.query("select * from RackSpace where rack_id={0}".format(rack_id)):
        object_id_list.append(item.object_id)
    if len(object_id_list) == 0:
        print "Failed to get object_id"
        return False

    # get rid of the duplicated items then sort and read one by one
    for object_id in sorted(list(set(object_id_list))):
        for item in db.query("select * from Object where id={0}".format(object_id)):
            object_name = item.name
            object_type_id = item.objtype_id
            for item in db.query("select * from Dictionary where dict_key={0}".format(object_type_id)):
                object_type_name = item.dict_value
        print "{0}: {1}".format(object_type_name, object_name)

    # close db
    db.close()

    return True
Example #24
def add_posts(shake_id=0,
              sharedfile_id=0,
              sourcefile_id=0,
              deleted=0,
              created_at=None,
              **kwargs):
    """
    This task will get the subscribers for a shake and insert
    a post for every user. If the sourcefile_id exists in posts
    it will set "seen" equal to 1.
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)

    #get subscribers for shake_id
    results = db.query(
        """SELECT user_id as id FROM subscription WHERE shake_id = %s and deleted = 0""",
        shake_id)
    for user in results:
        seen = 0
        #does this sourcefile exist in post for this user?
        existing = db.query(
            """SELECT id FROM post WHERE user_id = %s and sourcefile_id = %s and deleted=0 ORDER BY created_at desc LIMIT 1""",
            user['id'], sourcefile_id)
        #if so, set seen = 1
        if existing:
            seen = 1
        #if not, insert a new row
        if created_at:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id, created_at)
        else:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id)
    #now insert a post for the user who originally posted it.
    sharedfile = db.get("""SELECT user_id from sharedfile WHERE id = %s""",
                        sharedfile_id)
    db.execute(
        """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
        sharedfile['user_id'], sourcefile_id, sharedfile_id, 1, 0, shake_id)
    db.close()
Example #25
def get_domain_list_from_env(v_role_id,belong_env):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')


    str_sql = '''select b.id,b.name from resources_role_app a,resources_app b where 
    a.app_id = b.id and a.role_id = %d and b.app_type= %d ''' % (v_role_id,belong_env)
        
    

    app_list = db.query(str_sql)

    db.close()

    return app_list
Example #26
def remote_start_mysql_server(v_host,v_os_user,v_os_password,
    v_db_port): 

    # v_mysql_version 1: mysql5 4:mariadb5 5:mariadb10
    # 
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # get the MySQL server version: mysql5, mariadb5, mariadb10
    v_get_sql = r'''SELECT case mysql_version when 1 then 'mysql5' when 4 then 'mariadb5' when 5 then 'mariadb10' when 6 then 'percona5.6' when 7 then 'pxc5.6' end mysql_version from tag where ip='%s' and port=%d ''' % (v_host,int(v_db_port))

    v_list = db.get(v_get_sql)

    v_mysql_version = v_list['mysql_version']
            

    db.close()

            
    #v_db_socket='--socket=/tmp/mysql'+str(v_db_port)+'.sock'
    # nohup keeps the command running after the paramiko session exits
    # 1>/dev/null 2>&1 is required or the command will not run; presumably under paramiko the command fails without output redirection
    
    v_exe_cmd = r'''nohup /apps/svr/%s/bin/mysqld_safe --defaults-file=%s/%s_%d.cnf 1>/dev/null 2>&1 &''' % (
                v_mysql_version,config.mysql_conf_path,v_mysql_version,v_db_port)

    print v_exe_cmd

    # remote paramiko call to execute the command on the target host

    result = remote_shell_cmd_no_result(v_host,v_os_user,v_os_password,v_exe_cmd)

    time.sleep(20)   # wait 20 seconds for mysql to fully start; better would be to poll in a loop until mysql is up



    
    return result  # an empty string means success
Example #27
def calculate_saves(sharedfile_id, **kwargs):
    """
    Take the id of a sharedfile that just got saved by another user.

    If this file is an original, meaning it has no original_id set, we can safely calculate the save count by how
    many sharedfiles have the file's id as their original_id.
    
    However if the file has an original, we also need to recalculate the original's count, as well as this file's save count. 
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    sharedfile = db.get("select original_id from sharedfile where id = %s",
                        sharedfile_id)
    original_id = sharedfile['original_id']

    # If this file is original, calculate it's save count by all sharedfile where this file is the original_id.
    if original_id == 0:
        original_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where original_id = %s and deleted = 0",
            sharedfile_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   original_saves['count'], sharedfile_id)
    # Otherwise, we need to update the original's save count and this file's save count.
    else:
        original_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where original_id = %s and deleted = 0",
            original_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   original_saves['count'], original_id)

        # Calc this files new save count, only based on parent since its not original.
        parent_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where parent_id = %s and deleted = 0",
            sharedfile_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   parent_saves['count'], sharedfile_id)
    db.close()
Example #28
def remote_off_get_datadir_path(v_host,v_db_port,v_type): 

        
    # v_mysql_version 1: mysql5 4:mariadb5 5:mariadb10
    # 
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # get the MySQL server version: mysql5, mariadb5, mariadb10
    v_get_sql = r'''SELECT case mysql_version when 1 then 'mysql5' when 4 then 'mariadb5' when 5 then 'mariadb10' when 6 then 'percona5.6' when 7 then 'pxc5.6' end mysql_version from tag where ip='%s' and port=%d ''' % (v_host,int(v_db_port))

    v_list = db.get(v_get_sql)

    v_mysql_version = v_list['mysql_version']
            

    db.close()

    # /apps/dbdat/mariadb10_data3306
    # 
    if v_type==1:

        v_datadir_path = '%s/%s_data%s' % (config.mysql_datadir_path,v_mysql_version,v_db_port)

    else:

        v_datadir_path = '%s_data%s' % (v_mysql_version,v_db_port)
    
   
    print v_datadir_path

    
    
    return v_datadir_path
Example #29
    def update(self):
        config = self.application.config
        db = Connection(
            "%s:%s" % (config.MYSQL_HOST, config.MYSQL_PORT),
            config.MYSQL_DB,
            user=config.MYSQL_USER,
            password=config.MYSQL_PASS
        )

        query = "select project_id, public_key, secret_key from sentry_projectkey"
        logging.info("Executing query %s in MySQL" % query)

        projects = {}

        try:
            db_projects = db.query(query)

            if db_projects is None:
                logging.warn("Could not retrieve information from sentry's database because MySQL Server was unavailable")
                return

            for project in db_projects:
                logging.info("Updating information for project with id %s..." % project.project_id)

                if project.project_id not in projects:
                    projects[project.project_id] = {
                        "public_key": [],
                        "secret_key": []
                    }

                projects[project.project_id]['public_key'].append(project.public_key)
                projects[project.project_id]['secret_key'].append(project.secret_key)

            self.application.project_keys = projects
        finally:
            db.close()
Example #30
class MySQLHelper:

    def __init__(self, host, user, password, database):
        self._db = Connection(host, database, user=user, password=password, max_idle_time=10)
        if not self._db._db:
            raise Exception('%s' % self._db.error_msg)

    def query(self, sql, *parameters):
        return self._db.query(sql, *parameters)

    def query_one(self, sql, *parameters):
        res = self._db.query(sql, *parameters)
        return res.pop() if res else {}

    def write(self, sql, *parameters):
        return self._db.execute(sql, *parameters)

    def gen_insert(self, tablename, rowdict, replace=False):
        return self._db.gen_insert_sql(tablename, rowdict, replace)

    def insert_dict(self, tablename, rowdict):
        key_strs = ", ".join(["""`%s`""" % key for key in rowdict.keys()])
        value_strs = ", ".join(["""'%s'""" % rowdict.get(key) for key in rowdict.keys()])
        sql = """INSERT INTO %s (%s) VALUES (%s)""" % (tablename, key_strs, value_strs)
        return self._db.execute(sql)

    def insert_batch(self, tablename, batch_params):
        value_batch = []
        for param in batch_params:
            keys = param.keys()
            key_strs = ", ".join(["""`%s`""" % key for key in keys])
            value_strs = "(%s)" % ", ".join(
                ["""'%s'""" % "%s" % param.get(key) for key in keys])
            value_batch.append(value_strs)
        sql = """INSERT INTO %s (%s) VALUES %s""" % (tablename, key_strs, ",".join(value_batch))
        return self._db.execute(sql)

    def update_dict(self, tablename, rowdict, where):
        sql = """UPDATE %s SET %s WHERE %s""" % (
        tablename, self._formatter(rowdict, ', '), self._formatter(where, " AND "))
        return self._db.execute(sql)

    def transaction(self, query, parameters):
        return self._db.transaction(query, parameters)

    def get(self, tablename, conds, cols='', extra_conds={}):
        if not tablename:
            return False
        cols = "%s" % ','.join(cols) if cols else '*'
        wheres = []
        values = []
        if conds and isinstance(conds, dict):
            for key, value in conds.items():
                if isinstance(value, (list, tuple)):
                    wheres.append("`%s` IN (%s)" % (key, "'%s'" % "','".join([str(v) for v in value])))
                else:
                    wheres.append("`%s`=%%s" % key)
                    values.append("%s" % value)
        where_str = ' AND '.join(wheres)
        sql = """ SELECT %s FROM `%s` """ % (cols, tablename)
        if where_str:
            sql += """ WHERE %s """ % (where_str)
        if extra_conds.get('group_by'):
            sql += """ GROUP by %s """ % ','.join(extra_conds['group_by'])
        if extra_conds.get('order_by'):
            sql += """ ORDER by %s """ % ','.join(extra_conds['order_by'])
        if extra_conds.get('limit'):
            sql += """ LIMIT %s """ % ','.join(map(str, extra_conds['limit']))

        return self._db.query(sql, *values)


    def _serialize(self, value):
        if isinstance(value, (dict, list, set)):
            value = json.dumps(value)
        else:
            value = "%s" % value
        return value

    def _formatter(self, pairs, delimiter):
        values = []
        for key, value in pairs.items():
            if not isinstance(value, list):
                value = self._serialize(value)
                values.append("""`%s`='%s'""" % (key, value))
            else:
                values.append("""`%s` in ("%s")""" % (key, '","'.join([self._serialize(val) for val in value])))
        return delimiter.join(values)

    def __del__(self):
        self._db.close()
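A minimal usage sketch for MySQLHelper; the host, credentials, table, and columns are placeholders:

    helper = MySQLHelper('127.0.0.1:3306', 'dbuser', 'dbpass', 'testdb')
    row = helper.query_one('SELECT id, name FROM resources_env WHERE id=%s', 3)
    helper.insert_dict('resources_env', {'id': 4, 'name': 'staging'})
    helper.update_dict('resources_env', {'name': 'stage'}, {'id': 4})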
Example #31
def add_single_backup_job(v_setup_id):  # id of the row just updated or inserted in the mysql_ins_bak_setup table

    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # default in-memory jobstore

    url = "sqlite:////home/apps/dbajob.sqlite"

    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()

    print "a"

    # connect to the config center DB to get the backup schedule info
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id and a.id=%d""" % (
        v_setup_id
    )

    print v_sql

    bak_server = db.get(v_sql)

    instance_id = bak_server["instance_id"]

    from_host = bak_server["ip"]

    # print from_host

    mysql_port = bak_server["port"]

    backup_interval_type = bak_server["backup_interval_type"]

    backup_start_time = bak_server["backup_start_time"]

    str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time

    print str_start_date

    v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

    if backup_interval_type == 1:  # every day

        # scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            days=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    elif backup_interval_type == 2:  # every week weeks=1

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            weeks=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    elif backup_interval_type == 3:  # every hour hours=1

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            hours=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    else:
        pass

    scheduler.print_jobs()

    print "b"

    scheduler.start()

    scheduler.print_jobs()

    print "c"

    # record the backup scheduling status in the database; 0 = scheduled job started, actual backup not yet begun

    v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
    values(%d,0)""" % (
        instance_id
    )

    db.execute(v_sche_start_sql)

    db.close()
Example #32
def add_schedule_backup_job():
    # if __name__ == '__main__':
    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # default in-memory jobstore

    url = "sqlite:////home/apps/dbajob.sqlite"

    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()

    print "a"

    scheduler.remove_all_jobs(jobstore="sqlite_js")

    scheduler.print_jobs()

    print "remove"

    # v_current_jobs = scheduler.get_jobs()

    # print v_current_jobs

    # if v_current_jobs:  # if the job already exists, remove it first

    #     scheduler.remove_job('backup')

    # connect to the config center DB to get the backup schedule info
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """

    print v_sql

    bak_server_list = db.query(v_sql)

    if bak_server_list:  # there are servers to configure

        i = 0

        # mark scheduled jobs that never started as manually ended (backup_result_type=4)
        v_manual_end_sql = "update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0"

        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server["instance_id"]

            from_host = bak_server["ip"]

            # print from_host

            mysql_port = bak_server["port"]

            backup_interval_type = bak_server["backup_interval_type"]

            backup_start_time = bak_server["backup_start_time"]

            str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time

            print str_start_date

            v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

            if backup_interval_type == 1:  # every day

                # scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    days=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            elif backup_interval_type == 2:  # every week weeks=1

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    weeks=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            elif backup_interval_type == 3:  # every hour hours=1

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    hours=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            else:
                pass
            # record the backup scheduling status in the database; 0 = scheduled job started, actual backup not yet begun

            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (
                instance_id
            )

            db.execute(v_sche_start_sql)

            i = i + 1

        scheduler.print_jobs()

        print "b"

        scheduler.start()

        scheduler.print_jobs()

        print "c"

    db.close()
Example #33
def generate_mysql_config(tlp_file, schema_file, force=False):
    """
    Args:
        tlp_file(unicode): config template to be written in to config center db
        schema_file(unicode): scheme file
    """

    if not os.path.exists(tlp_file):
        raise ImproperlyConfigured(u"invalid config template file `%s`" %
                                   tlp_file)
    if not os.path.exists(schema_file):
        raise ImproperlyConfigured(u"invalid config scheme file path `%s`" %
                                   schema_file)
    with open(tlp_file) as fi:
        config = json.load(fi)
    with open(schema_file) as fi:
        schema = json.load(fi)
        Draft4Validator(schema).validate(config)
    config_center = settings.CONFIG_CENTER
    env = settings.ENV
    logging.warn("init config center with env `%s`" % env)
    from torndb import Connection
    db = None
    try:
        db_name = config_center["database"]
        db = Connection(host=config_center["host"],
                        database=db_name,
                        user=config_center["user"],
                        password=config_center["password"])
        table_name = "t_s_config_%s" % env
        if force:
            db.execute("DROP TABLE if EXISTS %s" % table_name)
        if db.query("show tables where Tables_in_%s = '%s'" %
                    (db_name, table_name)):
            raise RuntimeError(
                u"config center of env `%s` has already been initialized!" % env)
        create_table_sql = u"""
            CREATE TABLE IF NOT EXISTS %s (
              `id` int(20) unsigned zerofill NOT NULL AUTO_INCREMENT,
              `node` varchar(40) COLLATE utf8_bin NOT NULL COMMENT '模块节点名',
              `key` varchar(40) COLLATE utf8_bin NOT NULL COMMENT '模块下键名',
              `value` varchar(200) COLLATE utf8_bin DEFAULT NULL COMMENT '模块键值',
              `key_type` varchar(6) COLLATE utf8_bin NOT NULL DEFAULT 'string' COMMENT '配置键对应的类型',
              `key_note` varchar(45) COLLATE utf8_bin DEFAULT NULL COMMENT '键名备注说明',
              PRIMARY KEY (`id`),
              UNIQUE KEY `unique_config` (`node`,`key`) USING BTREE COMMENT '配置唯一索引'
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='%s环境 系统运行配置表'"""\
                           % (table_name, env)
        db.execute(create_table_sql)
        sql = "INSERT INTO %s" % table_name
        sql += "(`node`, `key`, `value`, `key_type`) VALUES (%s, %s, %s, %s)"
        datas = []
        for key, value in config.items():
            for k, v in value.items():
                key_type = "string"
                if isinstance(v, list):
                    v = ", ".join(v)
                elif isinstance(v, bool):
                    key_type = "bool"
                elif isinstance(v, int):
                    key_type = "int"

                data = (key, k, v, key_type)
                datas.append(data)
        db.executemany_rowcount(sql, datas)
    except KeyError as e:
        raise ImproperlyConfigured(u"need config key `%s` of config_center" %
                                   (e.args[0]))
    finally:
        if db:
            db.close()
Example #34
                print from_host, mysql_port, os_user, os_password

                #scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])
                scheduler.add_interval_job(backup, days=1, start_date='2014-07-18 18:17:01', args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 2: # every week

                scheduler.add_interval_job(backup, weeks=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 3: # every hour

                scheduler.add_interval_job(backup, hours=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            i=i+1


    db.close()

    #scheduler.add_interval_job(func.remote_mysql_backup, seconds=5, args=[from_host, port, os_user, os_password])
    #scheduler.add_interval_job(log_w, seconds=5, start_date='2014-07-18 16:21', args=['abc'])
    

    if bak_server_list: # there are servers to configure

        print('Press Ctrl+C to exit')

        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            pass
Example #35
def migrate_for_user(user_id=0, **kwargs):
    """
    This tasks handles copying MLKSHK user data over to the MLTSHP tables.

    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)

    state = db.get("SELECT is_migrated FROM migration_state WHERE user_id=%s",
                   user_id)
    if not state or state["is_migrated"] == 1:
        logger.info("User %s already migrated" % str(user_id))
        db.close()
        return

    logger.info("Migrating user_id %s..." % str(user_id))

    logger.info("- app records")
    db.execute(
        """INSERT IGNORE INTO app (id, user_id, title, description, secret, redirect_url, deleted, created_at, updated_at) SELECT ma.id, ma.user_id, ma.title, ma.description, ma.secret, ma.redirect_url, 0, ma.created_at, ma.updated_at FROM mlkshk_app ma WHERE ma.user_id=%s AND ma.deleted=0""",
        user_id)

    # comments
    logger.info("- comment records")
    db.execute(
        """INSERT IGNORE INTO comment (id, user_id, sharedfile_id, body, deleted, created_at, updated_at) SELECT mc.id, mc.user_id, mc.sharedfile_id, mc.body, mc.deleted, mc.created_at, mc.updated_at FROM mlkshk_comment mc WHERE mc.user_id=%s AND mc.deleted=0""",
        user_id)

    # conversation
    logger.info("- conversation records")
    db.execute(
        """INSERT IGNORE INTO conversation (id, user_id, sharedfile_id, muted, created_at, updated_at) SELECT mc.id, mc.user_id, mc.sharedfile_id, mc.muted, mc.created_at, mc.updated_at FROM mlkshk_conversation mc WHERE mc.user_id=%s""",
        user_id)

    # favorites
    logger.info("- favorite records")
    db.execute(
        """INSERT IGNORE INTO favorite (id, user_id, sharedfile_id, deleted, created_at, updated_at) SELECT mf.id, mf.user_id, mf.sharedfile_id, mf.deleted, mf.created_at, mf.updated_at FROM mlkshk_favorite mf WHERE mf.user_id=%s AND mf.deleted=0""",
        user_id)

    # invitation_request
    logger.info("- invitation_request records")
    db.execute(
        """INSERT IGNORE INTO invitation_request (id, user_id, manager_id, shake_id, deleted, created_at, updated_at) SELECT mi.id, mi.user_id, mi.manager_id, mi.shake_id, mi.deleted, mi.created_at, mi.updated_at FROM mlkshk_invitation_request mi WHERE mi.user_id=%s AND mi.deleted=0""",
        user_id)

    # notification
    logger.info("- notification records")
    db.execute(
        """INSERT IGNORE INTO notification (id, sender_id, receiver_id, action_id, type, deleted, created_at) SELECT mn.id, mn.sender_id, mn.receiver_id, mn.action_id, mn.type, mn.deleted, mn.created_at FROM mlkshk_notification mn WHERE mn.receiver_id=%s AND mn.deleted=0""",
        user_id)

    # shake
    logger.info("- shake records")
    db.execute("""UPDATE shake SET deleted=0 WHERE user_id=%s""", user_id)

    # shake_manager
    logger.info("- shake_manager records")
    db.execute(
        """INSERT IGNORE INTO shake_manager (id, shake_id, user_id, deleted, created_at, updated_at) SELECT ms.id, ms.shake_id, ms.user_id, ms.deleted, ms.created_at, ms.updated_at FROM mlkshk_shake_manager ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # shakesharedfile
    logger.info("- shakesharedfile records")
    db.execute(
        """INSERT IGNORE INTO shakesharedfile (id, shake_id, sharedfile_id, deleted, created_at) SELECT ms.id, ms.shake_id, ms.sharedfile_id, ms.deleted, ms.created_at FROM mlkshk_shakesharedfile ms WHERE ms.shake_id IN (SELECT DISTINCT id FROM shake WHERE deleted=0 AND user_id=%s) AND ms.deleted=0""",
        user_id)

    # sharedfile
    logger.info("- sharedfile records")
    db.execute(
        """INSERT IGNORE INTO sharedfile (id, source_id, user_id, name, title, source_url, description, share_key, content_type, size, activity_at, parent_id, original_id, deleted, like_count, save_count, view_count, updated_at, created_at) SELECT ms.id, ms.source_id, ms.user_id, ms.name, ms.title, ms.source_url, ms.description, ms.share_key, ms.content_type, ms.size, ms.activity_at, ms.parent_id, ms.original_id, ms.deleted, ms.like_count, ms.save_count, ms.view_count, ms.updated_at, ms.created_at FROM mlkshk_sharedfile ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # nsfw_log
    logger.info("- nsfw_log records")
    db.execute(
        """INSERT IGNORE INTO nsfw_log (id, user_id, sharedfile_id, sourcefile_id, created_at) SELECT mn.id, mn.user_id, mn.sharedfile_id, mn.sourcefile_id, mn.created_at FROM mlkshk_nsfw_log mn WHERE mn.user_id=%s""",
        user_id)

    # magicfile
    logger.info("- magicfile records")
    db.execute(
        """INSERT IGNORE INTO magicfile (id, sharedfile_id, created_at) SELECT mm.id, mm.sharedfile_id, mm.created_at FROM mlkshk_magicfile mm WHERE mm.sharedfile_id IN (SELECT id FROM sharedfile WHERE user_id=%s)""",
        user_id)

    # subscription
    logger.info("- subscription records")
    db.execute(
        """INSERT IGNORE INTO subscription (id, user_id, shake_id, deleted, created_at, updated_at) SELECT ms.id, ms.user_id, ms.shake_id, ms.deleted, ms.created_at, ms.updated_at FROM mlkshk_subscription ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # tagged_file
    logger.info("- tagged_file records")
    db.execute(
        """INSERT IGNORE INTO tagged_file (id, tag_id, sharedfile_id, deleted, created_at) SELECT mt.id, mt.tag_id, mt.sharedfile_id, mt.deleted, mt.created_at FROM mlkshk_tagged_file mt WHERE mt.sharedfile_id IN (SELECT DISTINCT id FROM sharedfile WHERE user_id=%s) AND mt.deleted=0""",
        user_id)

    # special handling for post table migration since that thing is so large (300mm rows)
    logger.info("- post records")
    db.execute(
        """INSERT IGNORE INTO post (id, user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) SELECT mp.id, mp.user_id, mp.sourcefile_id, mp.sharedfile_id, mp.seen, mp.deleted, mp.shake_id, mp.created_at FROM mlkshk_post mp WHERE mp.user_id=%s""",
        user_id)
    # now delete any imported soft-deleted rows
    db.execute("""DELETE FROM post WHERE user_id=%s AND deleted=1""", user_id)

    # this should already be done by the web app, but we may running this
    # via a script
    logger.info("- user/migration_state update")
    db.execute("""UPDATE user SET deleted=0 WHERE deleted=2 and id=%s""",
               user_id)
    db.execute("""UPDATE migration_state SET is_migrated=1 WHERE user_id=%s""",
               user_id)

    logger.info("- Migration for user %s complete!" % str(user_id))

    user = db.get("SELECT name, email FROM user WHERE id=%s", user_id)

    db.close()

    if options.postmark_api_key and not options.debug_workers:
        pm = postmark.PMMail(
            api_key=options.postmark_api_key,
            sender="*****@*****.**",
            to=user["email"],
            subject="MLTSHP restore has finished!",
            text_body=
            """We'll keep this short and sweet. Your data should be available now!\n\n"""
            +
            ("""Head over to: https://mltshp.com/user/%s and check it out!""" %
             user["name"]))
        pm.send()
Example #36
def update_db(info):
    """Automate server audit into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # get object_type_id
    for item in db.query("select * from Dictionary where dict_value='{0}'".format(info["server_type"])):
        object_type_id = item.dict_key

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(rt_server)
    if info["server_type"] in ["Server", "XenServer"]:
        payload = """0_object_type_id={0}&0_object_name={1}&0_object_label=&0_object_asset_no={1}&got_fast_data=Go%21""".format(
            object_type_id, info["hostname"]
        )
    if info["server_type"] in ["VM", "EC2"]:
        payload = """virtual_objects=&0_object_type_id={0}&0_object_name={1}&got_fast_data=Go%21""".format(
            object_type_id, info["hostname"]
        )

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info["hostname"])
        return False
    else:
        print "OK - Created object: {0}".format(info["hostname"])

    object_id = ""
    # get object_id
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # get os_release_id
    os_release_key = ""
    for item in db.query("select * from Dictionary where dict_value='{0}'".format(info["os_release"])):
        os_release_key = item.dict_key
    if not os_release_key:
        print "Failed to get object_type_id, please add '{0}' to 'Configuration - Dictionary - Server OS type'.".format(
            info["os_release"]
        )
        return False

    # update the object's attributes; all POST data formats were captured with Firebug on Firefox
    url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=edit&op=update""".format(rt_server)
    if info["server_type"] == "Server":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=2&4_value=0&5_attr_id=26&5_value=0&6_attr_id=10006&6_value={6}\
&7_attr_id=10003&7_value={7}&8_attr_id=1&8_value=&9_attr_id=28&9_value=&10_attr_id=21&10_value=&11_attr_id=4&11_value={8}\
&12_attr_id=24&12_value=&13_attr_id=10005&13_value={9}&14_attr_id=25&14_value=&num_attrs=15&object_comment=&submit.x=15&submit.y=13""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            os_release_key,
            info["swap"],
        )
    if info["server_type"] == "XenServer":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=1&7_value=&8_attr_id=28&8_value=&9_attr_id=4&9_value={8}&10_attr_id=24&10_value=&11_attr_id=10005&11_value={9}\
&12_attr_id=25&12_value=&13_attr_id=10008&13_value={10}&num_attrs=14&object_comment=&submit.x=18&submit.y=21""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            os_release_key,
            info["swap"],
            quote_plus(info["vm_list"]),
        )
    if info["server_type"] == "EC2":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10010&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=19&submit.y=27""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            info["ec2_pubname"],
            os_release_key,
            info["swap"],
        )
    if info["server_type"] == "VM":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10007&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=25&submit.y=14'""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            info["resident_on"],
            os_release_key,
            info["swap"],
        )

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to update attributes"
        return False
    print "OK - Updated attributes"

    # ec2 servers don't need to update the ip pool
    if info["server_type"] not in ["EC2"]:
        # update the ip pool
        nics = ("".join(info["network"].split())).split(",")
        for i in nics:
            nic_info = i.split(":")
            nic_name = "".join(nic_info[0:1])
            nic_addr = "".join(nic_info[1:2])
            # skip malformed entries: only proceed when nic_name is alphanumeric
            if nic_name.isalnum():
                # create nic
                url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=ip&op=add""".format(rt_server)
                payload = """object_id={0}&bond_name={1}&ip={2}&bond_type=regular&submit.x=11&submit.y=6""".format(
                    object_id, nic_name, nic_addr
                )
                req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
                if req.status_code != requests.codes.ok:
                    print "Failed to update ip pool for {0}:{1}".format(nic_name, nic_addr)
                    return False
                print "OK - Updated ip pool for {0}:{1}".format(nic_name, nic_addr)

    # virtual servers don't need to update the rackspace
    if info["server_type"] not in ["EC2", "VM"]:
        # update rack info
        update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
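

# A hedged sketch, not from the original: the lookups above interpolate
# hostname and dict_value into SQL with str.format(), which breaks on quotes
# and is open to injection. torndb supports %s parameter binding, so the same
# lookups could be written as:
def get_dict_key(db, dict_value):
    # MySQLdb escapes the bound value; no manual quoting needed
    row = db.get("SELECT dict_key FROM Dictionary WHERE dict_value=%s", dict_value)
    return row.dict_key if row else ""

def get_object_id(db, hostname):
    row = db.get("SELECT id FROM Object WHERE name=%s", hostname)
    return row.id if row else ""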
Exemple #37
0
    def run(self):
        # init our varying wrappers
        self.config = self.config_parser

        rabbit_queue = self.conf('rabbitqueue')
        rabbit_requeue = self.conf('rabbitrequeue')
        rabbitmq_url = self.conf('rabbitmq_url')

        client = puka.Client(rabbitmq_url)
        promise = client.connect()
        client.wait(promise)

        promise = client.queue_declare(queue=rabbit_queue, durable=True)
        client.wait(promise)
        if rabbit_requeue:
            promise = client.queue_declare(queue=rabbit_requeue, durable=True)
            client.wait(promise)
 
        consume_promise = client.basic_consume(queue=rabbit_queue, prefetch_count=1)

        host = self.conf('mysql_host')
        user = self.conf('mysql_user')
        password = self.conf('mysql_password')
        database = self.conf('mysql_database')
        table = self.conf('mysql_table')
        mysql_query_fields = self.conf('mysql_query_fields').split(",")
        mysql_key_field = self.conf('mysql_key_field')
        mysql_field_length = self.conf('mysql_field_length')
        mysql_insert_query = self.conf('mysql_insert_query')

        mysqldb = Connection(host, database, user=user, password=password)

        while True:
            try:
                result = client.wait(consume_promise)
                payload = json.loads(result['body'])
                key = payload[mysql_key_field]

                # Do some processing to get all the key/val pairs
                for k, v in payload['body'].iteritems():
                    values = []

                    for field in mysql_query_fields:
                        if isinstance(payload[field], basestring):
                            values.append(payload[field].strip())
                        else:
                            values.append(payload[field])
                    if k == 'body':
                        values.append(key)
                    else:
                        values.append(k)
                    values.append(v)
                    logger.debug(k)
                    if len(v) > 0 and len(v) < int(mysql_field_length):
                        try:
                            mysqldb.executemany(mysql_insert_query, [values])
                        except Exception as e:
                            logger.error(e)

                client.basic_ack(result)
                
                if rabbit_requeue:
                    promise = client.basic_publish(exchange='', routing_key=rabbit_requeue, body=result['body'])
                    client.wait(promise)
            except KeyboardInterrupt as e:
                logger.error(e)
                promise = client.close()
                client.wait(promise)
                mysqldb.close()
                raise
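
# A hedged sketch, an addition rather than original behavior: a long-running
# consumer like run() can outlive MySQL's wait_timeout and hit a dropped
# connection. One option is to reconnect and retry the insert once;
# torndb's Connection exposes reconnect() for exactly this.
import MySQLdb

def insert_with_retry(mysqldb, query, values):
    """Hypothetical helper: retry a single insert after reconnecting."""
    try:
        mysqldb.executemany(query, [values])
    except MySQLdb.OperationalError:
        # close and reopen the underlying link, then try once more
        mysqldb.reconnect()
        mysqldb.executemany(query, [values])
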
def start_schedule():

#if __name__ == '__main__':
    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    
    scheduler = Scheduler(daemonic = False)

    scheduler.print_jobs()

    #scheduler.remove_jobstore('file',close=True)

    #scheduler.shutdown(wait=False)

    

    scheduler.shutdown()

    #scheduler.unschedule_func(backup)

    scheduler.add_jobstore(ShelveJobStore('/tmp/db_schedule'), 'file')

    v_current_jobs = scheduler.get_jobs()

    print v_current_jobs

    if v_current_jobs:  # if a backup job is already scheduled, unschedule it first

     

        scheduler.unschedule_func(backup)

    

    #scheduler = Scheduler(standalone=True)
    #scheduler = Scheduler(daemon=True)
    # connect to the config center DB to fetch backup intervals and related settings
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """

    print v_sql

    bak_server_list = db.query(v_sql)

    if bak_server_list: # there are servers to schedule

        i=0

        # mark scheduled tasks that never started as manually ended (backup_result_type=4)
        v_manual_end_sql = 'update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0'

        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server['instance_id']

            from_host = bak_server['ip']

            #print from_host

            mysql_port = bak_server['port']

            backup_interval_type = bak_server['backup_interval_type']

            backup_start_time = bak_server['backup_start_time']

            str_start_date= time.strftime("%Y-%m-%d") + ' ' + backup_start_time

            print str_start_date 

            if backup_interval_type == 1: # every day

                # in-memory jobstore

                #scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

                # file-based jobstore (jobstore='file')

                scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')
               
                #scheduler.add_interval_job(backup, days=1, start_date='2014-07-18 18:17:01', args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 2: # every week

                scheduler.add_interval_job(backup, weeks=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 3: # every hour

                scheduler.add_interval_job(backup, hours=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            # record the backup job status in the DB; 0 = job scheduled, actual backup not yet started

            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (instance_id)

            db.execute(v_sche_start_sql)

            i=i+1


    db.close()


    if bak_server_list: # there are servers to schedule

        scheduler.start()

        print 'success!'

        scheduler.print_jobs()

        '''
Exemple #39
0
def update_db(info):
    """Automate server audit into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # get object_type_id
    object_type_id = ""
    for item in db.query(
            "select * from Dictionary where dict_value='{0}'".format(
                info['server_type'])):
        object_type_id = item.dict_key

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(
        rt_server)
    if info['server_type'] in ["Server", "XenServer"]:
        payload = """0_object_type_id={0}&0_object_name={1}&0_object_label=&0_object_asset_no={1}&got_fast_data=Go%21"""\
                  .format(object_type_id,info['hostname'])
    if info['server_type'] in ["VM", "EC2"]:
        payload = """virtual_objects=&0_object_type_id={0}&0_object_name={1}&got_fast_data=Go%21"""\
                  .format(object_type_id,info['hostname'])

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info['hostname'])
        return False
    else:
        print "OK - Created object: {0}".format(info['hostname'])

    object_id = ""
    # get object_id
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # get os_release_id
    os_release_key = ""
    for item in db.query(
            "select * from Dictionary where dict_value='{0}'".format(
                info['os_release'])):
        os_release_key = item.dict_key
    if not os_release_key:
        print "Failed to get object_type_id, please add '{0}' to 'Configuration - Dictionary - Server OS type'.".format(
            info['os_release'])
        return False

    # update the object's attributes; all POST data formats were captured with Firebug on Firefox
    url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=edit&op=update""".format(
        rt_server)
    if info['server_type'] == "Server":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=2&4_value=0&5_attr_id=26&5_value=0&6_attr_id=10006&6_value={6}\
&7_attr_id=10003&7_value={7}&8_attr_id=1&8_value=&9_attr_id=28&9_value=&10_attr_id=21&10_value=&11_attr_id=4&11_value={8}\
&12_attr_id=24&12_value=&13_attr_id=10005&13_value={9}&14_attr_id=25&14_value=&num_attrs=15&object_comment=&submit.x=15&submit.y=13"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),os_release_key,info['swap'])
    if info['server_type'] == "XenServer":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=1&7_value=&8_attr_id=28&8_value=&9_attr_id=4&9_value={8}&10_attr_id=24&10_value=&11_attr_id=10005&11_value={9}\
&12_attr_id=25&12_value=&13_attr_id=10008&13_value={10}&num_attrs=14&object_comment=&submit.x=18&submit.y=21"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),os_release_key,info['swap'],quote_plus(info['vm_list']))
    if info['server_type'] == "EC2":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10010&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=19&submit.y=27"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),info['ec2_pubname'],os_release_key,info['swap'])
    if info['server_type'] == "VM":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10007&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=25&submit.y=14'"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),info['resident_on'],os_release_key,info['swap'])

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to update attributes"
        return False
    print "OK - Updated attributes"

    # ec2 servers don't need to update the ip pool
    if info['server_type'] not in ["EC2"]:
        # update the ip pool
        nics = ("".join(info['network'].split())).split(',')
        for i in nics:
            nic_info = i.split(':')
            nic_name = "".join(nic_info[0:1])
            nic_addr = "".join(nic_info[1:2])
            # skip malformed entries: only proceed when nic_name is alphanumeric
            if nic_name.isalnum():
                # create nic
                url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=ip&op=add""".format(
                    rt_server)
                payload = """object_id={0}&bond_name={1}&ip={2}&bond_type=regular&submit.x=11&submit.y=6"""\
                          .format(object_id,nic_name,nic_addr)
                req = requests.post(url,
                                    data=payload,
                                    headers=rt_headers,
                                    auth=rt_auth)
                if req.status_code != requests.codes.ok:
                    print "Failed to update ip pool for {0}:{1}".format(
                        nic_name, nic_addr)
                    return False
                print "OK - Updated ip pool for {0}:{1}".format(
                    nic_name, nic_addr)

    # virtual servers don't need to update the rackspace
    if info['server_type'] not in ["EC2", "VM"]:
        # update rack info
        update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
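

# A hedged sketch, not part of the original: the hand-assembled payload
# strings above are fragile (note the stray quote fixed in the VM branch).
# Building the same POST body from key/value pairs with urllib.urlencode
# handles the escaping that quote_plus does manually. Only a few of the
# Server-branch attribute ids are shown; the rest follow the same pattern.
from urllib import urlencode

def build_server_payload(object_id, object_type_id, info):
    fields = [
        ("object_id", object_id),
        ("object_type_id", object_type_id),
        ("object_name", info['hostname']),
        ("object_asset_no", info['hostname']),
        ("1_attr_id", 10000), ("1_value", info['cpu_cores']),
        ("2_attr_id", 10004), ("2_value", info['disk']),
        ("3_attr_id", 3), ("3_value", info['fqdn']),
    ]
    # urlencode percent-escapes every value, including '&' and '='
    return urlencode(fields)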
Exemple #40
0
def upload_processlist():

    # connect to the config center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    #print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list: # loop over the instance list

        i=0

        

        for upload_server in upload_server_list:

            instance_id = upload_server['id']

            host_ip = upload_server['ip']

            mysql_port = upload_server['port']

            v_host =host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')


            v_pl = r"""SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,TIME_MS,ROWS_SENT,ROWS_EXAMINED from PROCESSLIST"""

            #print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_id = process_row['ID']

                vp_user = process_row['USER']

                vp_host = process_row['HOST']

                vp_db = process_row['DB']

                # if None, pass MySQL NULL as-is; otherwise add quotes before substitution, so the placeholder is %s rather than '%s'
                if vp_db is None:
                    vp_db = 'NULL'
                else:
                    vp_db = "'"+vp_db+"'"

                #print vp_db

                vp_command = process_row['COMMAND']

                vp_time = process_row['TIME']

                vp_state = process_row['STATE']

                vp_info = process_row['INFO']

                if vp_info is None:
                    vp_info = 'NULL'
                else:
                    #vp_info = "'"+vp_info+"'"
                    vp_info = vp_info.replace('"',"'") # replace double quotes with single quotes
                    vp_info = '"'+vp_info+'"'   # quote with double quotes in case the string itself contains single quotes

                vp_time_ms = process_row['TIME_MS']

                vp_rows_sent = process_row['ROWS_SENT']

                vp_rows_examined = process_row['ROWS_EXAMINED']

                # v_insert_sql='''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                # TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s','%s','%s',%d,'%s','%s',%d,%d,%d)''' % (
                # instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)
                
                #try:
                v_insert_sql='''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s',%s,'%s',%d,'%s',%s,%d,%d,%d)''' % (
                instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)


                #print v_insert_sql

                db.execute(v_insert_sql.replace('%','%%'))  # escape literal % so torndb's own substitution doesn't consume it

                    #db.execute(v_insert_sql)
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert process_list error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            db_remote.close()

            

            i=i+1


    db.close()
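

# A hedged sketch, an alternative to the string-built insert above: MySQLdb's
# parameter binding turns Python None into SQL NULL and escapes embedded
# quotes, so the manual NULL/quoting logic can collapse into one parameterized
# execute. Columns match log_processlist above.
def insert_process_row(db, instance_id, row):
    db.execute(
        """INSERT INTO log_processlist
               (instance_id, TID, USER, HOST, DB, COMMAND, TIME, STATE, INFO,
                TIME_MS, ROWS_SENT, ROWS_EXAMINED)
           VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
        instance_id, row['ID'], row['USER'], row['HOST'], row['DB'],
        row['COMMAND'], row['TIME'], row['STATE'], row['INFO'],
        row['TIME_MS'], row['ROWS_SENT'], row['ROWS_EXAMINED'])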
Exemple #41
0
def upload_meta_tables():  
    # connect to the config center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    db.execute('truncate table meta_tables')  # truncate first, then insert

    db.execute('truncate table meta_statistics')  # truncate first, then insert

    db.execute('truncate table meta_redundant_keys')  # truncate first, then insert

    # instance id 64 (the E4S server) is outside this platform's control, so exclude it manually with b.id != 64
    v_sql = r"""SELECT b.ip,b.port,b.id as instance_id,a.id as schema_id,a.name as db_name from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    #print v_sql

    upload_tables_list = db.query(v_sql)

    if upload_tables_list: # loop over the instance list

        i=0

        

        for upload_table in upload_tables_list:

            instance_id = upload_table['instance_id']

            schema_id = upload_table['schema_id']

            db_name = upload_table['db_name']

            host_ip = upload_table['ip']

            mysql_port = upload_table['port']

            v_host =host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # fetch table metadata, then insert

            v_pl = r"""SELECT table_catalog,table_schema,table_name,table_type,engine,version,row_format,
            table_rows,avg_row_length,data_length,max_data_length,index_length,data_free,auto_increment,create_time,
            update_time,check_time,table_collation,checksum,
            create_options,table_comment from tables where table_type='BASE TABLE' and TABLE_SCHEMA='%s' """ % (db_name)

            #print v_pl

            table_list = db_remote.query(v_pl)

            for table_row in table_list:

                table_catalog = table_row['table_catalog']

                table_schema = table_row['table_schema']

                table_name = table_row['table_name']

                table_type = table_row['table_type']

                engine = table_row['engine']


                version = table_row['version']

                row_format = table_row['row_format']

                table_rows = table_row['table_rows']

                avg_row_length = table_row['avg_row_length']

                data_length = table_row['data_length']

                max_data_length = table_row['max_data_length']

                index_length = table_row['index_length']

                data_free = table_row['data_free']

                auto_increment = table_row['auto_increment']

                # originally an int column, but to allow NULLs it is passed as a string via %s
                if auto_increment is None:
                    auto_increment = 'NULL'
                else:
                    auto_increment = "'"+str(auto_increment)+"'"

                # datetime column; after NULL handling it is passed via %s
                create_time = table_row['create_time']

                if create_time is None:
                    create_time = 'NULL'
                else:
                    create_time = "'"+str(create_time)+"'"
                

                # datetime column; after NULL handling it is passed via %s
                update_time = table_row['update_time']

                if update_time is None:
                    update_time = 'NULL'
                else:
                    update_time = "'"+str(update_time)+"'"


                check_time = table_row['check_time']

                if check_time is None:
                    check_time = 'NULL'
                else:
                    check_time = "'"+str(check_time)+"'"


                table_collation = table_row['table_collation']

                # originally an int column, but to allow NULLs it is passed as a string via %s

                checksum = table_row['checksum']

                if checksum is None:
                    checksum = 'NULL'
                else:
                    checksum = "'"+str(checksum)+"'"
                


                create_options = table_row['create_options']

                table_comment = table_row['table_comment']

                
                #try:
                v_insert_sql='''insert into meta_tables(instance_id,schema_id,table_catalog,table_schema,table_name,
                table_type,engine,version,row_format,table_rows,avg_row_length,data_length,max_data_length,
                index_length,data_free,auto_increment,create_time,update_time,check_time,table_collation,
                checksum,create_options,table_comment) 
                values(%d,%d,'%s','%s','%s',
                '%s','%s',%d,'%s',%d,%d,%d,%d,
                %d,%d,%s,%s,%s,%s,'%s',
                %s,'%s','%s')''' % (
                instance_id,schema_id,table_catalog,table_schema,table_name,
                table_type,engine,version,row_format,table_rows,avg_row_length,data_length,max_data_length,
                index_length,data_free,auto_increment,create_time,update_time,check_time,table_collation,
                checksum,create_options,table_comment
                )

                #print v_insert_sql

                db.execute(v_insert_sql.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)


            # fetch index metadata, then insert

            v_pl2 = r"""SELECT table_catalog,table_schema,table_name,non_unique,index_schema,
            index_name,seq_in_index,column_name,collation,cardinality,sub_part,packed,nullable,
            index_type,comment,index_comment from statistics where TABLE_SCHEMA='%s' """ % (db_name)


            table_list2 = db_remote.query(v_pl2)

            for table_row2 in table_list2:

                table_catalog = table_row2['table_catalog']

                table_schema = table_row2['table_schema']

                table_name = table_row2['table_name']

                non_unique = table_row2['non_unique']

                index_schema = table_row2['index_schema']


                index_name = table_row2['index_name']

                seq_in_index = table_row2['seq_in_index']

                column_name = table_row2['column_name']

                collation = table_row2['collation']

                cardinality = table_row2['cardinality']

                # originally an int column, but to allow NULLs it is passed as a string via %s

                if cardinality is None:
                    cardinality = 'NULL'
                else:
                    cardinality = "'"+str(cardinality)+"'"


                sub_part = table_row2['sub_part']

                # originally an int column, but to allow NULLs it is passed as a string via %s

                if sub_part is None:
                    sub_part = 'NULL'
                else:
                    sub_part = "'"+str(sub_part)+"'"


                packed = table_row2['packed']

                if packed is None:
                    packed = 'NULL'
                else:
                    
                    packed = '"'+packed+'"'   # 防止字符里面本身包含单引号


                nullable = table_row2['nullable']
                
                index_type = table_row2['index_type']

                comment = table_row2['comment']

                index_comment = table_row2['index_comment']

                

                
                #try:
                v_insert_sql2='''insert into meta_statistics(instance_id,schema_id,table_catalog,
                table_schema,table_name,non_unique,index_schema,index_name,seq_in_index,
                column_name,collation,cardinality,sub_part,packed,nullable,index_type,comment,index_comment) 
                values(%d,%d,'%s',
                '%s','%s',%d,'%s','%s',%d,
                '%s','%s',%s,%s,%s,'%s','%s','%s','%s')''' % (
                instance_id,schema_id,table_catalog,
                table_schema,table_name,non_unique,index_schema,index_name,seq_in_index,
                column_name,collation,cardinality,sub_part,packed,nullable,index_type,comment,index_comment
                )

                #print v_insert_sql

                db.execute(v_insert_sql2.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            db_remote.close()


            # connect to the remote instance again to extract redundant index info
            db_remote2 = Connection(v_host,
                            'common_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # fetch rows from the common_schema.redundant_keys view, then insert

            v_pl3 = r"""SELECT table_schema,table_name,redundant_index_name,redundant_index_columns,
            redundant_index_non_unique,dominant_index_name,dominant_index_columns,
            dominant_index_non_unique,subpart_exists,sql_drop_index 
            from redundant_keys where table_schema='%s' """ % (db_name)

            #print v_pl

            table_list3 = db_remote2.query(v_pl3)

            for table_row3 in table_list3:

            

                table_schema = table_row3['table_schema']

                table_name = table_row3['table_name']

                redundant_index_name = table_row3['redundant_index_name']

                redundant_index_columns = table_row3['redundant_index_columns']


                redundant_index_non_unique = table_row3['redundant_index_non_unique']

                dominant_index_name = table_row3['dominant_index_name']

                dominant_index_columns = table_row3['dominant_index_columns']

                dominant_index_non_unique = table_row3['dominant_index_non_unique']


                subpart_exists = table_row3['subpart_exists']

                sql_drop_index = table_row3['sql_drop_index']

                
                
                #try:
                v_insert_sql3='''insert into meta_redundant_keys(instance_id,schema_id,table_schema,
                table_name,redundant_index_name,redundant_index_columns,redundant_index_non_unique,
                dominant_index_name,dominant_index_columns,
                dominant_index_non_unique,subpart_exists,sql_drop_index) 
                values(%d,%d,'%s',
                '%s','%s','%s',%d,
                '%s','%s',
                %d,%d,'%s')''' % (
                instance_id,schema_id,table_schema,
                table_name,redundant_index_name,redundant_index_columns,redundant_index_non_unique,
                dominant_index_name,dominant_index_columns,
                dominant_index_non_unique,subpart_exists,sql_drop_index
                )

                #print v_insert_sql

                db.execute(v_insert_sql3.replace('%','%%'))


            db_remote2.close()

            

            i=i+1


    db.close()
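

# A hedged sketch, not in the original: rather than one INSERT per metadata
# row, the rows for a schema can be collected and written with a single
# executemany call, which also removes the need for the '%' -> '%%' escaping
# workaround. The column subset is illustrative; instance_id and schema_id
# are assumed to have been added to each row dict by the caller.
def bulk_insert_meta_tables(db, rows):
    args = [
        (r['instance_id'], r['schema_id'], r['table_schema'], r['table_name'],
         r['engine'], r['table_rows'], r['data_length'], r['index_length'])
        for r in rows
    ]
    if not args:
        return
    # one round trip; None values become SQL NULL via parameter binding
    db.executemany(
        """INSERT INTO meta_tables
               (instance_id, schema_id, table_schema, table_name,
                engine, table_rows, data_length, index_length)
           VALUES (%s, %s, %s, %s, %s, %s, %s, %s)""",
        args)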
Exemple #42
0
def upload_user_stats():  

    # connect to the config center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    db.execute('truncate table meta_table_statistics')  # truncate first, then insert

    db.execute('truncate table meta_index_statistics')  # truncate first, then insert

    


    # instance id 64 (the E4S server) is outside this platform's control, so exclude it manually with b.id != 64
    v_sql = r"""SELECT b.ip,b.port,b.id as instance_id,a.id as schema_id,a.name as db_name from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    #print v_sql

    upload_tables_list = db.query(v_sql)

    if upload_tables_list: # loop over the instance list

        i=0

        

        for upload_table in upload_tables_list:

            instance_id = upload_table['instance_id']

            schema_id = upload_table['schema_id']

            db_name = upload_table['db_name']

            host_ip = upload_table['ip']

            mysql_port = upload_table['port']

            v_host =host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # fetch table statistics, then insert

            v_pl = r"""SELECT table_schema,table_name,rows_read,rows_changed,rows_changed_x_indexes 
            from table_statistics where table_schema='%s' """ % (db_name)

            #print v_pl

            table_list = db_remote.query(v_pl)

            for table_row in table_list:

            

                table_schema = table_row['table_schema']

                table_name = table_row['table_name']

                rows_read = table_row['rows_read']

                rows_changed = table_row['rows_changed']


                rows_changed_x_indexes = table_row['rows_changed_x_indexes']

                
                
                #try:
                v_insert_sql='''insert into meta_table_statistics(instance_id,schema_id,table_schema,
                table_name,rows_read,rows_changed,rows_changed_x_indexes) 
                values(%d,%d,'%s','%s',%d,%d,%d)''' % (
                instance_id,schema_id,table_schema,
                table_name,rows_read,rows_changed,rows_changed_x_indexes
                )

                #print v_insert_sql

                db.execute(v_insert_sql.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            

            # fetch index statistics, then insert

            v_pl2 = r"""SELECT table_schema,table_name,index_name,rows_read from index_statistics 
            where TABLE_SCHEMA='%s' """ % (db_name)


            table_list2 = db_remote.query(v_pl2)

            for table_row2 in table_list2:

                

                table_schema = table_row2['table_schema']

                table_name = table_row2['table_name']

                index_name = table_row2['index_name']

                
                rows_read = table_row2['rows_read']

                
                #try:
                v_insert_sql2='''insert into meta_index_statistics(instance_id,schema_id,table_schema,
                table_name,index_name,rows_read) 
                values(%d,%d,'%s','%s','%s',%d)''' % (
                instance_id,schema_id,table_schema,
                table_name,index_name,rows_read
                )

                #print v_insert_sql

                db.execute(v_insert_sql2.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)



            db_remote.close()


            # generate the log_hot_tables statistics data

            # statistic_time is the actual generation time of the statistics


            v_insert_sql3='''insert into log_hot_tables(instance_id,schema_id,table_schema,
                table_name,TABLE_COMMENT,TABLE_CREATE_TIME,rows_read,rows_changed,
                rows_changed_x_indexes) select a.instance_id,a.schema_id,a.TABLE_SCHEMA,
                a.TABLE_NAME,b.TABLE_COMMENT,b.CREATE_TIME,a.ROWS_READ,a.ROWS_CHANGED,
                a.ROWS_CHANGED_X_INDEXES from meta_table_statistics a,meta_tables b where 
                a.instance_id=%d and a.schema_id=%d and 
                a.TABLE_SCHEMA='%s' and a.instance_id=b.instance_id and a.TABLE_SCHEMA=b.TABLE_SCHEMA 
                and a.TABLE_NAME=b.TABLE_NAME''' % (
                instance_id,schema_id,table_schema
                )

            print v_insert_sql3

            db.execute(v_insert_sql3.replace('%','%%'))
            


            

            i=i+1


    db.close()

    # remotely flush the statistics tables

    # for remote execute

    os_user = '******'

    OS_APPS_PASSWD = config.OS_APPS_PASSWD

    DB_USER = config.DB_USER

    DB_PASSWD = config.DB_PASSWD

    # connect to the config center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # instance id 64 (the E4S server) is outside this platform's control, so exclude it manually with b.id != 64
    v_sql = r"""SELECT distinct b.ip,b.port from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    #print v_sql

    server_list = db.query(v_sql)

    if server_list: # loop over the instance list

        i=0

        for single_server in server_list:


            host_ip = single_server['ip']

            mysql_port = single_server['port']


            # use paramiko to run the SQL remotely on the target host

            exe_sql = 'flush table_statistics;flush index_statistics;'  

            result = func.remote_exe_sql(host_ip,os_user,OS_APPS_PASSWD,mysql_port,'information_schema',exe_sql,DB_USER,DB_PASSWD)

            if result == '':
                result = 'Executed successfully!'

            print result
def upload_processlist():

    # connect to the config center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    #print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list: # loop over the instance list

        i=0

        

        for upload_server in upload_server_list:

            instance_id = upload_server['id']

            host_ip = upload_server['ip']

            mysql_port = upload_server['port']

            v_host =host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')


            v_pl = r"""SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,TIME_MS,ROWS_SENT,ROWS_EXAMINED from PROCESSLIST"""

            #print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_id = process_row['ID']

                vp_user = process_row['USER']

                vp_host = process_row['HOST']

                vp_db = process_row['DB']

                vp_command = process_row['COMMAND']

                vp_time = process_row['TIME']

                vp_state = process_row['STATE']

                vp_info = process_row['INFO']

                vp_time_ms = process_row['TIME_MS']

                vp_rows_sent = process_row['ROWS_SENT']

                vp_rows_examined = process_row['ROWS_EXAMINED']

                v_insert_sql='''insert into processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s','%s','%s',%d,'%s','%s',%d,%d,%d)''' % (
                instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)

                db.execute(v_insert_sql)

            db_remote.close()

            

            i=i+1


    db.close()
def upload_auto_increment():

    # connect to the config center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list: # loop over the instance list

        i=0

        

        for upload_server in upload_server_list:

            instance_id = upload_server['id']

            host_ip = upload_server['ip']

            mysql_port = upload_server['port']

            v_host =host_ip + ':' + str(mysql_port)

            print v_host

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')


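            # The query below derives each integer type's maximum from a CASE
            # over DATA_TYPE, right-shifts it by 1 for signed columns (halving
            # the range) and by 0 for unsigned ones, then reports
            # AUTO_INCREMENT / MAX_VALUE as the fill ratio of each
            # auto_increment column.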
            v_pl = r"""select information_schema . columns . TABLE_SCHEMA AS TABLE_SCHEMA,
                        information_schema . columns . TABLE_NAME AS TABLE_NAME,
                        information_schema . columns . COLUMN_NAME AS COLUMN_NAME,
                        information_schema . columns . DATA_TYPE AS DATA_TYPE,
                        information_schema . columns . COLUMN_TYPE AS COLUMN_TYPE,
                        ((case information_schema . columns . DATA_TYPE
                        when 'tinyint' then
                            255
                        when 'smallint' then
                            65535
                        when 'mediumint' then
                            16777215
                        when 'int' then
                            4294967295
                        when 'bigint' then
                            18446744073709551615
                        end) >>
                        if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0),
                            0,
                            1)) AS MAX_VALUE,
                        information_schema . tables . AUTO_INCREMENT AS AUTO_INCREMENT,
                        (information_schema . tables .
                        AUTO_INCREMENT /
                        ((case information_schema . columns . DATA_TYPE
                            when 'tinyint' then
                                255
                            when 'smallint' then
                                65535
                            when 'mediumint' then
                                16777215
                            when 'int' then
                                4294967295
                            when 'bigint' then
                                18446744073709551615
                        end) >>
                        if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0),
                            0,
                            1))) AS AUTO_INCREMENT_RATIO
                        from (information_schema . columns join information_schema . tables
                          on(((information_schema . columns . TABLE_SCHEMA = information_schema .
                               tables . TABLE_SCHEMA) and
                               (information_schema . columns . TABLE_NAME = information_schema .
                               tables . TABLE_NAME))))
                        where ((information_schema . columns .TABLE_SCHEMA not in
                                    ('mysql', 'INFORMATION_SCHEMA', 'performance_schema','common_schema')) and
                                    (information_schema . tables . TABLE_TYPE = 'BASE TABLE') and
                                    (information_schema . columns . EXTRA = 'auto_increment'))"""

            print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_server_ip = v_host

                vp_instance_id = instance_id

                #vp_id = process_row['ID']
                vp_table_schema = process_row['TABLE_SCHEMA']

                vp_table_name = process_row['TABLE_NAME']

                vp_column_name = process_row['COLUMN_NAME']

                vp_data_type = process_row['DATA_TYPE']

                # if None, pass MySQL NULL as-is; otherwise add quotes before substitution, so the placeholder is %s rather than '%s'
                #if vp_db is None:
                #    vp_db = 'NULL'
                #else:
                #    vp_db = "'"+vp_db+"'"

                #print vp_db

                vp_column_type = process_row['COLUMN_TYPE']

                vp_max_value = process_row['MAX_VALUE']

                vp_auto_increment = process_row['AUTO_INCREMENT']

                vp_auto_increment_ratio = process_row['AUTO_INCREMENT_RATIO']

                #vp_info = process_row['INFO']

                #if vp_info is None:
                #    vp_info = 'NULL'
                #else:
                #    #vp_info = "'"+vp_info+"'"
                #    vp_info = vp_info.replace('"',"'") # replace double quotes with single quotes
                #    vp_info = '"'+vp_info+'"'   # quote with double quotes in case the string itself contains single quotes

                # v_insert_sql='''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                # TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s','%s','%s',%d,'%s','%s',%d,%d,%d)''' % (
                # instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)
                
                #try:
                v_delete_sql='''delete from  auto_increment_list where INSTANCE_ID = '%s' and TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' and COLUMN_NAME = '%s' ''' % (
                vp_instance_id,vp_table_schema,vp_table_name,vp_column_name)


                print v_delete_sql

                db.execute(v_delete_sql)


                v_insert_sql='''insert into auto_increment_list(instance_id,server_ip,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,DATA_TYPE,COLUMN_TYPE,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO) 
                values(%d,'%s','%s','%s','%s','%s','%s',%d,%d,%d)''' % (
                vp_instance_id,vp_server_ip,vp_table_schema,vp_table_name,vp_column_name,vp_data_type,vp_column_type,int(vp_max_value),int(vp_auto_increment),int(vp_auto_increment_ratio))


                print v_insert_sql

                db.execute(v_insert_sql)

                    #db.execute(v_insert_sql)
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert process_list error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            db_remote.close()

            

            i=i+1

        
        v_sendmail_sql=r'''select count(*) count
                    from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''
        

        
        if_list = db.query(v_sendmail_sql)
        
        for if_row in if_list:
            v_if = if_row['count']
            print v_if    
            if v_if >= 1 :
                print '11'
                
                
                v_warn_sql=r'''select SERVER_IP,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO 
                            from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''
                print v_warn_sql
                warn_list = db.query(v_warn_sql)
                v_server_ip = '\r\n'
                for warn_row in warn_list:
                    v_server_ip = v_server_ip + warn_row['SERVER_IP'] +'|schema: ' + warn_row['TABLE_SCHEMA'] +'|table: '+ warn_row['TABLE_NAME'] + '|column: ' + warn_row['COLUMN_NAME']+'\r\n'
                    
                print v_server_ip
                v_msg_text = v_server_ip
                v_receiver = '[email protected],[email protected]'
                v_subject = 'Auto-increment IDs close to overflow; see the monitoring page for details'
                v_return = func.send_mail_to_devs(v_receiver,v_subject,v_msg_text)

    db.close()
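

# A hedged sketch, an addition: the COUNT(*) probe plus the second query above
# can be a single parameterized query, with the 80% threshold passed as an
# argument instead of repeated in two SQL literals.
def build_overflow_report(db, ratio_threshold=80):
    rows = db.query(
        """SELECT SERVER_IP, TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME,
                  MAX_VALUE, AUTO_INCREMENT, AUTO_INCREMENT_RATIO
           FROM auto_increment_list
           WHERE AUTO_INCREMENT_RATIO >= %s""",
        ratio_threshold)
    if not rows:
        return None
    # one line per endangered column, mirroring the mail body built above
    lines = ['%s|schema: %s|table: %s|column: %s' %
             (r['SERVER_IP'], r['TABLE_SCHEMA'], r['TABLE_NAME'], r['COLUMN_NAME'])
             for r in rows]
    return '\r\n'.join(lines)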