Example #1
def main():
    db1 = Connection(options.database_host, options.database_user,
                     options.database_password)

    # grab all shared files in order
    sfs = db1.query("""SELECT id FROM sharedfile ORDER BY created_at""")
    # for each, get counts

    for sf in sfs:
        likes = 0
        saves = 0

        like_counts = db1.query(
            "SELECT count(id) as like_count from favorite where sharedfile_id = %s and deleted=0",
            sf.id)
        if like_counts:
            likes = like_counts[0]['like_count']

        save_counts = db1.query(
            "SELECT count(id) AS save_count FROM sharedfile WHERE original_id = %s and deleted = 0",
            sf.id)
        if save_counts[0]['save_count'] > 0:
            saves = save_counts[0]['save_count']
        else:
            save_counts = db1.query(
                "SELECT count(id) AS save_count FROM sharedfile WHERE parent_id = %s and deleted = 0",
                sf.id)
            saves = save_counts[0]['save_count']

        if likes > 0 or saves > 0:
            print "UPDATE sharedfile SET like_count = %s, save_count = %s WHERE id = %s" % (
                likes, saves, sf.id)
            print db1.execute(
                "UPDATE sharedfile SET like_count = %s, save_count = %s WHERE id = %s",
                likes, saves, sf.id)
Example #2
    def get(self, article_id):
        # template_name = "article_details.html"
        template_name = "mobile/article_details.html"

        db = Connection(settings.DATABASE_SERVER,
                        settings.DATABASE_NAME,
                        settings.DATABASE_USER,
                        settings.DATABASE_PASSWORD,
                        )

        sql = "SELECT * FROM articles WHERE id='{0}'".format(
            article_id)
        article = db.query(sql)[0]

        # article["read_count"], article["comment_count"] = \
        #     get_article_statistics(db, article_id)
        article["url"] = urllib.quote(article["url"])

        # Update article read count
        now = datetime.datetime.now()
        sql = """INSERT INTO article_reads (`article_id`, `user_id`, `time`)
                 VALUES ('{0}', '{1}', '{2}')
              """.format(article_id, 0, now)
        db.execute(sql)

        kwargs = dict(article=article,
                      day=article["day"])

        super(ArticleDetailsHandler, self).render(
            template_name,
            **kwargs
        )
Example #3
def main():
    db1 = Connection(options.database_host, options.database_user,
                     options.database_password)

    db1.execute("DELETE FROM post WHERE 1")
    ssfs = db1.query("""SELECT shake_id, sharedfile_id from shakesharedfile order by created_at""")
    for shakesharedfile in ssfs:
        sf = db1.get("""SELECT id, source_id, name, deleted, created_at FROM sharedfile WHERE id = %s""",
                     shakesharedfile['sharedfile_id'])
        print "%s. Adding posts for sharedfile: %s created at %s." % (sf.id, sf.name, sf.created_at)
        add_posts(shake_id=shakesharedfile['shake_id'],
                  sharedfile_id=sf['id'],
                  sourcefile_id=sf['source_id'],
                  deleted=sf['deleted'],
                  created_at=sf['created_at'])
Example #4
def delete_posts(sharedfile_id=0, **kwargs):
    """
    This task will update the post table setting any post containing the shared file
    to deleted = 1
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    db.execute(
        """UPDATE post SET deleted=1 WHERE sharedfile_id = %s AND deleted=0""",
        sharedfile_id)
    db.close()
Example #5
def main():
    db1 = Connection(options.database_host, options.database_user,
                     options.database_password)

    # grab all group shakes
    shakes = db1.query("""SELECT id, user_id FROM shake WHERE type=%s""",
                       "group")

    for shake in shakes:
        db1.execute(
            """INSERT IGNORE INTO subscription (user_id, shake_id, deleted, created_at, updated_at)
                        VALUES (%s, %s, 0, NOW(), NOW())""", shake['user_id'],
            shake['id'])
Example #6
def log_dba_jobs_progress(v_job_id, v_cur_prog_desc, v_cur_cum_prog_desc, v_cur_prog_com_rate):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_add_job_progress_sql = '''insert into dba_job_progress(job_id,cur_prog_desc,cur_cum_prog_desc,cur_prog_com_rate) values(%d,'%s','%s',%d)''' % (
        v_job_id, v_cur_prog_desc, v_cur_cum_prog_desc, v_cur_prog_com_rate)

    # escape any remaining % so the driver doesn't treat them as placeholders
    db.execute(v_add_job_progress_sql.replace('%', '%%'))

    db.close()
Example #7
def calculate_likes(sharedfile_id, **kwargs):
    """
    This task will get all likes for a shared file, count them, then update the sharedfile.like_count
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    result = db.get(
        "SELECT count(id) as like_count from favorite where sharedfile_id = %s and deleted=0",
        sharedfile_id)
    like_count = int(result['like_count'])
    db.execute("UPDATE sharedfile set like_count = %s WHERE id = %s",
               like_count, sharedfile_id)

    tweet_or_magic(db, sharedfile_id, like_count)
    db.close()
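The count-then-update pair above can also be collapsed into a single statement. A minimal sketch, assuming the same favorite and sharedfile schema as these snippets:

def calculate_likes_one_statement(db, sharedfile_id):
    # Same recount as calculate_likes(), pushed into one UPDATE
    db.execute(
        "UPDATE sharedfile SET like_count = "
        "(SELECT count(id) FROM favorite WHERE sharedfile_id = %s AND deleted = 0) "
        "WHERE id = %s",
        sharedfile_id, sharedfile_id)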
Example #8
def final_dba_job(v_job_id):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # mark the job as finished in the dba_jobs queue
    v_update_job_sql = '''update dba_jobs set status=1 where job_id=%d''' % (
        v_job_id)

    db.execute(v_update_job_sql.replace('%', '%%'))

    db.close()
Example #9
def execute_query(query, **kw_dict):
    """Execute query
    """
    conn = Connection(**MYSQL_SETTINGS)
    if not kw_dict:  # a ** dict is never None; an empty dict means no options
        try:
            result = conn.execute(query)
            conn.close()
            return result
        except Exception, e:
            print e
Example #10
def initial_dba_job(v_op_user, v_op_comment):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_add_job_sql = '''insert into dba_jobs(op_user,job_desc) values('%s','%s')''' % (
        v_op_user, v_op_comment)

    # execute() returns the lastrowid, i.e. the new job's id
    v_job_id = db.execute(v_add_job_sql.replace('%', '%%'))

    db.close()

    return v_job_id
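Together with Examples #6 and #8, this forms a small job-tracking workflow: initial_dba_job() returns the new job's id, log_dba_jobs_progress() appends progress rows, and final_dba_job() flips the status flag. A usage sketch with made-up values:

# Hypothetical end-to-end flow for the dba_jobs helpers above
job_id = initial_dba_job('dba_admin', 'rebuild index on orders')
log_dba_jobs_progress(job_id, 'step 1/2: copy done', 'copy finished', 50)
log_dba_jobs_progress(job_id, 'step 2/2: swap done', 'copy and swap finished', 100)
final_dba_job(job_id)  # mark the job as finished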
Example #11
    def post(self):
        title = self.get_argument("title", "")
        author = self.get_argument("author", "")
        category = self.get_argument("category", "")
        date = self.get_argument("date", "")
        profile = self.get_argument("profile", "")
        picUrl = self.get_argument("picUrl", "")
        url = self.get_argument("url", "")

        time = datetime.datetime.strptime(date, "%m/%d/%Y")
        day = {
            0: "Mon",
            1: "Tue",
            2: "Wed",
            3: "Thu",
            4: "Fri",
            5: "Sat",
            6: "Sun",
        }[time.weekday()]

        sql = """INSERT INTO articles (`title`, `author`, `day`, `time`, `url`, `profile`, `picUrl`, `category`)
                 VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}');
              """.format(
              title,
              author,
              day,
              time,
              url,
              profile,
              picUrl,
              category
        )
        db = Connection(settings.DATABASE_SERVER,
                        settings.DATABASE_NAME,
                        settings.DATABASE_USER,
                        settings.DATABASE_PASSWORD,
                        )
        lastrowid = db.execute(sql)
        if lastrowid:
            self.redirect("/articles/details/{0}/".format(lastrowid))
        else:
            template_name = "/upload_error.html"
            self.render(template_name)
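As an aside, the weekday dict above matches what strftime gives directly under an English C locale; a minimal sketch:

# Equivalent lookup via the parsed datetime itself (locale-dependent,
# so the explicit dict above is the safer choice)
day = time.strftime("%a")  # "Mon", "Tue", ...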
Example #12
def add_posts(shake_id=0,
              sharedfile_id=0,
              sourcefile_id=0,
              deleted=0,
              created_at=None,
              **kwargs):
    """
    This task will get the subscribers for a shake and insert
    a post for every user. If the sourcefile_id exists in posts
    it will set "seen" equal to 1.
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)

    #get subscribers for shake_id
    results = db.query(
        """SELECT user_id as id FROM subscription WHERE shake_id = %s and deleted = 0""",
        shake_id)
    for user in results:
        seen = 0
        #does this sourcefile exist in post for this user?
        existing = db.query(
            """SELECT id FROM post WHERE user_id = %s and sourcefile_id = %s and deleted=0 ORDER BY created_at desc LIMIT 1""",
            user['id'], sourcefile_id)
        # if so, mark the new post as already seen
        if existing:
            seen = 1
        # a new row is inserted either way, preserving created_at when given
        if created_at:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id, created_at)
        else:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id)
    #now insert a post for the user who originally posted it.
    sharedfile = db.get("""SELECT user_id from sharedfile WHERE id = %s""",
                        sharedfile_id)
    db.execute(
        """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
        sharedfile['user_id'], sourcefile_id, sharedfile_id, 1, 0, shake_id)
    db.close()
Example #13
def calculate_saves(sharedfile_id, **kwargs):
    """
    Take the id of a sharedfile that just got saved by another user.

    If this file is an original, meaning it has no original_id set, we can safely calculate the save count
    from how many sharedfiles have this file's id as their original_id.

    However, if the file has an original, we also need to recalculate the original's save count as well as
    this file's save count.
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    sharedfile = db.get("select original_id from sharedfile where id = %s",
                        sharedfile_id)
    original_id = sharedfile['original_id']

    # If this file is an original, calculate its save count from all sharedfiles where this file is the original_id.
    if original_id == 0:
        original_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where original_id = %s and deleted = 0",
            sharedfile_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   original_saves['count'], sharedfile_id)
    # Otherwise, we need to update the original's save count and this file's save count.
    else:
        original_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where original_id = %s and deleted = 0",
            original_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   original_saves['count'], original_id)

        # Calculate this file's new save count based only on its parent, since it isn't an original.
        parent_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where parent_id = %s and deleted = 0",
            sharedfile_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   parent_saves['count'], sharedfile_id)
    db.close()
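A concrete walk-through of the branching above, with hypothetical ids: suppose sharedfile 7 was saved from sharedfile 3, so row 7 has original_id = 3.

# calculate_saves(7) takes the else branch:
#   1. recounts the original's saves via original_id = 3
#   2. recounts file 7's own saves via parent_id = 7
calculate_saves(7)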
Example #14
def main():
    db1 = Connection(options.database_host, options.database_user,
                     options.database_password)
    db1.execute("UPDATE shakesharedfile SET deleted = 1 WHERE deleted = 0 "
                "AND sharedfile_id IN (SELECT id FROM sharedfile WHERE deleted = 1)")
Example #15
def upload_user_stats():

    # connect to the config center database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # truncate first, then re-insert
    db.execute('truncate table meta_table_statistics')
    db.execute('truncate table meta_index_statistics')

    # the E4S server with id 64 is not managed by the platform, so exclude it manually (b.id != 64)
    v_sql = r"""SELECT b.ip,b.port,b.id as instance_id,a.id as schema_id,a.name as db_name from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    upload_tables_list = db.query(v_sql)

    if upload_tables_list:  # loop over the instance list

        i = 0

        for upload_table in upload_tables_list:

            instance_id = upload_table['instance_id']
            schema_id = upload_table['schema_id']
            db_name = upload_table['db_name']
            host_ip = upload_table['ip']
            mysql_port = upload_table['port']
            v_host = host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # fetch the table metadata, then insert it
            v_pl = r"""SELECT table_schema,table_name,rows_read,rows_changed,rows_changed_x_indexes 
            from table_statistics where table_schema='%s' """ % (db_name)

            table_list = db_remote.query(v_pl)

            for table_row in table_list:

                table_schema = table_row['table_schema']
                table_name = table_row['table_name']
                rows_read = table_row['rows_read']
                rows_changed = table_row['rows_changed']
                rows_changed_x_indexes = table_row['rows_changed_x_indexes']

                v_insert_sql = '''insert into meta_table_statistics(instance_id,schema_id,table_schema,
                table_name,rows_read,rows_changed,rows_changed_x_indexes) 
                values(%d,%d,'%s','%s',%d,%d,%d)''' % (
                    instance_id, schema_id, table_schema,
                    table_name, rows_read, rows_changed, rows_changed_x_indexes)

                # escape any remaining % so the driver doesn't treat them as placeholders
                db.execute(v_insert_sql.replace('%', '%%'))

            # fetch the index metadata, then insert it
            v_pl2 = r"""SELECT table_schema,table_name,index_name,rows_read from index_statistics 
            where TABLE_SCHEMA='%s' """ % (db_name)

            table_list2 = db_remote.query(v_pl2)

            for table_row2 in table_list2:

                table_schema = table_row2['table_schema']
                table_name = table_row2['table_name']
                index_name = table_row2['index_name']
                rows_read = table_row2['rows_read']

                v_insert_sql2 = '''insert into meta_index_statistics(instance_id,schema_id,table_schema,
                table_name,index_name,rows_read) 
                values(%d,%d,'%s','%s','%s',%d)''' % (
                    instance_id, schema_id, table_schema,
                    table_name, index_name, rows_read)

                db.execute(v_insert_sql2.replace('%', '%%'))

            db_remote.close()

            # generate the log_hot_tables statistics; statistic_time records
            # when the statistics were actually produced
            v_insert_sql3 = '''insert into log_hot_tables(instance_id,schema_id,table_schema,
                table_name,TABLE_COMMENT,TABLE_CREATE_TIME,rows_read,rows_changed,
                rows_changed_x_indexes) select a.instance_id,a.schema_id,a.TABLE_SCHEMA,
                a.TABLE_NAME,b.TABLE_COMMENT,b.CREATE_TIME,a.ROWS_READ,a.ROWS_CHANGED,
                a.ROWS_CHANGED_X_INDEXES from meta_table_statistics a,meta_tables b where 
                a.instance_id=%d and a.schema_id=%d and 
                a.TABLE_SCHEMA='%s' and a.instance_id=b.instance_id and a.TABLE_SCHEMA=b.TABLE_SCHEMA 
                and a.TABLE_NAME=b.TABLE_NAME''' % (
                instance_id, schema_id, table_schema)

            print v_insert_sql3

            db.execute(v_insert_sql3.replace('%', '%%'))

            i = i + 1

    db.close()

    # remotely flush the statistics tables on every server
    os_user = '******'
    OS_APPS_PASSWD = config.OS_APPS_PASSWD
    DB_USER = config.DB_USER
    DB_PASSWD = config.DB_PASSWD

    # connect to the config center database again
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # the E4S server with id 64 is not managed by the platform, so exclude it manually (b.id != 64)
    v_sql = r"""SELECT distinct b.ip,b.port from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    server_list = db.query(v_sql)

    if server_list:  # loop over the servers

        i = 0

        for single_server in server_list:

            host_ip = single_server['ip']
            mysql_port = single_server['port']

            # run the SQL on the remote host via a paramiko call
            exe_sql = 'flush table_statistics;flush index_statistics;'

            result = func.remote_exe_sql(host_ip, os_user, OS_APPS_PASSWD, mysql_port, 'information_schema', exe_sql, DB_USER, DB_PASSWD)

            if result == '':
                result = 'Executed successfully!'

            print result
Example #16
def add_single_backup_job(v_setup_id):  # id of the row just updated or inserted in mysql_ins_bak_setup

    os_user = config.OS_USER
    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # jobs are kept in memory by default

    url = "sqlite:////home/apps/dbajob.sqlite"
    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()
    print "a"

    # connect to the config center DB to fetch the backup interval and start time
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id and a.id=%d""" % (
        v_setup_id
    )
    print v_sql

    bak_server = db.get(v_sql)

    instance_id = bak_server["instance_id"]
    from_host = bak_server["ip"]
    mysql_port = bak_server["port"]
    backup_interval_type = bak_server["backup_interval_type"]
    backup_start_time = bak_server["backup_start_time"]

    str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time
    print str_start_date

    v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

    if backup_interval_type == 1:  # every day
        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            days=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )
    elif backup_interval_type == 2:  # every week
        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            weeks=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )
    elif backup_interval_type == 3:  # every hour
        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            hours=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )
    else:
        pass

    scheduler.print_jobs()
    print "b"
    scheduler.start()
    scheduler.print_jobs()
    print "c"

    # record the scheduling state in the DB; 0 means the schedule has started
    # but the actual backup has not yet begun
    v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
    values(%d,0)""" % (
        instance_id
    )
    db.execute(v_sche_start_sql)

    db.close()
Example #17
# A quick torndb cheat sheet; see the official doc and source below
# to learn how to use it.

# Installation: pip install torndb
# Official doc: http://torndb.readthedocs.org/en/latest/
# Source: https://github.com/bdarnell/torndb/blob/master/torndb.py

from torndb import Connection

# Connect by IP address
db = Connection('127.0.0.1', 'database_name', user='******', password='******')
# Connect by IP address with port
db = Connection('127.0.0.1:1234', 'database_name', user='******', password='******')
# Connect by socket
db = Connection('/tmp/mysql.sock', 'database_name', user='******', password='******')
# Connection over SSH: open an SSH tunnel with
#     ssh -L 1234:localhost:3306 [email protected]
# then connect to 127.0.0.1:1234

# Retrieve one object
post = db.get("SELECT * FROM posts LIMIT 1")

# Retrieve several objects
for post in db.query("SELECT * FROM posts"):
    print post.title

# Insert one entry
db.execute("INSERT INTO posts (user_id, content) VALUES (%s, %s)", 12, "hello world !")

# Insert multiple entries
values = [(12, "hello"), (17, "world"), (22, "blah")]
db.executemany("INSERT INTO posts (user_id, content) VALUES (%s, %s)", values)
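A few more calls that exist in torndb and complement the cheat sheet above; a short sketch against the same posts table:

# get() returns None when no row matches (and raises if more than one does)
post = db.get("SELECT * FROM posts WHERE id = %s", 999999)
if post is None:
    print "no such post"

# execute_rowcount() returns the number of affected rows instead of lastrowid
changed = db.execute_rowcount("UPDATE posts SET content = %s WHERE user_id = %s",
                              "updated !", 12)
print changed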
Example #18
def add_schedule_backup_job():
    # if __name__ == '__main__':
    os_user = config.OS_USER
    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # jobs are kept in memory by default

    url = "sqlite:////home/apps/dbajob.sqlite"
    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()
    print "a"

    scheduler.remove_all_jobs(jobstore="sqlite_js")
    scheduler.print_jobs()
    print "remove"

    # v_current_jobs = scheduler.get_jobs()
    # print v_current_jobs
    # if v_current_jobs:  # if the job already exists, remove it first
    #     scheduler.remove_job('backup')

    # connect to the config center DB to fetch the backup intervals and start times
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """
    print v_sql

    bak_server_list = db.query(v_sql)

    if bak_server_list:  # there are servers to configure

        i = 0

        # mark schedules that never started as manually ended (backup_result_type=4)
        v_manual_end_sql = "update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0"
        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server["instance_id"]
            from_host = bak_server["ip"]
            mysql_port = bak_server["port"]
            backup_interval_type = bak_server["backup_interval_type"]
            backup_start_time = bak_server["backup_start_time"]

            str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time
            print str_start_date

            v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

            if backup_interval_type == 1:  # every day
                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    days=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )
            elif backup_interval_type == 2:  # every week
                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    weeks=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )
            elif backup_interval_type == 3:  # every hour
                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    hours=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )
            else:
                pass

            # record the scheduling state in the DB; 0 means the schedule has
            # started but the actual backup has not yet begun
            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (
                instance_id
            )
            db.execute(v_sche_start_sql)

            i = i + 1

        scheduler.print_jobs()
        print "b"
        scheduler.start()
        scheduler.print_jobs()
        print "c"

    db.close()
Example #19
def generate_mysql_config(tlp_file, schema_file, force=False):
    """
    Args:
        tlp_file(unicode): config template to be written in to config center db
        schema_file(unicode): scheme file
    """

    if not os.path.exists(tlp_file):
        raise ImproperlyConfigured(u"invalid config template file `%s`" %
                                   tlp_file)
    if not os.path.exists(schema_file):
        raise ImproperlyConfigured(u"invalid config scheme file path `%s`" %
                                   schema_file)
    with open(tlp_file) as fi:
        config = json.load(fi)
    with open(schema_file) as fi:
        schema = json.load(fi)
        Draft4Validator(schema).validate(config)
    config_center = settings.CONFIG_CENTER
    env = settings.ENV
    logging.warn("init config center with env `%s`" % env)
    from torndb import Connection
    db = None
    try:
        db_name = config_center["database"]
        db = Connection(host=config_center["host"],
                        database=db_name,
                        user=config_center["user"],
                        password=config_center["password"])
        table_name = "t_s_config_%s" % env
        if force:
            db.execute("DROP TABLE if EXISTS %s" % table_name)
        if db.query("show tables where Tables_in_%s = '%s'" %
                    (db_name, table_name)):
            raise RuntimeError(
                u"config center of env `%s` has already been initialized !" % env)
        create_table_sql = u"""
            CREATE TABLE IF NOT EXISTS %s (
              `id` int(20) unsigned zerofill NOT NULL AUTO_INCREMENT,
              `node` varchar(40) COLLATE utf8_bin NOT NULL COMMENT '模块节点名',
              `key` varchar(40) COLLATE utf8_bin NOT NULL COMMENT '模块下键名',
              `value` varchar(200) COLLATE utf8_bin DEFAULT NULL COMMENT '模块键值',
              `key_type` varchar(6) COLLATE utf8_bin NOT NULL DEFAULT 'string' COMMENT '配置键对应的类型',
              `key_note` varchar(45) COLLATE utf8_bin DEFAULT NULL COMMENT '键名备注说明',
              PRIMARY KEY (`id`),
              UNIQUE KEY `unique_config` (`node`,`key`) USING BTREE COMMENT '配置唯一索引'
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='%s环境 系统运行配置表'"""\
                           % (table_name, env)
        db.execute(create_table_sql)
        sql = "INSERT INTO %s" % table_name
        sql += "(`node`, `key`, `value`, `key_type`) VALUES (%s, %s, %s, %s)"
        datas = []
        for key, value in config.items():
            for k, v in value.items():
                key_type = "string"
                if isinstance(v, list):
                    v = ", ".join(v)
                elif isinstance(v, bool):
                    key_type = "bool"
                elif isinstance(v, int):
                    key_type = "int"

                data = (key, k, v, key_type)
                datas.append(data)
        db.executemany_rowcount(sql, datas)
    except KeyError as e:
        raise ImproperlyConfigured(u"need config key `%s` of config_center" %
                                   (e.args[0]))
    finally:
        if db:
            db.close()
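A hypothetical invocation of the helper above (both paths are made up); with force=True the t_s_config_<env> table is dropped and rebuilt from the template:

generate_mysql_config('conf/config.tpl.json', 'conf/config.schema.json', force=True)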
Example #20
def start_schedule():
    # if __name__ == '__main__':
    os_user = config.OS_USER
    os_password = config.OS_APPS_PASSWD

    scheduler = Scheduler(daemonic=False)

    scheduler.print_jobs()

    # scheduler.remove_jobstore('file', close=True)
    # scheduler.shutdown(wait=False)

    scheduler.shutdown()

    # scheduler.unschedule_func(backup)

    scheduler.add_jobstore(ShelveJobStore('/tmp/db_schedule'), 'file')

    v_current_jobs = scheduler.get_jobs()
    print v_current_jobs

    if v_current_jobs:  # if the backup job already exists, unschedule it first
        scheduler.unschedule_func(backup)

    # scheduler = Scheduler(standalone=True)
    # scheduler = Scheduler(daemon=True)
    # connect to the config center DB to fetch the backup intervals and start times
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """
    print v_sql

    bak_server_list = db.query(v_sql)

    if bak_server_list:  # there are servers to configure

        i = 0

        # mark schedules that never started as manually ended (backup_result_type=4)
        v_manual_end_sql = 'update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0'
        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server['instance_id']
            from_host = bak_server['ip']
            mysql_port = bak_server['port']
            backup_interval_type = bak_server['backup_interval_type']
            backup_start_time = bak_server['backup_start_time']

            str_start_date = time.strftime("%Y-%m-%d") + ' ' + backup_start_time
            print str_start_date

            if backup_interval_type == 1:  # every day
                # use the file-based jobstore (jobstore='file') instead of the
                # default in-memory one; note only this branch does so
                scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

            elif backup_interval_type == 2:  # every week
                scheduler.add_interval_job(backup, weeks=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 3:  # every hour
                scheduler.add_interval_job(backup, hours=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            # record the scheduling state in the DB; 0 means the schedule has
            # started but the actual backup has not yet begun
            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (instance_id)
            db.execute(v_sche_start_sql)

            i = i + 1

    db.close()

    if bak_server_list:  # there are servers to configure
        scheduler.start()
        print 'success!'
        scheduler.print_jobs()
Example #21
class MySQLHelper:

    def __init__(self, host, user, password, database):
        self._db = Connection(host, database, user=user, password=password, max_idle_time=10)
        if not self._db._db:
            raise Exception('%s' % self._db.error_msg)

    def query(self, sql, *parameters):
        return self._db.query(sql, *parameters)

    def query_one(self, sql, *parameters):
        res = self._db.query(sql, *parameters)
        return res.pop() if res else {}

    def write(self, sql, *parameters):
        return self._db.execute(sql, *parameters)

    def gen_insert(self, tablename, rowdict, replace=False):
        return self._db.gen_insert_sql(tablename, rowdict, replace)

    def insert_dict(self, tablename, rowdict):
        key_strs = ", ".join(["""`%s`""" % key for key in rowdict.keys()])
        value_strs = ", ".join(["""'%s'""" % rowdict.get(key) for key in rowdict.keys()])
        sql = """INSERT INTO %s (%s) VALUES (%s)""" % (tablename, key_strs, value_strs)
        return self._db.execute(sql)

    def insert_batch(self, tablename, batch_params):
        value_batch = []
        for param in batch_params:
            keys = param.keys()
            key_strs = ", ".join(["""`%s`""" % key for key in keys])
            value_strs = "(%s)" % ", ".join(
                ["""'%s'""" % "%s" % param.get(key) for key in keys])
            value_batch.append(value_strs)
        sql = """INSERT INTO %s (%s) VALUES %s""" % (tablename, key_strs, ",".join(value_batch))
        return self._db.execute(sql)

    def update_dict(self, tablename, rowdict, where):
        sql = """UPDATE %s SET %s WHERE %s""" % (
        tablename, self._formatter(rowdict, ', '), self._formatter(where, " AND "))
        return self._db.execute(sql)

    def transaction(self, query, parameters):
        return self._db.transaction(query, parameters)

    def get(self, tablename, conds, cols='', extra_conds={}):
        if not tablename:
            return False
        cols = "%s" % ','.join(cols) if cols else '*'
        wheres = []
        values = []
        if conds and isinstance(conds, dict):
            for key, value in conds.items():
                if isinstance(value, (list, tuple)):
                    wheres.append("`%s` IN (%s)" % (key, "'%s'" % "','".join([str(v) for v in value])))
                else:
                    wheres.append("`%s`=%%s" % key)
                    values.append("%s" % value)
        where_str = ' AND '.join(wheres)
        sql = """ SELECT %s FROM `%s` """ % (cols, tablename)
        if where_str:
            sql += """ WHERE %s """ % (where_str)
        if extra_conds.get('group_by'):
            sql += """ GROUP by %s """ % ','.join(extra_conds['group_by'])
        if extra_conds.get('order_by'):
            sql += """ ORDER by %s """ % ','.join(extra_conds['order_by'])
        if extra_conds.get('limit'):
            sql += """ LIMIT %s """ % ','.join(map(str, extra_conds['limit']))

        return self._db.query(sql, *values)


    def _serialize(self, value):
        if isinstance(value, (dict, list, set)):
            value = json.dumps(value)
        else:
            value = "%s" % value
        return value

    def _formatter(self, pairs, delimiter):
        values = []
        for key, value in pairs.items():
            if not isinstance(value, list):
                value = self._serialize(value)
                values.append("""`%s`='%s'""" % (key, value))
            else:
                values.append("""`%s` in ("%s")""" % (key, '","'.join([self._serialize(val) for val in value])))
        return delimiter.join(values)

    def __del__(self):
        self._db.close()
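A short usage sketch for the wrapper above (host, credentials and table are hypothetical; note that gen_insert() and transaction() rely on a patched torndb that exposes gen_insert_sql and transaction):

helper = MySQLHelper('127.0.0.1', 'root', 'secret', 'car_spider')
helper.insert_dict('car', {'model': 'Model 3', 'after_price': 239900})
row = helper.query_one("SELECT * FROM car WHERE model = %s", 'Model 3')
cars = helper.get('car', {'status': ['new', 'hot']}, cols=['id', 'model'],
                  extra_conds={'order_by': ['id desc'], 'limit': [0, 10]})
print row, cars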
Example #22
def upload_auto_increment():

    # connect to the config center database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list:  # loop over the instance list

        i = 0

        for upload_server in upload_server_list:

            instance_id = upload_server['id']
            host_ip = upload_server['ip']
            mysql_port = upload_server['port']
            v_host = host_ip + ':' + str(mysql_port)

            print v_host

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # for every auto_increment column, compute how close its current
            # value is to the column type's maximum
            v_pl = r"""select information_schema . columns . TABLE_SCHEMA AS TABLE_SCHEMA,
                        information_schema . columns . TABLE_NAME AS TABLE_NAME,
                        information_schema . columns . COLUMN_NAME AS COLUMN_NAME,
                        information_schema . columns . DATA_TYPE AS DATA_TYPE,
                        information_schema . columns . COLUMN_TYPE AS COLUMN_TYPE,
                        ((case information_schema . columns . DATA_TYPE
                        when 'tinyint' then
                            255
                        when 'smallint' then
                            65535
                        when 'mediumint' then
                            16777215
                        when 'int' then
                            4294967295
                        when 'bigint' then
                            18446744073709551615
                        end) >>
                        if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0),
                            0,
                            1)) AS MAX_VALUE,
                        information_schema . tables . AUTO_INCREMENT AS AUTO_INCREMENT,
                        (information_schema . tables .
                        AUTO_INCREMENT /
                        ((case information_schema . columns . DATA_TYPE
                            when 'tinyint' then
                                255
                            when 'smallint' then
                                65535
                            when 'mediumint' then
                                16777215
                            when 'int' then
                                4294967295
                            when 'bigint' then
                                18446744073709551615
                        end) >>
                        if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0),
                            0,
                            1))) AS AUTO_INCREMENT_RATIO
                        from (information_schema . columns join information_schema . tables
                          on(((information_schema . columns . TABLE_SCHEMA = information_schema .
                               tables . TABLE_SCHEMA) and
                               (information_schema . columns . TABLE_NAME = information_schema .
                               tables . TABLE_NAME))))
                        where ((information_schema . columns .TABLE_SCHEMA not in
                                    ('mysql', 'INFORMATION_SCHEMA', 'performance_schema','common_schema')) and
                                    (information_schema . tables . TABLE_TYPE = 'BASE TABLE') and
                                    (information_schema . columns . EXTRA = 'auto_increment'))"""

            print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_server_ip = v_host
                vp_instance_id = instance_id

                vp_table_schema = process_row['TABLE_SCHEMA']
                vp_table_name = process_row['TABLE_NAME']
                vp_column_name = process_row['COLUMN_NAME']
                vp_data_type = process_row['DATA_TYPE']
                vp_column_type = process_row['COLUMN_TYPE']
                vp_max_value = process_row['MAX_VALUE']
                vp_auto_increment = process_row['AUTO_INCREMENT']
                vp_auto_increment_ratio = process_row['AUTO_INCREMENT_RATIO']

                # delete any previous row for this column, then insert fresh values
                v_delete_sql = '''delete from auto_increment_list where INSTANCE_ID = '%s' and TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' and COLUMN_NAME = '%s' ''' % (
                    vp_instance_id, vp_table_schema, vp_table_name, vp_column_name)

                print v_delete_sql

                db.execute(v_delete_sql)

                v_insert_sql = '''insert into auto_increment_list(instance_id,server_ip,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,DATA_TYPE,COLUMN_TYPE,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO) 
                values(%d,'%s','%s','%s','%s','%s','%s',%d,%d,%d)''' % (
                    vp_instance_id, vp_server_ip, vp_table_schema, vp_table_name, vp_column_name, vp_data_type, vp_column_type, int(vp_max_value), int(vp_auto_increment), int(vp_auto_increment_ratio))

                print v_insert_sql

                db.execute(v_insert_sql)

            db_remote.close()

            i = i + 1

        # if any auto-increment value has reached the warning ratio, mail the DBAs
        v_sendmail_sql = r'''select count(*) count
                    from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''

        if_list = db.query(v_sendmail_sql)

        for if_row in if_list:
            v_if = if_row['count']
            print v_if
            if v_if >= 1:
                print '11'

                v_warn_sql = r'''select SERVER_IP,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO 
                            from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''
                print v_warn_sql
                warn_list = db.query(v_warn_sql)
                v_server_ip = '\r\n'
                for warn_row in warn_list:
                    v_server_ip = v_server_ip + warn_row['SERVER_IP'] + '|schema: ' + warn_row['TABLE_SCHEMA'] + '|table: ' + warn_row['TABLE_NAME'] + '|column: ' + warn_row['COLUMN_NAME'] + '\r\n'

                print v_server_ip
                v_msg_text = v_server_ip
                v_receiver = '[email protected],[email protected]'
                v_subject = 'Auto-increment IDs close to overflowing; see the monitoring page for details'
                v_return = func.send_mail_to_devs(v_receiver, v_subject, v_msg_text)

    db.close()
Example #23
# -*- coding: utf-8 -*-

from torndb import Connection

db = Connection('127.0.0.1', 'mysql', 'root')

try:
    db.execute('create database `car_spider`;')
    print 'create database success.'
except:
    print 'database exists.'
    pass

car_info_sql = """
CREATE TABLE `car_spider`.`car` (
    `id` int(11) NOT NULL AUTO_INCREMENT,
    `logo` varchar(200) NOT NULL,
    `model` varchar(255),
    `befor_price` varchar(100),
    `after_price` int(12),
    `plan` varchar(500),
    `Purchased` int(12),
    `link` varchar(255) default "",
    `created` datetime,
    `updated` datetime,
    `status` varchar(10),
    PRIMARY KEY (`id`),
    INDEX `idx_dt` (created)
) ENGINE=MyISAM AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
"""
Example #24
def upload_processlist():

    # connect to the config center database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    upload_server_list = db.query(v_sql)

    if upload_server_list:  # loop over the instance list

        i = 0

        for upload_server in upload_server_list:

            instance_id = upload_server['id']
            host_ip = upload_server['ip']
            mysql_port = upload_server['port']
            v_host = host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            v_pl = r"""SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,TIME_MS,ROWS_SENT,ROWS_EXAMINED from PROCESSLIST"""

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_id = process_row['ID']
                vp_user = process_row['USER']
                vp_host = process_row['HOST']
                vp_db = process_row['DB']

                # if empty, turn it into a MySQL NULL; otherwise add quotes and
                # pass it through as %s rather than '%s'
                if vp_db is None:
                    vp_db = 'NULL'
                else:
                    vp_db = "'" + vp_db + "'"

                vp_command = process_row['COMMAND']
                vp_time = process_row['TIME']
                vp_state = process_row['STATE']
                vp_info = process_row['INFO']

                if vp_info is None:
                    vp_info = 'NULL'
                else:
                    vp_info = vp_info.replace('"', "'")  # turn double quotes into single quotes
                    vp_info = '"' + vp_info + '"'  # guard against embedded single quotes

                vp_time_ms = process_row['TIME_MS']
                vp_rows_sent = process_row['ROWS_SENT']
                vp_rows_examined = process_row['ROWS_EXAMINED']

                v_insert_sql = '''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s',%s,'%s',%d,'%s',%s,%d,%d,%d)''' % (
                    instance_id, vp_id, vp_user, vp_host, vp_db, vp_command, vp_time, vp_state, vp_info, vp_time_ms, vp_rows_sent, vp_rows_examined)

                # escape any remaining % so the driver doesn't treat them as placeholders
                db.execute(v_insert_sql.replace('%', '%%'))

            db_remote.close()

            i = i + 1

    db.close()
Exemple #25
0
def upload_meta_tables():  
    #连接配置中心库
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    db.execute('truncate table meta_tables')  # 先truncate 再插入

    db.execute('truncate table meta_statistics')  # 先truncate 再插入

    db.execute('truncate table meta_redundant_keys')  # 先truncate 再插入

    # 由于id为64的E4S 服务器不在平台管辖,先手工剔除 b.id !=64
    v_sql = r"""SELECT b.ip,b.port,b.id as instance_id,a.id as schema_id,a.name as db_name from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    #print v_sql

    upload_tables_list = db.query(v_sql)

    if upload_tables_list: # 对实例表进行循环

        i=0

        

        for upload_table in upload_tables_list:

            instance_id = upload_table['instance_id']

            schema_id = upload_table['schema_id']

            db_name = upload_table['db_name']

            host_ip = upload_table['ip']

            mysql_port = upload_table['port']

            v_host =host_ip + ':' + str(mysql_port)

            #连接远程实例
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # 取表的元信息然后插入

            v_pl = r"""SELECT table_catalog,table_schema,table_name,table_type,engine,version,row_format,
            table_rows,avg_row_length,data_length,max_data_length,index_length,data_free,auto_increment,create_time,
            update_time,check_time,table_collation,checksum,
            create_options,table_comment from tables where table_type='BASE TABLE' and TABLE_SCHEMA='%s' """ % (db_name)

            #print v_pl

            table_list = db_remote.query(v_pl)

            for table_row in table_list:

                table_catalog = table_row['table_catalog']

                table_schema = table_row['table_schema']

                table_name = table_row['table_name']

                table_type = table_row['table_type']

                engine = table_row['engine']


                version = table_row['version']

                row_format = table_row['row_format']

                table_rows = table_row['table_rows']

                avg_row_length = table_row['avg_row_length']

                data_length = table_row['data_length']

                max_data_length = table_row['max_data_length']

                index_length = table_row['index_length']

                data_free = table_row['data_free']

                auto_increment = table_row['auto_increment']

                # 本来Int类型,但为了处理Null值插入,以字符串类型%s 进行插入
                if auto_increment is None:
                    auto_increment = 'NULL'
                else:
                    auto_increment = "'"+str(auto_increment)+"'"

                # 日期型插入,由于要处理null值插入,经过处理后,以%s 进行插入
                create_time = table_row['create_time']

                if create_time is None:
                    create_time = 'NULL'
                else:
                    create_time = "'"+str(create_time)+"'"
                

                # 日期型插入,由于要处理null值插入,经过处理后,以%s 进行插入
                update_time = table_row['update_time']

                if update_time is None:
                    update_time = 'NULL'
                else:
                    update_time = "'"+str(update_time)+"'"


                check_time = table_row['check_time']

                if check_time is None:
                    check_time = 'NULL'
                else:
                    check_time = "'"+str(check_time)+"'"


                table_collation = table_row['table_collation']

                # 本来Int类型,但为了处理Null值插入,以字符串类型%s 进行插入

                checksum = table_row['checksum']

                if checksum is None:
                    checksum = 'NULL'
                else:
                    checksum = "'"+str(checksum)+"'"
                


                create_options = table_row['create_options']

                table_comment = table_row['table_comment']

                
                #try:
                v_insert_sql='''insert into meta_tables(instance_id,schema_id,table_catalog,table_schema,table_name,
                table_type,engine,version,row_format,table_rows,avg_row_length,data_length,max_data_length,
                index_length,data_free,auto_increment,create_time,update_time,check_time,table_collation,
                checksum,create_options,table_comment) 
                values(%d,%d,'%s','%s','%s',
                '%s','%s',%d,'%s',%d,%d,%d,%d,
                %d,%d,%s,%s,%s,%s,'%s',
                %s,'%s','%s')''' % (
                instance_id,schema_id,table_catalog,table_schema,table_name,
                table_type,engine,version,row_format,table_rows,avg_row_length,data_length,max_data_length,
                index_length,data_free,auto_increment,create_time,update_time,check_time,table_collation,
                checksum,create_options,table_comment
                )

                #print v_insert_sql

                db.execute(v_insert_sql.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)


            # 取索引的元信息,然后插入

            v_pl2 = r"""SELECT table_catalog,table_schema,table_name,non_unique,index_schema,
            index_name,seq_in_index,column_name,collation,cardinality,sub_part,packed,nullable,
            index_type,comment,index_comment from statistics where TABLE_SCHEMA='%s' """ % (db_name)


            table_list2 = db_remote.query(v_pl2)

            for table_row2 in table_list2:

                table_catalog = table_row2['table_catalog']

                table_schema = table_row2['table_schema']

                table_name = table_row2['table_name']

                non_unique = table_row2['non_unique']

                index_schema = table_row2['index_schema']


                index_name = table_row2['index_name']

                seq_in_index = table_row2['seq_in_index']

                column_name = table_row2['column_name']

                collation = table_row2['collation']

                cardinality = table_row2['cardinality']

                # 本来Int类型,但为了处理Null值插入,以字符串类型%s 进行插入

                if cardinality is None:
                    cardinality = 'NULL'
                else:
                    cardinality = "'"+str(cardinality)+"'"


                sub_part = table_row2['sub_part']

                # 本来Int类型,但为了处理Null值插入,以字符串类型%s 进行插入

                if sub_part is None:
                    sub_part = 'NULL'
                else:
                    sub_part = "'"+str(sub_part)+"'"


                packed = table_row2['packed']

                if packed is None:
                    packed = 'NULL'
                else:
                    
                    packed = '"'+packed+'"'   # 防止字符里面本身包含单引号


                nullable = table_row2['nullable']
                
                index_type = table_row2['index_type']

                comment = table_row2['comment']

                index_comment = table_row2['index_comment']

                

                
                try:
                    v_insert_sql2 = '''insert into meta_statistics(instance_id,schema_id,table_catalog,
                    table_schema,table_name,non_unique,index_schema,index_name,seq_in_index,
                    column_name,collation,cardinality,sub_part,packed,nullable,index_type,comment,index_comment)
                    values(%d,%d,'%s',
                    '%s','%s',%d,'%s','%s',%d,
                    '%s','%s',%s,%s,%s,'%s','%s','%s','%s')''' % (
                    instance_id,schema_id,table_catalog,
                    table_schema,table_name,non_unique,index_schema,index_name,seq_in_index,
                    column_name,collation,cardinality,sub_part,packed,nullable,index_type,comment,index_comment
                    )

                    #print v_insert_sql2

                    # double any literal % left after formatting before execute()
                    db.execute(v_insert_sql2.replace('%', '%%'))

                except Exception, e:
                    print e.message
                    print v_insert_sql2
                    text = "insert meta statistics error!," + e.message + ',' + v_insert_sql2
                    log_w(text)

            db_remote.close()


            # connect to the remote instance and extract redundant-index information
            db_remote2 = Connection(v_host,
                            'common_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # fetch the common_schema.redundant_keys view, then insert its rows

            v_pl3 = r"""SELECT table_schema,table_name,redundant_index_name,redundant_index_columns,
            redundant_index_non_unique,dominant_index_name,dominant_index_columns,
            dominant_index_non_unique,subpart_exists,sql_drop_index 
            from redundant_keys where table_schema='%s' """ % (db_name)

            #print v_pl3

            table_list3 = db_remote2.query(v_pl3)

            for table_row3 in table_list3:

                table_schema = table_row3['table_schema']
                table_name = table_row3['table_name']
                redundant_index_name = table_row3['redundant_index_name']
                redundant_index_columns = table_row3['redundant_index_columns']
                redundant_index_non_unique = table_row3['redundant_index_non_unique']
                dominant_index_name = table_row3['dominant_index_name']
                dominant_index_columns = table_row3['dominant_index_columns']
                dominant_index_non_unique = table_row3['dominant_index_non_unique']
                subpart_exists = table_row3['subpart_exists']
                sql_drop_index = table_row3['sql_drop_index']

                try:
                    v_insert_sql3 = '''insert into meta_redundant_keys(instance_id,schema_id,table_schema,
                    table_name,redundant_index_name,redundant_index_columns,redundant_index_non_unique,
                    dominant_index_name,dominant_index_columns,
                    dominant_index_non_unique,subpart_exists,sql_drop_index)
                    values(%d,%d,'%s',
                    '%s','%s','%s',%d,
                    '%s','%s',
                    %d,%d,'%s')''' % (
                    instance_id,schema_id,table_schema,
                    table_name,redundant_index_name,redundant_index_columns,redundant_index_non_unique,
                    dominant_index_name,dominant_index_columns,
                    dominant_index_non_unique,subpart_exists,sql_drop_index
                    )

                    #print v_insert_sql3

                    # double any literal % left after formatting before execute()
                    db.execute(v_insert_sql3.replace('%', '%%'))

                except Exception, e:
                    text = "insert meta redundant_keys error!," + e.message + ',' + v_insert_sql3
                    log_w(text)


            db_remote2.close()

            

            i += 1


    db.close()
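The NULL-or-quoted rendering above is repeated for cardinality, sub_part and packed. A small helper could centralize it; this is a minimal sketch under the same setup, and the function name is ours rather than the original author's:

def sql_literal(value, quote="'"):
    """Render a value for embedding in a formatted SQL string:
    a bare NULL for None, otherwise the quoted string form."""
    if value is None:
        return 'NULL'
    return quote + str(value) + quote

# usage, matching the loop above:
#   cardinality = sql_literal(table_row2['cardinality'])
#   packed = sql_literal(table_row2['packed'], quote='"')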
Exemple #26
0
class Database:

    """docstring for ClassName"""

    def __init__(
        self, master_host, from_host, to_host, db_ip_priv,
        os_user, os_password_source, os_password_target, os_password_priv,
        db_user, db_user_pwd_source, db_user_pwd_target, db_user_pwd_priv,
        db_port_master, db_port_source, db_port_target, db_port_priv,
        db_user_name_rep, db_rep_pwd,
        db_root_user, db_root_pwd_target):

        self.master_host = master_host
        self.from_host = from_host
        self.to_host = to_host
        self.db_ip_priv = db_ip_priv

        self.port = 22   # SSH port

        self.os_user = os_user
        self.os_password_source = os_password_source
        self.os_password_target = os_password_target
        self.os_password_priv = os_password_priv


        self.db_user_name = db_user
        self.db_user_pwd_source = db_user_pwd_source
        self.db_user_pwd_target = db_user_pwd_target
        self.db_user_pwd_priv = db_user_pwd_priv

        self.db_port_master = db_port_master
        self.db_port_source = db_port_source
        self.db_port_target = db_port_target
        self.db_port_priv = db_port_priv

        self.db_user_name_rep = db_user_name_rep
        self.db_rep_pwd = db_rep_pwd

        self.db_root_user = db_root_user
        self.db_root_pwd_target = db_root_pwd_target
        
                
        self.today = datetime.date.today().strftime("%Y%m%d")
        self.xtra_time = datetime.datetime.now().strftime('%Y-%m-%d_%H%M%S')

        self.xtrabackup_bin_path = config.xtrabackup_bin_path
        self.xtrabackup_export_path = config.xtrabackup_export_path
        #self.xtrabackup_restore_path = config.xtrabackup_restore_path
        self.xtrabackup_restore_path = config.mysql_datadir_path

        self.mysql_client_path = config.mysql_client_path
        self.mydumper_bin_path = config.mydumper_bin_path
        self.mydumper_export_path = config.mydumper_export_path
        self.mydumper_import_path = config.mydumper_import_path

        self.db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')
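
        # A call site would look roughly like this (all values below are
        # placeholders, not the original configuration):
        #
        #   tool = Database(master_host, from_host, to_host, db_ip_priv,
        #                   os_user, os_pwd_src, os_pwd_tgt, os_pwd_priv,
        #                   db_user, db_pwd_src, db_pwd_tgt, db_pwd_priv,
        #                   3306, 3306, 3306, 3306,
        #                   rep_user, rep_pwd, 'root', root_pwd_tgt)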

        
    def export_database_metadata(self, v_process_id):  # export the source databases' table structures, views and stored procedures

        text = "%s %s" % (
            datetime.datetime.now(), 
            "One: Begin export master Database table stru,views,procs, Please wait ....")

        print "\033[1;32;40m%s\033[0m" % text  # 绿色
        log_w(text)

        v_db_socket='--socket=/tmp/mysql'+str(self.db_port_source)+'.sock'

        try:
            s = paramiko.SSHClient()
            s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            s.connect(self.from_host, self.port, self.os_user, self.os_password_source)
            
            # get the list of databases that mysqldump should export;
            # no further grep -v may be added here, or the logplatform
            # database would get filtered out along with it
            conm_db_list_1 = r'''%s/mysql -N -u%s %s -e"show databases"|grep -v information_schema|grep -v common_schema|grep -v performance_schema|grep -v mysql|tr "\n" " "''' % (
                self.mysql_client_path,self.db_user_name,v_db_socket)
            v_update_sql = '''update dba_job_progress set cur_prog_shell_cmd = ' ''' + conm_db_list_1 + ''' ' where id= ''' + str(v_process_id)
            print conm_db_list_1
            print v_update_sql
            self.db.execute(v_update_sql)

            conm_db_list = r'''%s/mysql -N -u%s -p'%s' %s -e"show databases"|grep -v information_schema|grep -v common_schema|grep -v performance_schema|grep -v mysql|tr "\n" " "''' % (
                self.mysql_client_path,self.db_user_name,self.db_user_pwd_source,v_db_socket)

            print conm_db_list

            stdin, stdout, stderr = s.exec_command(conm_db_list)

            if stdout.channel.recv_exit_status() == 0:

                db_list_str = stdout.readlines()[-1]  # the output is a single line

                text = "%s  Get mysqldump db list  Execute success !" % datetime.datetime.now()
                log_w(text)
                print "\033[1;32;40m%s\033[0m" % text  # 绿色

            else:

                result = stderr.readlines()[-1].strip()

                text = "%s Get mysqldump db list execute Error ! %s " % (datetime.datetime.now(),result)
                log_w(text)
                print "\033[1;31;40m%s\033[0m" % text  # 古铜色

                return result  # bail out

            # mysqldump: export table structures, views and stored procedures
            conm_1 = r'''mkdir -p %s/bak;%s/mysqldump -u%s %s -f --single-transaction -d -R --skip-triggers -B %s > %s/struc.sql''' % (
                self.mydumper_export_path,self.mysql_client_path,self.db_user_name,v_db_socket, db_list_str,self.mydumper_export_path)
            v_update_sql = '''update dba_job_progress set cur_prog_shell_cmd = ' ''' + conm_1 + ''' ' where id= ''' + str(v_process_id)
            print conm_1
            print v_update_sql
            self.db.execute(v_update_sql)

            conm = r'''mkdir -p %s/bak;%s/mysqldump -u%s -p'%s' %s -f --single-transaction -d -R --skip-triggers -B %s > %s/struc.sql''' % (
                self.mydumper_export_path,self.mysql_client_path,self.db_user_name,self.db_user_pwd_source,v_db_socket, db_list_str,self.mydumper_export_path)

            print conm

            stdin, stdout, stderr = s.exec_command(conm)

            if stdout.channel.recv_exit_status() == 0:

                result = ''

            else:

                result = stderr.readlines()[-1].strip()

            s.close()

            if result == '':
                text = "%s  Mysqldump export table structure  Execute success !" % datetime.datetime.now()
                log_w(text)
                print "\033[1;32;40m%s\033[0m" % text  # 绿色
            else:
                text = "%s Mysqldump export table structure Execute Error ! %s " % (datetime.datetime.now(),result)
                log_w(text)
                print "\033[1;31;40m%s\033[0m" % text  # 古铜色
                
            return result

        except Exception, e:
            print e.message
            text = "Mysqldump export table structure  Error ! Error Reason: %s" % (e.message)
            log_w(text)
            print "\033[1;31;40m%s\033[0m" % text

            return text
Exemple #27
0
    'test.functional.create_account_tests',
    'test.functional.verify_email_tests',
    'test.functional.api_tests',
    'test.functional.voucher_tests',
    'test.scripts.calculate_views_tests',
]


def all():
    return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)


if __name__ == '__main__':

    mltshpoptions.parse_dictionary(test_settings)

    import tornado.testing
    db = Connection(options.database_host, 'mysql', options.database_user,
                    options.database_password)
    try:
        db.execute("CREATE database %s" % options.database_name)
    except MySQLdb.ProgrammingError, exc:
        if exc.args[0] != 1007:  # database already exists
            raise
    else:
        with open("setup/db-install.sql") as f:
            load_query = f.read()
        db.execute("USE %s" % options.database_name)
        db.execute(load_query)
    tornado.testing.main()
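The bootstrap above treats MySQL error 1007 ("database exists") as benign and loads the schema only on first creation. The same guard as a reusable sketch, assuming the same torndb-style Connection (the helper name is hypothetical):

import MySQLdb

def ensure_database(db, name):
    """Create the database if missing; return True when freshly created."""
    try:
        db.execute("CREATE database %s" % name)
        return True
    except MySQLdb.ProgrammingError, exc:
        if exc.args[0] != 1007:  # 1007: database already exists
            raise
        return False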
Exemple #28
0
def upload_processlist():

    # connect to the central configuration database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    #print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list:  # iterate over the instance list

        i = 0


        for upload_server in upload_server_list:

            instance_id = upload_server['id']
            host_ip = upload_server['ip']
            mysql_port = upload_server['port']

            v_host = host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            v_pl = r"""SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,TIME_MS,ROWS_SENT,ROWS_EXAMINED from PROCESSLIST"""

            #print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_id = process_row['ID']
                vp_user = process_row['USER']
                vp_host = process_row['HOST']
                vp_db = process_row['DB']
                vp_command = process_row['COMMAND']
                vp_time = process_row['TIME']
                vp_state = process_row['STATE']
                vp_info = process_row['INFO']
                vp_time_ms = process_row['TIME_MS']
                vp_rows_sent = process_row['ROWS_SENT']
                vp_rows_examined = process_row['ROWS_EXAMINED']

                # use driver-side parameter substitution so NULL columns and
                # quotes inside INFO cannot break the generated SQL
                db.execute('''insert into processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''',
                instance_id, vp_id, vp_user, vp_host, vp_db, vp_command, vp_time,
                vp_state, vp_info, vp_time_ms, vp_rows_sent, vp_rows_examined)

            db_remote.close()

            i += 1


    db.close()
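A full PROCESSLIST snapshot can be noisy. A hypothetical variant that keeps only active statements running longer than a threshold, using the same information_schema connection as above:

LONG_RUNNING_SECONDS = 5  # hypothetical threshold

def long_running_statements(db_remote, threshold=LONG_RUNNING_SECONDS):
    """Return PROCESSLIST rows for statements active longer than the threshold."""
    return db_remote.query(
        """SELECT ID, USER, HOST, DB, TIME, INFO FROM PROCESSLIST
           WHERE COMMAND <> 'Sleep' AND INFO IS NOT NULL AND TIME > %s""",
        threshold)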
Exemple #29
0
def migrate_for_user(user_id=0, **kwargs):
    """
    This task handles copying MLKSHK user data over to the MLTSHP tables.

    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)

    state = db.get("SELECT is_migrated FROM migration_state WHERE user_id=%s",
                   user_id)
    if not state or state["is_migrated"] == 1:
        logger.info("User %s already migrated" % str(user_id))
        db.close()
        return

    logger.info("Migrating user_id %s..." % str(user_id))

    logger.info("- app records")
    db.execute(
        """INSERT IGNORE INTO app (id, user_id, title, description, secret, redirect_url, deleted, created_at, updated_at) SELECT ma.id, ma.user_id, ma.title, ma.description, ma.secret, ma.redirect_url, 0, ma.created_at, ma.updated_at FROM mlkshk_app ma WHERE ma.user_id=%s AND ma.deleted=0""",
        user_id)

    # comments
    logger.info("- comment records")
    db.execute(
        """INSERT IGNORE INTO comment (id, user_id, sharedfile_id, body, deleted, created_at, updated_at) SELECT mc.id, mc.user_id, mc.sharedfile_id, mc.body, mc.deleted, mc.created_at, mc.updated_at FROM mlkshk_comment mc WHERE mc.user_id=%s AND mc.deleted=0""",
        user_id)

    # conversation
    logger.info("- conversation records")
    db.execute(
        """INSERT IGNORE INTO conversation (id, user_id, sharedfile_id, muted, created_at, updated_at) SELECT mc.id, mc.user_id, mc.sharedfile_id, mc.muted, mc.created_at, mc.updated_at FROM mlkshk_conversation mc WHERE mc.user_id=%s""",
        user_id)

    # favorites
    logger.info("- favorite records")
    db.execute(
        """INSERT IGNORE INTO favorite (id, user_id, sharedfile_id, deleted, created_at, updated_at) SELECT mf.id, mf.user_id, mf.sharedfile_id, mf.deleted, mf.created_at, mf.updated_at FROM mlkshk_favorite mf WHERE mf.user_id=%s AND mf.deleted=0""",
        user_id)

    # invitation_request
    logger.info("- invitation_request records")
    db.execute(
        """INSERT IGNORE INTO invitation_request (id, user_id, manager_id, shake_id, deleted, created_at, updated_at) SELECT mi.id, mi.user_id, mi.manager_id, mi.shake_id, mi.deleted, mi.created_at, mi.updated_at FROM mlkshk_invitation_request mi WHERE mi.user_id=%s AND mi.deleted=0""",
        user_id)

    # notification
    logger.info("- notification records")
    db.execute(
        """INSERT IGNORE INTO notification (id, sender_id, receiver_id, action_id, type, deleted, created_at) SELECT mn.id, mn.sender_id, mn.receiver_id, mn.action_id, mn.type, mn.deleted, mn.created_at FROM mlkshk_notification mn WHERE mn.receiver_id=%s AND mn.deleted=0""",
        user_id)

    # shake
    logger.info("- shake records")
    db.execute("""UPDATE shake SET deleted=0 WHERE user_id=%s""", user_id)

    # shake_manager
    logger.info("- shake_manager records")
    db.execute(
        """INSERT IGNORE INTO shake_manager (id, shake_id, user_id, deleted, created_at, updated_at) SELECT ms.id, ms.shake_id, ms.user_id, ms.deleted, ms.created_at, ms.updated_at FROM mlkshk_shake_manager ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # shakesharedfile
    logger.info("- shakesharedfile records")
    db.execute(
        """INSERT IGNORE INTO shakesharedfile (id, shake_id, sharedfile_id, deleted, created_at) SELECT ms.id, ms.shake_id, ms.sharedfile_id, ms.deleted, ms.created_at FROM mlkshk_shakesharedfile ms WHERE ms.shake_id IN (SELECT DISTINCT id FROM shake WHERE deleted=0 AND user_id=%s) AND ms.deleted=0""",
        user_id)

    # sharedfile
    logger.info("- sharedfile records")
    db.execute(
        """INSERT IGNORE INTO sharedfile (id, source_id, user_id, name, title, source_url, description, share_key, content_type, size, activity_at, parent_id, original_id, deleted, like_count, save_count, view_count, updated_at, created_at) SELECT ms.id, ms.source_id, ms.user_id, ms.name, ms.title, ms.source_url, ms.description, ms.share_key, ms.content_type, ms.size, ms.activity_at, ms.parent_id, ms.original_id, ms.deleted, ms.like_count, ms.save_count, ms.view_count, ms.updated_at, ms.created_at FROM mlkshk_sharedfile ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # nsfw_log
    logger.info("- nsfw_log records")
    db.execute(
        """INSERT IGNORE INTO nsfw_log (id, user_id, sharedfile_id, sourcefile_id, created_at) SELECT mn.id, mn.user_id, mn.sharedfile_id, mn.sourcefile_id, mn.created_at FROM mlkshk_nsfw_log mn WHERE mn.user_id=%s""",
        user_id)

    # magicfile
    logger.info("- magicfile records")
    db.execute(
        """INSERT IGNORE INTO magicfile (id, sharedfile_id, created_at) SELECT mm.id, mm.sharedfile_id, mm.created_at FROM mlkshk_magicfile mm WHERE mm.sharedfile_id IN (SELECT id FROM sharedfile WHERE user_id=%s)""",
        user_id)

    # subscription
    logger.info("- subscription records")
    db.execute(
        """INSERT IGNORE INTO subscription (id, user_id, shake_id, deleted, created_at, updated_at) SELECT ms.id, ms.user_id, ms.shake_id, ms.deleted, ms.created_at, ms.updated_at FROM mlkshk_subscription ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # tagged_file
    logger.info("- tagged_file records")
    db.execute(
        """INSERT IGNORE INTO tagged_file (id, tag_id, sharedfile_id, deleted, created_at) SELECT mt.id, mt.tag_id, mt.sharedfile_id, mt.deleted, mt.created_at FROM mlkshk_tagged_file mt WHERE mt.sharedfile_id IN (SELECT DISTINCT id FROM sharedfile WHERE user_id=%s) AND mt.deleted=0""",
        user_id)

    # special handling for the post table migration since it is so large (~300 million rows)
    logger.info("- post records")
    db.execute(
        """INSERT IGNORE INTO post (id, user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) SELECT mp.id, mp.user_id, mp.sourcefile_id, mp.sharedfile_id, mp.seen, mp.deleted, mp.shake_id, mp.created_at FROM mlkshk_post mp WHERE mp.user_id=%s""",
        user_id)
    # now delete any imported soft-deleted rows
    db.execute("""DELETE FROM post WHERE user_id=%s AND deleted=1""", user_id)

    # this should already be done by the web app, but we may be running
    # this via a script
    logger.info("- user/migration_state update")
    db.execute("""UPDATE user SET deleted=0 WHERE deleted=2 and id=%s""",
               user_id)
    db.execute("""UPDATE migration_state SET is_migrated=1 WHERE user_id=%s""",
               user_id)

    logger.info("- Migration for user %s complete!" % str(user_id))

    user = db.get("SELECT name, email FROM user WHERE id=%s", user_id)

    db.close()

    if options.postmark_api_key and not options.debug_workers:
        pm = postmark.PMMail(
            api_key=options.postmark_api_key,
            sender="*****@*****.**",
            to=user["email"],
            subject="MLTSHP restore has finished!",
            text_body=
            """We'll keep this short and sweet. Your data should be available now!\n\n"""
            +
            ("""Head over to: https://mltshp.com/user/%s and check it out!""" %
             user["name"]))
        pm.send()
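Most of the copies in migrate_for_user share one INSERT IGNORE ... SELECT shape that differs only in table and column names. A table-driven sketch of the same idea (the helper and the two-entry list are illustrative, not the original code):

MIGRATED_TABLES = [
    # (target table, source table, shared column list)
    ("favorite", "mlkshk_favorite",
     "id, user_id, sharedfile_id, deleted, created_at, updated_at"),
    ("subscription", "mlkshk_subscription",
     "id, user_id, shake_id, deleted, created_at, updated_at"),
]

def copy_user_rows(db, user_id):
    """Copy one user's rows table by table, skipping soft-deleted ones."""
    for target, source, columns in MIGRATED_TABLES:
        db.execute(
            "INSERT IGNORE INTO %s (%s) SELECT %s FROM %s "
            "WHERE user_id=%%s AND deleted=0" % (target, columns, columns, source),
            user_id)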