Example #1
from torndb import Connection
from tornado.options import options

def main():
    # Rebuild the post table: clear it, then re-add posts for every
    # shakesharedfile row (add_posts is defined in Example #4).
    db1 = Connection(options.database_host, options.database_name,
                     options.database_user, options.database_password)

    db1.execute("DELETE FROM post WHERE 1")
    ssfs = db1.query("""SELECT shake_id, sharedfile_id from shakesharedfile order by created_at""")
    for shakesharedfile in ssfs:
        sf = db1.get("""SELECT id, source_id, name, deleted, created_at FROM sharedfile WHERE id = %s""", shakesharedfile['sharedfile_id'])
        print "%s. Adding posts for sharedfile: %s created at %s." % (sf.id, sf.name, sf.created_at)
        add_posts(shake_id=shakesharedfile['shake_id'], sharedfile_id=sf['id'], sourcefile_id=sf['source_id'], deleted=sf['deleted'], created_at=sf['created_at'])
Example #2
def calculate_saves(sharedfile_id, **kwargs):
    """
    Take the id of a sharedfile that just got saved by another user.

    If this file is an original, meaning it has no original_id set, we can safely calculate the save count by how
    many sharedfiles have the file's id as their original_id.
    
    However if the file has an original, we also need to recalculate the original's count, as well as this file's save count. 
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    sharedfile = db.get("select original_id from sharedfile where id = %s",
                        sharedfile_id)
    original_id = sharedfile['original_id']

    # If this file is an original, calculate its save count from all sharedfiles where this file is the original_id.
    if original_id == 0:
        original_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where original_id = %s and deleted = 0",
            sharedfile_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   original_saves['count'], sharedfile_id)
    # Otherwise, we need to update the original's save count and this file's save count.
    else:
        original_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where original_id = %s and deleted = 0",
            original_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   original_saves['count'], original_id)

        # Calculate this file's new save count based only on its parent, since it's not an original.
        parent_saves = db.get(
            "SELECT count(id) AS count FROM sharedfile where parent_id = %s and deleted = 0",
            sharedfile_id)
        db.execute("UPDATE sharedfile set save_count = %s WHERE id = %s",
                   parent_saves['count'], sharedfile_id)
    db.close()
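To make the two branches concrete, here is a hypothetical data layout and what a call would recalculate (illustrative rows only, not from the original source):

# sharedfile rows (hypothetical):
#   id=7  original_id=0  parent_id=0   <- the original
#   id=8  original_id=7  parent_id=7   <- saved directly from 7
#   id=9  original_id=7  parent_id=8   <- saved from 8
#
# calculate_saves(8) takes the else branch: it sets id=7's save_count to 2
# (rows with original_id=7) and id=8's save_count to 1 (rows with parent_id=8).
# calculate_saves(7) takes the first branch and counts original_id=7 directly.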
Example #3
def calculate_likes(sharedfile_id, **kwargs):
    """
    This task will get all likes for a shared file, count them, then update the sharedfile.like_count
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)
    result = db.get(
        "SELECT count(id) as like_count from favorite where sharedfile_id = %s and deleted=0",
        sharedfile_id)
    like_count = int(result['like_count'])
    db.execute("UPDATE sharedfile set like_count = %s WHERE id = %s",
               like_count, sharedfile_id)

    tweet_or_magic(db, sharedfile_id, like_count)
    db.close()
Example #4
def add_posts(shake_id=0,
              sharedfile_id=0,
              sourcefile_id=0,
              deleted=0,
              created_at=None,
              **kwargs):
    """
    This task will get the subscribers for a shake and insert
    a post for every user. If the sourcefile_id exists in posts
    it will set "seen" equal to 1.
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)

    #get subscribers for shake_id
    results = db.query(
        """SELECT user_id as id FROM subscription WHERE shake_id = %s and deleted = 0""",
        shake_id)
    for user in results:
        seen = 0
        #does this sourcefile exist in post for this user?
        existing = db.query(
            """SELECT id FROM post WHERE user_id = %s and sourcefile_id = %s and deleted=0 ORDER BY created_at desc LIMIT 1""",
            user['id'], sourcefile_id)
        #if so, mark the post as already seen
        if existing:
            seen = 1
        #insert the post row, using the supplied created_at when present
        if created_at:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id, created_at)
        else:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id)
    #now insert a post for the user who originally posted it.
    sharedfile = db.get("""SELECT user_id from sharedfile WHERE id = %s""",
                        sharedfile_id)
    db.execute(
        """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
        sharedfile['user_id'], sourcefile_id, sharedfile_id, 1, 0, shake_id)
    db.close()
Example #5
def remote_start_mysql_server(v_host, v_os_user, v_os_password, v_db_port):

    # mysql_version codes: 1 = mysql5, 4 = mariadb5, 5 = mariadb10
    # (6 = percona5.6, 7 = pxc5.6; see the query below)
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # Get the MySQL server flavor: mysql5, mariadb5, mariadb10, percona5.6, pxc5.6
    v_get_sql = r'''SELECT case mysql_version when 1 then 'mysql5' when 4 then 'mariadb5' when 5 then 'mariadb10' when 6 then 'percona5.6' when 7 then 'pxc5.6' end mysql_version from tag where ip=%s and port=%s '''

    v_list = db.get(v_get_sql, v_host, int(v_db_port))

    v_mysql_version = v_list['mysql_version']

    db.close()

    #v_db_socket='--socket=/tmp/mysql'+str(v_db_port)+'.sock'
    # nohup keeps mysqld_safe running after the paramiko session exits.
    # The 1>/dev/null 2>&1 redirection is required: without it the command
    # does not execute, likely because under paramiko the output has nowhere to go.
    v_exe_cmd = r'''nohup /apps/svr/%s/bin/mysqld_safe --defaults-file=%s/%s_%d.cnf 1>/dev/null 2>&1 &''' % (
                v_mysql_version, config.mysql_conf_path, v_mysql_version, v_db_port)

    print(v_exe_cmd)

    # Run the command on the remote host via paramiko
    result = remote_shell_cmd_no_result(v_host, v_os_user, v_os_password, v_exe_cmd)

    # Wait 20 seconds for mysql to start fully; a better approach would be to
    # poll in a loop until mysql is confirmed up, then return.
    time.sleep(20)

    return result  # an empty string means success
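The comment above already suggests the improvement: poll until MySQL is actually up instead of sleeping a fixed 20 seconds. A minimal sketch of that idea (wait_for_mysql is a hypothetical helper, not part of the original code):

import socket
import time

def wait_for_mysql(host, port, timeout=60, interval=2):
    # Poll until a TCP connection to MySQL succeeds, or raise on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            sock = socket.create_connection((host, int(port)), timeout=interval)
            sock.close()
            return True
        except socket.error:
            time.sleep(interval)
    raise RuntimeError("MySQL on %s:%s did not come up in %ss" % (host, port, timeout))

remote_start_mysql_server could then call wait_for_mysql(v_host, v_db_port) in place of the fixed sleep.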
Example #6
def remote_off_get_datadir_path(v_host, v_db_port, v_type):

    # mysql_version codes: 1 = mysql5, 4 = mariadb5, 5 = mariadb10
    # (6 = percona5.6, 7 = pxc5.6; see the query below)
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # Get the MySQL server flavor: mysql5, mariadb5, mariadb10, percona5.6, pxc5.6
    v_get_sql = r'''SELECT case mysql_version when 1 then 'mysql5' when 4 then 'mariadb5' when 5 then 'mariadb10' when 6 then 'percona5.6' when 7 then 'pxc5.6' end mysql_version from tag where ip=%s and port=%s '''

    v_list = db.get(v_get_sql, v_host, int(v_db_port))

    v_mysql_version = v_list['mysql_version']

    db.close()

    # Build the datadir path, e.g. /apps/dbdat/mariadb10_data3306
    if v_type == 1:
        v_datadir_path = '%s/%s_data%s' % (config.mysql_datadir_path, v_mysql_version, v_db_port)
    else:
        v_datadir_path = '%s_data%s' % (v_mysql_version, v_db_port)

    print(v_datadir_path)

    return v_datadir_path
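The v_type flag simply selects between an absolute path and a bare directory name. A hypothetical invocation (the host and port are made up, and assume a matching tag row exists):

path = remote_off_get_datadir_path('10.0.0.5', 3306, 1)
# prints and returns something like /apps/dbdat/mariadb10_data3306;
# with v_type != 1 it would return just mariadb10_data3306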
Example #7
# torndb is a lightweight wrapper around MySQLdb; read the official doc below
# to learn how to use it.

# Installation: pip install torndb
# Official doc: http://torndb.readthedocs.org/en/latest/
# Source: https://github.com/bdarnell/torndb/blob/master/torndb.py

from torndb import Connection

# Connect by IP address
db = Connection('127.0.0.1', 'database_name', user='******', password='******')
# Connect by IP address with port
db = Connection('127.0.0.1:1234', 'database_name', user='******', password='******')
# Connect by socket
db = Connection('/tmp/mysql.sock', 'database_name', user='******', password='******')
# Connection over SSH: open an SSH tunnel with
#     ssh -L 1234:localhost:3306 [email protected]
# then connect to 127.0.0.1:1234

# Retrieve one object
post = db.get("SELECT * FROM posts LIMIT 1")

# Retrieve several objects
for post in db.query("SELECT * FROM posts"):
    print(post.title)

# Insert one entry
db.execute("INSERT INTO posts (user_id, content) VALUES (%s, %s)", 12, "hello world !")

# Insert multiple entries
values = [(12, "hello"), (17, "world"), (22, "blah")]
db.executemany("INSERT INTO posts (user_id, content) VALUES (%s, %s)", values)
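torndb has a few more helpers worth knowing; this short sketch is based on the methods in the torndb source linked above:

# execute() returns the new row's auto-increment id
post_id = db.execute("INSERT INTO posts (user_id, content) VALUES (%s, %s)", 12, "hi")

# execute_rowcount() returns the number of affected rows
updated = db.execute_rowcount("UPDATE posts SET content = %s WHERE user_id = %s", "edited", 12)

# iter() streams rows instead of loading the whole result set
for post in db.iter("SELECT * FROM posts"):
    print(post.content)

# Close the connection when done
db.close()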
Example #8
def migrate_for_user(user_id=0, **kwargs):
    """
    This tasks handles copying MLKSHK user data over to the MLTSHP tables.

    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)

    state = db.get("SELECT is_migrated FROM migration_state WHERE user_id=%s",
                   user_id)
    if not state or state["is_migrated"] == 1:
        logger.info("User %s already migrated" % str(user_id))
        db.close()
        return

    logger.info("Migrating user_id %s..." % str(user_id))

    logger.info("- app records")
    db.execute(
        """INSERT IGNORE INTO app (id, user_id, title, description, secret, redirect_url, deleted, created_at, updated_at) SELECT ma.id, ma.user_id, ma.title, ma.description, ma.secret, ma.redirect_url, 0, ma.created_at, ma.updated_at FROM mlkshk_app ma WHERE ma.user_id=%s AND ma.deleted=0""",
        user_id)

    # comments
    logger.info("- comment records")
    db.execute(
        """INSERT IGNORE INTO comment (id, user_id, sharedfile_id, body, deleted, created_at, updated_at) SELECT mc.id, mc.user_id, mc.sharedfile_id, mc.body, mc.deleted, mc.created_at, mc.updated_at FROM mlkshk_comment mc WHERE mc.user_id=%s AND mc.deleted=0""",
        user_id)

    # conversation
    logger.info("- conversation records")
    db.execute(
        """INSERT IGNORE INTO conversation (id, user_id, sharedfile_id, muted, created_at, updated_at) SELECT mc.id, mc.user_id, mc.sharedfile_id, mc.muted, mc.created_at, mc.updated_at FROM mlkshk_conversation mc WHERE mc.user_id=%s""",
        user_id)

    # favorites
    logger.info("- favorite records")
    db.execute(
        """INSERT IGNORE INTO favorite (id, user_id, sharedfile_id, deleted, created_at, updated_at) SELECT mf.id, mf.user_id, mf.sharedfile_id, mf.deleted, mf.created_at, mf.updated_at FROM mlkshk_favorite mf WHERE mf.user_id=%s AND mf.deleted=0""",
        user_id)

    # invitation_request
    logger.info("- invitation_request records")
    db.execute(
        """INSERT IGNORE INTO invitation_request (id, user_id, manager_id, shake_id, deleted, created_at, updated_at) SELECT mi.id, mi.user_id, mi.manager_id, mi.shake_id, mi.deleted, mi.created_at, mi.updated_at FROM mlkshk_invitation_request mi WHERE mi.user_id=%s AND mi.deleted=0""",
        user_id)

    # notification
    logger.info("- notification records")
    db.execute(
        """INSERT IGNORE INTO notification (id, sender_id, receiver_id, action_id, type, deleted, created_at) SELECT mn.id, mn.sender_id, mn.receiver_id, mn.action_id, mn.type, mn.deleted, mn.created_at FROM mlkshk_notification mn WHERE mn.receiver_id=%s AND mn.deleted=0""",
        user_id)

    # shake
    logger.info("- shake records")
    db.execute("""UPDATE shake SET deleted=0 WHERE user_id=%s""", user_id)

    # shake_manager
    logger.info("- shake_manager records")
    db.execute(
        """INSERT IGNORE INTO shake_manager (id, shake_id, user_id, deleted, created_at, updated_at) SELECT ms.id, ms.shake_id, ms.user_id, ms.deleted, ms.created_at, ms.updated_at FROM mlkshk_shake_manager ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # shakesharedfile
    logger.info("- shakesharedfile records")
    db.execute(
        """INSERT IGNORE INTO shakesharedfile (id, shake_id, sharedfile_id, deleted, created_at) SELECT ms.id, ms.shake_id, ms.sharedfile_id, ms.deleted, ms.created_at FROM mlkshk_shakesharedfile ms WHERE ms.shake_id IN (SELECT DISTINCT id FROM shake WHERE deleted=0 AND user_id=%s) AND ms.deleted=0""",
        user_id)

    # sharedfile
    logger.info("- sharedfile records")
    db.execute(
        """INSERT IGNORE INTO sharedfile (id, source_id, user_id, name, title, source_url, description, share_key, content_type, size, activity_at, parent_id, original_id, deleted, like_count, save_count, view_count, updated_at, created_at) SELECT ms.id, ms.source_id, ms.user_id, ms.name, ms.title, ms.source_url, ms.description, ms.share_key, ms.content_type, ms.size, ms.activity_at, ms.parent_id, ms.original_id, ms.deleted, ms.like_count, ms.save_count, ms.view_count, ms.updated_at, ms.created_at FROM mlkshk_sharedfile ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # nsfw_log
    logger.info("- nsfw_log records")
    db.execute(
        """INSERT IGNORE INTO nsfw_log (id, user_id, sharedfile_id, sourcefile_id, created_at) SELECT mn.id, mn.user_id, mn.sharedfile_id, mn.sourcefile_id, mn.created_at FROM mlkshk_nsfw_log mn WHERE mn.user_id=%s""",
        user_id)

    # magicfile
    logger.info("- magicfile records")
    db.execute(
        """INSERT IGNORE INTO magicfile (id, sharedfile_id, created_at) SELECT mm.id, mm.sharedfile_id, mm.created_at FROM mlkshk_magicfile mm WHERE mm.sharedfile_id IN (SELECT id FROM sharedfile WHERE user_id=%s)""",
        user_id)

    # subscription
    logger.info("- subscription records")
    db.execute(
        """INSERT IGNORE INTO subscription (id, user_id, shake_id, deleted, created_at, updated_at) SELECT ms.id, ms.user_id, ms.shake_id, ms.deleted, ms.created_at, ms.updated_at FROM mlkshk_subscription ms WHERE ms.user_id=%s AND ms.deleted=0""",
        user_id)

    # tagged_file
    logger.info("- tagged_file records")
    db.execute(
        """INSERT IGNORE INTO tagged_file (id, tag_id, sharedfile_id, deleted, created_at) SELECT mt.id, mt.tag_id, mt.sharedfile_id, mt.deleted, mt.created_at FROM mlkshk_tagged_file mt WHERE mt.sharedfile_id IN (SELECT DISTINCT id FROM sharedfile WHERE user_id=%s) AND mt.deleted=0""",
        user_id)

    # special handling for post table migration since that thing is so large (300mm rows)
    logger.info("- post records")
    db.execute(
        """INSERT IGNORE INTO post (id, user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) SELECT mp.id, mp.user_id, mp.sourcefile_id, mp.sharedfile_id, mp.seen, mp.deleted, mp.shake_id, mp.created_at FROM mlkshk_post mp WHERE mp.user_id=%s""",
        user_id)
    # now delete any imported soft-deleted rows
    db.execute("""DELETE FROM post WHERE user_id=%s AND deleted=1""", user_id)

    # this should already be done by the web app, but we may be running this
    # via a script
    logger.info("- user/migration_state update")
    db.execute("""UPDATE user SET deleted=0 WHERE deleted=2 and id=%s""",
               user_id)
    db.execute("""UPDATE migration_state SET is_migrated=1 WHERE user_id=%s""",
               user_id)

    logger.info("- Migration for user %s complete!" % str(user_id))

    user = db.get("SELECT name, email FROM user WHERE id=%s", user_id)

    db.close()

    if options.postmark_api_key and not options.debug_workers:
        pm = postmark.PMMail(
            api_key=options.postmark_api_key,
            sender="*****@*****.**",
            to=user["email"],
            subject="MLTSHP restore has finished!",
            text_body=
            """We'll keep this short and sweet. Your data should be available now!\n\n"""
            +
            ("""Head over to: https://mltshp.com/user/%s and check it out!""" %
             user["name"]))
        pm.send()
Example #9
def add_single_backup_job(v_setup_id):  # id of the row just updated or inserted in the mysql_ins_bak_setup table

    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # defaults to the in-memory jobstore

    url = "sqlite:////home/apps/dbajob.sqlite"

    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()

    print "a"

    # Connect to the configuration database to read the backup schedule settings
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id and a.id=%s"""

    print(v_sql)

    bak_server = db.get(v_sql, v_setup_id)

    instance_id = bak_server["instance_id"]

    from_host = bak_server["ip"]

    # print from_host

    mysql_port = bak_server["port"]

    backup_interval_type = bak_server["backup_interval_type"]

    backup_start_time = bak_server["backup_start_time"]

    str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time

    print str_start_date

    v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

    if backup_interval_type == 1:  # every day

        # scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            days=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    elif backup_interval_type == 2:  # every week weeks=1

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            weeks=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    elif backup_interval_type == 3:  # every hour hours=1

        scheduler.add_job(
            backup,
            "interval",
            id=v_job_id,
            hours=1,
            start_date=str_start_date,
            args=[from_host, mysql_port, os_user, os_password],
            replace_existing=True,
            jobstore="sqlite_js",
        )

    else:
        pass

    scheduler.print_jobs()

    print "b"

    scheduler.start()

    scheduler.print_jobs()

    print "c"

    # Record the scheduler state in the database; backup_result_type 0 means
    # the job has been scheduled but the actual backup has not started yet
    v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
    values(%s,0)"""

    db.execute(v_sche_start_sql, instance_id)

    db.close()
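The three add_job branches above differ only in the interval keyword. A more compact equivalent (a sketch under the same assumptions, not the original code):

# Map backup_interval_type to an APScheduler interval kwarg:
# 1 = daily, 2 = weekly, 3 = hourly
interval_kwargs = {1: {"days": 1}, 2: {"weeks": 1}, 3: {"hours": 1}}
kwargs = interval_kwargs.get(backup_interval_type)
if kwargs:
    scheduler.add_job(
        backup,
        "interval",
        id=v_job_id,
        start_date=str_start_date,
        args=[from_host, mysql_port, os_user, os_password],
        replace_existing=True,
        jobstore="sqlite_js",
        **kwargs
    )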