Example #1
def main():
    db1 = Connection(options.database_host, options.database_user,
                     options.database_password)

    #grab all shared files in order
    sfs = db1.query("""SELECT id FROM sharedfile ORDER BY created_at""")
    #for each, get counts

    for sf in sfs:
        likes = 0
        saves = 0

        like_counts = db1.query(
            "SELECT count(id) as like_count from favorite where sharedfile_id = %s and deleted=0",
            sf.id)
        if like_counts:
            likes = like_counts[0]['like_count']

        save_counts = db1.query(
            "SELECT count(id) AS save_count FROM sharedfile WHERE original_id = %s and deleted = 0",
            sf.id)
        if save_counts[0]['save_count'] > 0:
            saves = save_counts[0]['save_count']
        else:
            save_counts = db1.query(
                "SELECT count(id) AS save_count FROM sharedfile WHERE parent_id = %s and deleted = 0",
                sf.id)
            saves = save_counts[0]['save_count']

        if likes > 0 or saves > 0:
            print "UPDATE sharedfile SET like_count = %s, save_count = %s WHERE id = %s" % (
                likes, saves, sf.id)
            print db1.execute(
                "UPDATE sharedfile SET like_count = %s, save_count = %s WHERE id = %s",
                likes, saves, sf.id)
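
This script binds its values through torndb's %s placeholders rather than formatting them into the SQL text. A minimal, self-contained sketch of that pattern (the host, database name, and credentials below are placeholders, not values from the original script):

from torndb import Connection

db = Connection("127.0.0.1", "example_db", user="example_user", password="example_pass")
# Values passed after the SQL string are bound to the %s markers by the
# driver, so they are escaped rather than pasted into the statement.
rows = db.query("SELECT id FROM sharedfile WHERE deleted = %s ORDER BY created_at LIMIT %s", 0, 10)
for row in rows:
    print row.id
db.close()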
Example #2
    def get(self):
        category = self.get_argument("category", "all")
        cur_page = self.get_argument("page", "0")
        query = self.get_argument("query", "")
        num_per_page = 5

        db = Connection(settings.DATABASE_SERVER,
                        settings.DATABASE_NAME,
                        settings.DATABASE_USER,
                        settings.DATABASE_PASSWORD,
                        )

        condition = "WHERE category='{0}'".format(category)
        if category == "all":
            condition = ""
        if query:
            condition = """WHERE UPPER(title) LIKE '%%{0}%%'
                           OR UPPER(profile) LIKE '%%{0}%%'
                           OR UPPER(author) LIKE '%%{0}%%'
                           OR UPPER(content) LIKE '%%{0}%%'
                        """.format(query.upper())

        sql = "SELECT COUNT(*) FROM articles {0}".format(condition)
        count = db.query(sql)[0]["COUNT(*)"]
        max_page = int(math.ceil((count + 0.0) / num_per_page))

        sql = """SELECT articles.id AS id, title, profile, author, url, picUrl,
                        articles.time AS time,
                        (SELECT COUNT(*) FROM article_reads
                         WHERE article_reads.article_id=articles.id) AS read_count,
                        (SELECT COUNT(*) FROM article_comment
                         WHERE article_comment.article_id=articles.id) AS comment_count
                 FROM articles
                 {0}
                 ORDER BY articles.time DESC
                 LIMIT {1}, {2};
              """.format(condition, int(cur_page) * num_per_page, num_per_page)
        articles = db.query(sql)

        for art in articles:
            # art["read_count"], art["comment_count"] = \
            #     get_article_statistics(db, art["id"])
            art["time"] = art["time"].strftime("%Y-%m-%d")
            art["picUrl"] = "/image-proxy/?url={0}".format(
                urllib.quote(art["picUrl"])
            )

        kwargs = dict(articles=articles,
                      category=category,
                      cur_page=int(cur_page),
                      max_page=max_page)

        self.render("", kwargs=kwargs)
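
The search condition above is built with str.format(), so quotes in the query argument end up inside the SQL text. A hedged alternative sketch (not part of the original handler) that expresses the same LIKE search with bound parameters; it assumes db is the torndb Connection opened above:

def search_articles(db, query, cur_page, num_per_page=5):
    # Same LIKE search as above, but the pattern is passed as a parameter,
    # so the driver escapes whatever the user typed.
    pattern = "%" + query.upper() + "%"
    return db.query(
        "SELECT * FROM articles"
        " WHERE UPPER(title) LIKE %s OR UPPER(profile) LIKE %s"
        " OR UPPER(author) LIKE %s OR UPPER(content) LIKE %s"
        " ORDER BY time DESC LIMIT %s, %s",
        pattern, pattern, pattern, pattern,
        int(cur_page) * num_per_page, num_per_page)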
Example #3
def read_db(info):
    """Get info from Racktables DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if object_id already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id
    if not object_id:
        print "Object:{0} does not exist".format(info['hostname'])
        return False

    # get the location info
    rack_id_list = []
    unit_no_list = []
    for item in db.query(
            "select rack_id,unit_no from RackSpace where object_id=(select id from Object where name='{0}')"
            .format(info['hostname'])):
        rack_id_list.append(int(item.rack_id))
        unit_no_list.append(int(item.unit_no))
    if not rack_id_list:
        print "Object:{0} does not have location info".format(info['hostname'])
        return False
    rack_id = ','.join(str(i) for i in list(set(rack_id_list)))
    unit_no = ','.join(str(i) for i in list(set(unit_no_list)))

    location_name = ""
    row_name = ""
    rack_name = ""
    for item in db.query(
            "select location_name,row_name,name from Rack where id='{0}'".
            format(rack_id)):
        location_name = item.location_name
        row_name = item.row_name
        rack_name = item.name
    if not location_name or not row_name or not rack_name:
        print "Object:{0} does not have location info".format(info['hostname'])
        return False
    print "RACKSPACE:   {0}:{1}:{2}:{3}".format(location_name, row_name,
                                                rack_name, unit_no)

    # close db
    db.close()

    return {
        'location_name': location_name,
        'row_name': row_name,
        'rack_name': rack_name,
        'unit_no': unit_no
    }
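
A side note on the rows used above: torndb returns dict-like row objects, which is why this function can write item.rack_id while other examples in this list index rows like dictionaries. A tiny sketch against the same Racktables connection settings:

db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)
for item in db.query("select rack_id, unit_no from RackSpace limit 3"):
    # attribute access and key access read the same column
    print item.rack_id, item['unit_no']
db.close()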
Example #4
def list_object(info):
    """List the objects of the given rackspace"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if rackspace is correct
    rs_info = opts['hostname'].split(':')
    colo = "".join(rs_info[0:1])
    row = "".join(rs_info[1:2])
    rack = "".join(rs_info[2:3])
    if not rack:
        print "The rackspace is not correct"
        return False

    # get rack_id
    rack_id = ""
    for item in db.query(
            "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'"
            .format(rack, colo, row)):
        rack_id = item.id
    if not rack_id:
        print "Failed to get rack_id"
        return False

    # get object_id
    object_id_list = []
    for item in db.query(
            "select * from RackSpace where rack_id={0}".format(rack_id)):
        object_id_list.append(item.object_id)
    if len(object_id_list) == 0:
        print "Failed to get object_id"
        return False

    # get rid of the duplicated items then sort and read one by one
    for object_id in sorted(list(set(object_id_list))):
        for item in db.query(
                "select * from Object where id={0}".format(object_id)):
            object_name = item.name
            object_type_id = item.objtype_id
            for item in db.query(
                    "select * from Dictionary where dict_key={0}".format(
                        object_type_id)):
                object_type_name = item.dict_value
        print "{0}: {1}".format(object_type_name, object_name)

    # close db
    db.close()

    return True
Example #5
def delete_object(info):
    """Delete object from DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if the object already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id

    # delete object if already exists
    if object_id:
        url = """http://{0}/racktables/index.php?module=redirect&op=deleteObject&page=depot&tab=addmore&object_id={1}""".format(
            rt_server, object_id
        )
        req = requests.get(url, auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to delete the existing object: {0}".format(info["hostname"])
            return False
        else:
            print "OK - Deleted the existing object: {0}".format(info["hostname"])

    # close db
    db.close()

    return True
Example #6
    def get(self):

        db = Connection(settings.DATABASE_SERVER,
                        settings.DATABASE_NAME,
                        settings.DATABASE_USER,
                        settings.DATABASE_PASSWORD,
                        )

        condition = """WHERE target_user_id='1' and consumed='0'
                        and article_comment.user_id=user.id
                        and article_comment.article_id=articles.id"""

        sql = """SELECT user_id, name AS user_name, email, article_id,
                        article_comment.time AS time, title as article_title,
                        article_comment.content AS content
                 FROM article_comment,user,articles {0}
                 ORDER BY article_comment.time DESC;""".format(condition)
        comments = db.query(sql)

        for comment in comments:
            comment["time"] = convert_to_time_zone(comment["time"],
                                                   "Asia/Shanghai")
            comment["time"] = comment["time"].strftime("%Y-%m-%d %H:%M:%S")

        kwargs = dict(comments=comments,
                      )

        self.render("", kwargs=kwargs)
Example #7
def do_query(database=None):
    """Perform generic query on database."""
    # Pick up the database credentials
    creds = get_db_creds(database)

    # If we couldn't find corresponding credentials, throw a 404
    if not creds:
        msg = "Unable to find credentials matching {0}."
        return {"ERROR": msg.format(database)}, 404

    # Prepare the database connection
    app.logger.debug("Connecting to %s database (%s)" % (
        database, request.remote_addr))
    db = Connection(**creds)

    # See if we received a query
    sql = request.form.get('sql')
    if not sql:
        sql = request.args.get('sql')
        if not sql:
            return {"ERROR": "SQL query missing from request."}, 400

    # If the query has a percent sign, we need to escape it
    if '%' in sql:
        sql = sql.replace('%', '%%')

    # Attempt to run the query
    try:
        app.logger.info("%s attempting to run \" %s \" against %s database" % (
            request.remote_addr, sql, database))
        results = db.query(sql)
        app.logger.info(results)
    except Exception, e:
        return {"ERROR": ": ".join(str(i) for i in e.args)}, 422
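
The replace('%', '%%') step above exists because the SQL text still goes through the driver's %-style substitution, so a literal percent sign that is not a placeholder has to be doubled. A minimal sketch of the same idea, with placeholder connection details:

from torndb import Connection

db = Connection("127.0.0.1", "example_db", user="example", password="example")
sql = "SELECT name FROM users WHERE name LIKE 'a%'"
# Doubling the percent sign keeps the driver's %-formatting from choking on it;
# MySQL still receives ... LIKE 'a%'.
results = db.query(sql.replace('%', '%%'))
db.close()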
Example #8
    def get_project_keys_from_mysql(self):
        from torndb import Connection
        db = Connection(
            "%s:%s" % (self.config.MYSQL_HOST, self.config.MYSQL_PORT),
            self.config.MYSQL_DB,
            user=self.config.MYSQL_USER,
            password=self.config.MYSQL_PASS
        )

        query = "select project_id, public_key, secret_key from sentry_projectkey"
        logging.info("Executing query %s in MySQL", query)

        project_keys = {}

        try:
            db_projects = db.query(query)

            if db_projects is None:
                return None

            for project in db_projects:
                logging.info("Updating information for project with id %s...", project.project_id)
                self.add_project(project_keys, project.project_id, project.public_key, project.secret_key)

        finally:
            db.close()
        return project_keys
Example #9
def do_query(database=None):
    # Pick up the database credentials
    # app.logger.warning("%s requesting access to %s database" % (
    #     request.remote_addr, database))
    creds = get_db_creds(database)

    # If we couldn't find corresponding credentials, throw a 404
    if not creds:
        return {"ERROR": "Unable to find credentials matching %s." % database}, 404

    # Prepare the database connection
    app.logger.debug("Connecting to %s database (%s)" % (
        database, request.remote_addr))
    db = Connection(**creds)

    # See if we received a query
    sql = request.form.get('sql')
    if not sql:
        sql = request.args.get('sql')
        if not sql:
            return {"ERROR": "SQL query missing from request."}

    # If the query has a percent sign, we need to escape it
    if '%' in sql:
        sql = sql.replace('%', '%%')

    # Attempt to run the query
    try:
        app.logger.info("%s attempting to run \" %s \" against %s database" % (
            request.remote_addr, sql, database))
        results = db.query(sql)
        app.logger.info(results)
    except Exception, e:
        return {"ERROR": ": ".join(str(i) for i in e.args)}
Example #10
def delete_object(info):
    """Delete object from DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if the object already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id

    # delete object if already exists
    if object_id:
        url = """http://{0}/racktables/index.php?module=redirect&op=deleteObject&page=depot&tab=addmore&object_id={1}"""\
              .format(rt_server,object_id)
        req = requests.get(url, auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to delete the existing object: {0}".format(
                info['hostname'])
            return False
        else:
            print "OK - Deleted the existing object: {0}".format(
                info['hostname'])

    # close db
    db.close()

    return True
Example #11
    def get(self):
        # template_name = "article_list.html"
        template_name = "mobile/article_list.html"
        category = self.get_argument("category", "all")
        cur_page = self.get_argument("page", "0")
        query = self.get_argument("query", "")
        num_per_page = 5

        db = Connection(settings.DATABASE_SERVER,
                        settings.DATABASE_NAME,
                        settings.DATABASE_USER,
                        settings.DATABASE_PASSWORD,
                        )

        condition = "WHERE category='{0}'".format(category)
        if category == "all":
            condition = ""
        if query:
            condition = """WHERE UPPER(title) LIKE '%%{0}%%'
                           OR UPPER(profile) LIKE '%%{0}%%'
                           OR UPPER(author) LIKE '%%{0}%%'
                           OR UPPER(content) LIKE '%%{0}%%'
                        """.format(query.upper())

        sql = "SELECT COUNT(*) FROM articles {0}".format(condition)
        count = db.query(sql)[0]["COUNT(*)"]
        max_page = int(math.ceil((count + 0.0) / num_per_page))

        kwargs = dict(
            category=category,
            query=query,
            cur_page=int(cur_page),
            max_page=max_page)

        self.render(template_name, **kwargs)
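
The paging arithmetic used by these handlers is easy to check by hand. With num_per_page = 5 and, say, 23 matching rows (a made-up count), max_page works out to 5, and the zero-based page index maps straight onto the LIMIT offset:

import math

num_per_page = 5
count = 23                                                 # pretend COUNT(*) returned 23
max_page = int(math.ceil((count + 0.0) / num_per_page))    # ceil(4.6) -> 5
cur_page = 2                                               # zero-based, as in the handlers
offset = cur_page * num_per_page                           # -> "LIMIT 10, 5"
print max_page, offset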
Example #12
def update_rack(info, object_id):
    """Automate server audit for rack info into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # update the rackspace
    if opts['rackspace']:
        rs_info = opts['rackspace'].split(':')
        colo = "".join(rs_info[0:1])
        row = "".join(rs_info[1:2])
        rack = "".join(rs_info[2:3])
        atom = "".join(rs_info[3:4])
        if not atom:
            print "The rackspace is not correct"
            return False

        # get rack_id
        rack_id = ""
        for item in db.query(
                "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'"
                .format(rack, colo, row)):
            rack_id = item.id
        if not rack_id:
            print "Failed to get rack_id"
            return False

        atom_list = atom.split(',')
        atom_data = []
        for i in atom_list:
            if opts['rackposition']:
                if opts['rackposition'] in ['left', 'front']:
                    atom_data.append("&atom_{0}_{1}_0=on".format(rack_id, i))
                if opts['rackposition'] in ['right', 'back']:
                    atom_data.append("&atom_{0}_{1}_2=on".format(rack_id, i))
                if opts['rackposition'] in ['interior']:
                    atom_data.append("&atom_{0}_{1}_1=on".format(rack_id, i))
            else:
                atom_data.append(
                    "&atom_{0}_{1}_0=on&atom_{0}_{1}_1=on&atom_{0}_{1}_2=on".
                    format(rack_id, i))
        atom_url = "".join(atom_data)

        url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=rackspace&op=updateObjectAllocation""".format(
            rt_server)
        payload = """object_id={0}&rackmulti%5B%5D={1}&comment=&got_atoms=Save{2}"""\
                  .format(object_id,rack_id,atom_url)
        req = requests.post(url,
                            data=payload,
                            headers=rt_headers,
                            auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to update rackspace"
            return False
        print "OK - Updated rackspace"

    # close db
    db.close()

    # end
    return True
Example #13
    def get(self, article_id):
        # template_name = "article_details.html"
        template_name = "mobile/article_details.html"

        db = Connection(settings.DATABASE_SERVER,
                        settings.DATABASE_NAME,
                        settings.DATABASE_USER,
                        settings.DATABASE_PASSWORD,
                        )

        sql = "SELECT * FROM articles WHERE id='{0}'".format(
            article_id)
        article = db.query(sql)[0]

        # article["read_count"], article["comment_count"] = \
        #     get_article_statistics(db, article_id)
        article["url"] = urllib.quote(article["url"])

        # Update article read count
        now = datetime.datetime.now()
        sql = """INSERT INTO article_reads (`article_id`, `user_id`, `time`)
                 VALUES ('{0}', '{1}', '{2}')
              """.format(article_id, 0, now)
        db.execute(sql)

        kwargs = dict(article=article,
                      day=article["day"])

        super(ArticleDetailsHandler, self).render(
            template_name,
            **kwargs
        )
Example #14
def update_rack(info, object_id):
    """Automate server audit for rack info into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # update the rackspace
    if opts["rackspace"]:
        rs_info = opts["rackspace"].split(":")
        colo = "".join(rs_info[0:1])
        row = "".join(rs_info[1:2])
        rack = "".join(rs_info[2:3])
        atom = "".join(rs_info[3:4])
        if not atom:
            print "The rackspace is not correct"
            return False

        # get rack_id
        rack_id = ""
        for item in db.query(
            "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'".format(
                rack, colo, row
            )
        ):
            rack_id = item.id
        if not rack_id:
            print "Failed to get rack_id"
            return False

        atom_list = atom.split(",")
        atom_data = []
        for i in atom_list:
            if opts["rackposition"]:
                if opts["rackposition"] in ["left", "front"]:
                    atom_data.append("&atom_{0}_{1}_0=on".format(rack_id, i))
                if opts["rackposition"] in ["right", "back"]:
                    atom_data.append("&atom_{0}_{1}_2=on".format(rack_id, i))
                if opts["rackposition"] in ["interior"]:
                    atom_data.append("&atom_{0}_{1}_1=on".format(rack_id, i))
            else:
                atom_data.append("&atom_{0}_{1}_0=on&atom_{0}_{1}_1=on&atom_{0}_{1}_2=on".format(rack_id, i))
        atom_url = "".join(atom_data)

        url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=rackspace&op=updateObjectAllocation""".format(
            rt_server
        )
        payload = """object_id={0}&rackmulti%5B%5D={1}&comment=&got_atoms=Save{2}""".format(
            object_id, rack_id, atom_url
        )
        req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
        if req.status_code != requests.codes.ok:
            print "Failed to update rackspace"
            return False
        print "OK - Updated rackspace"

    # close db
    db.close()

    # end
    return True
Example #15
def read_db(info):
    """Get info from Racktables DB"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if object_id already exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print "Object:{0} does not exist".format(info["hostname"])
        return False

    # get the location info
    rack_id_list = []
    unit_no_list = []
    for item in db.query(
        "select rack_id,unit_no from RackSpace where object_id=(select id from Object where name='{0}')".format(
            info["hostname"]
        )
    ):
        rack_id_list.append(int(item.rack_id))
        unit_no_list.append(int(item.unit_no))
    if not rack_id_list:
        print "Object:{0} does not have location info".format(info["hostname"])
        return False
    rack_id = ",".join(str(i) for i in list(set(rack_id_list)))
    unit_no = ",".join(str(i) for i in list(set(unit_no_list)))

    location_name = ""
    row_name = ""
    rack_name = ""
    for item in db.query("select location_name,row_name,name from Rack where id='{0}'".format(rack_id)):
        location_name = item.location_name
        row_name = item.row_name
        rack_name = item.name
    if not location_name or not row_name or not rack_name:
        print "Object:{0} does not have location info".format(info["hostname"])
        return False
    print "RACKSPACE:   {0}:{1}:{2}:{3}".format(location_name, row_name, rack_name, unit_no)

    # close db
    db.close()

    return {"location_name": location_name, "row_name": row_name, "rack_name": rack_name, "unit_no": unit_no}
Example #16
def add_posts(shake_id=0,
              sharedfile_id=0,
              sourcefile_id=0,
              deleted=0,
              created_at=None,
              **kwargs):
    """
    This task will get the subscribers for a shake and insert
    a post for every user. If the sourcefile_id exists in posts
    it will set "seen" equal to 1.
    """
    db = Connection(options.database_host, options.database_name,
                    options.database_user, options.database_password)

    #get subscribers for shake_id
    results = db.query(
        """SELECT user_id as id FROM subscription WHERE shake_id = %s and deleted = 0""",
        shake_id)
    for user in results:
        seen = 0
        #does this sourcefile exist in post for this user?
        existing = db.query(
            """SELECT id FROM post WHERE user_id = %s and sourcefile_id = %s and deleted=0 ORDER BY created_at desc LIMIT 1""",
            user['id'], sourcefile_id)
        #if so, set seen = 1
        if existing:
            seen = 1
        #if not, insert a new row
        if created_at:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id, created_at)
        else:
            db.execute(
                """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
                user['id'], sourcefile_id, sharedfile_id, seen, deleted,
                shake_id)
    #now insert a post for the user who originally posted it.
    sharedfile = db.get("""SELECT user_id from sharedfile WHERE id = %s""",
                        sharedfile_id)
    db.execute(
        """INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at) VALUES (%s, %s, %s, %s, %s, %s, NOW())""",
        sharedfile['user_id'], sourcefile_id, sharedfile_id, 1, 0, shake_id)
    db.close()
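
A usage sketch for the task above; the ids and timestamp are made up and only illustrate the signature:

add_posts(shake_id=1,
          sharedfile_id=42,
          sourcefile_id=7,
          deleted=0,
          created_at="2012-01-01 00:00:00")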
Example #17
def list_object(info):
    """List the objects of the given rackspace"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # check if rackspace is correct
    rs_info = opts["hostname"].split(":")
    colo = "".join(rs_info[0:1])
    row = "".join(rs_info[1:2])
    rack = "".join(rs_info[2:3])
    if not rack:
        print "The rackspace is not correct"
        return False

    # get rack_id
    rack_id = ""
    for item in db.query(
        "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'".format(rack, colo, row)
    ):
        rack_id = item.id
    if not rack_id:
        print "Failed to get rack_id"
        return False

    # get object_id
    object_id_list = []
    for item in db.query("select * from RackSpace where rack_id={0}".format(rack_id)):
        object_id_list.append(item.object_id)
    if len(object_id_list) == 0:
        print "Failed to get object_id"
        return False

    # get rid of the duplicated items then sort and read one by one
    for object_id in sorted(list(set(object_id_list))):
        for item in db.query("select * from Object where id={0}".format(object_id)):
            object_name = item.name
            object_type_id = item.objtype_id
            for item in db.query("select * from Dictionary where dict_key={0}".format(object_type_id)):
                object_type_name = item.dict_value
        print "{0}: {1}".format(object_type_name, object_name)

    # close db
    db.close()

    return True
Example #18
def main():
    db1 = Connection(options.database_host, options.database_user, options.database_password)

    db1.execute("DELETE FROM post WHERE 1")
    ssfs = db1.query("""SELECT shake_id, sharedfile_id from shakesharedfile order by created_at""")
    for shakesharedfile in ssfs:
        sf = db1.get("""SELECT id, source_id, name, deleted, created_at FROM sharedfile WHERE id = %s""", shakesharedfile['sharedfile_id'])
        print "%s. Adding posts for sharedfile: %s created at %s." % (sf.id, sf.name, sf.created_at)
        add_posts(shake_id=shakesharedfile['shake_id'], sharedfile_id=sf['id'], sourcefile_id=sf['source_id'], deleted=sf['deleted'], created_at=sf['created_at'])
Example #19
def get_query_result(query, **kw_dict):
    """get query result
    """
    conn = Connection(**MYSQL_SETTINGS)
    if not kw_dict:
        try:
            result = conn.query(query)
            conn.close()
            return result
        except Exception, e:
            print e
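
As written, the function above only handles the case with no keyword parameters. Newer torndb versions (the GitHub source linked in Example #31) also accept keyword parameters bound to %(name)s placeholders; assuming such a version, the missing branch could look roughly like this sketch (an illustration, not code from the original source):

def get_query_result_kw(query, **kw_dict):
    """Hypothetical companion to get_query_result for the keyword case."""
    conn = Connection(**MYSQL_SETTINGS)
    try:
        # e.g. query = "select * from users where id = %(user_id)s"
        return conn.query(query, **kw_dict)
    except Exception, e:
        print e
    finally:
        conn.close()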
Example #20
def update_blank_switch_security_pdu_offline(info):
    """Automate server audit for PatchPanel/NetworkSwitch/NetworkSecurity/PDU into Racktables or as offline mode"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(rt_server)
    if opts["blank"]:
        payload = """0_object_type_id=9&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["switch"]:
        payload = """0_object_type_id=8&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["security"]:
        payload = """0_object_type_id=798&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["pdu"]:
        payload = """0_object_type_id=2&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )
    if opts["offline"]:
        payload = """0_object_type_id=4&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format(
            info["hostname"]
        )

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info["hostname"])
        return False
    else:
        print "OK - Created object: {0}".format(info["hostname"])

    # get object_id
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # update rack info
    update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
Example #21
def main():
    db1 = Connection(options.database_host, options.database_user,
                     options.database_password)

    #grab all group shakes
    shakes = db1.query("""SELECT id, user_id FROM shake WHERE type=%s""",
                       "group")

    for shake in shakes:
        db1.execute(
            """INSERT IGNORE INTO subscription (user_id, shake_id, deleted, created_at, updated_at)
                        VALUES (%s, %s, 0, NOW(), NOW())""", shake['user_id'],
            shake['id'])
Example #22
def update_blank_switch_security_pdu_offline(info):
    """Automate server audit for PatchPanel/NetworkSwitch/NetworkSecurity/PDU into Racktables or as offline mode"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(
        rt_server)
    if opts['blank']:
        payload = """0_object_type_id=9&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['switch']:
        payload = """0_object_type_id=8&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['security']:
        payload = """0_object_type_id=798&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['pdu']:
        payload = """0_object_type_id=2&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])
    if opts['offline']:
        payload = """0_object_type_id=4&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\
                  .format(info['hostname'])

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info['hostname'])
        return False
    else:
        print "OK - Created object: {0}".format(info['hostname'])

    # get object_id
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # update rack info
    update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
Example #23
def get_job_status(v_job_id):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_get_sql = '''SELECT cur_cum_prog_desc,cur_prog_com_rate,cur_prog_shell_cmd from dba_job_progress where id=(select max(id) from dba_job_progress where job_id=%d)''' % (
        v_job_id)

    job_list = db.query(v_get_sql)
            

    db.close()

    return job_list
Example #24
def get_domain_list_from_env(v_role_id,belong_env):

    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')


    str_sql = '''select b.id,b.name from resources_role_app a,resources_app b where 
    a.app_id = b.id and a.role_id = %d and b.app_type= %d ''' % (v_role_id,belong_env)
        
    

    app_list = db.query(str_sql)

    db.close()

    return app_list
Example #25
 def get_latest_article(self, from_user, to_user, timestamp):
     db = Connection(settings.DATABASE_SERVER,
                     settings.DATABASE_NAME,
                     settings.DATABASE_USER,
                     settings.DATABASE_PASSWORD,
                     )
     sql = "SELECT * FROM articles ORDER BY time DESC LIMIT 1"
     article = db.query(sql)[0]
     return self.make_single_pic_response(
         from_user,
         to_user,
         timestamp,
         article["title"],
         article["profile"],
         article["picUrl"],
         "{0}/m#/article_details/{1}".format(
             settings.SITE_HTTP_URL,
             article["id"]
         )
     )
Example #26
 def search_for_articles(self, from_user, to_user, timestamp, query):
     db = Connection(settings.DATABASE_SERVER,
                     settings.DATABASE_NAME,
                     settings.DATABASE_USER,
                     settings.DATABASE_PASSWORD,
                     )
      # Simple way to avoid SQL injection attacks
     if query.strip() and ";" not in query:
         condition = u"""WHERE UPPER(title) LIKE '%%{0}%%'
                        OR UPPER(profile) LIKE '%%{0}%%'
                        OR UPPER(author) LIKE '%%{0}%%'
                        OR UPPER(content) LIKE '%%{0}%%'
                     """.format(query.strip().upper())
     else:
         condition = ""
     sql = u"SELECT * FROM articles {0} ORDER BY time DESC LIMIT 10".format(
         condition)
     articles = db.query(sql)
     if len(articles) > 0:
         return self.make_multi_pic_response(
             from_user,
             to_user,
             timestamp,
             [article["title"] for article in articles],
             [article["profile"] for article in articles],
             [article["picUrl"] for article in articles],
             ["{0}/m#/article_details/{1}".format(
                 settings.SITE_HTTP_URL,
                 article["id"]
             ) for article in articles])
     else:
         return self.make_text_response(
             from_user,
             to_user,
             timestamp,
             u"还没有关于“%s”的内容哦!你有什么想法呢?告诉微君吧!" % query)
Example #27
    def update(self):
        config = self.application.config
        db = Connection(
            "%s:%s" % (config.MYSQL_HOST, config.MYSQL_PORT),
            config.MYSQL_DB,
            user=config.MYSQL_USER,
            password=config.MYSQL_PASS
        )

        query = "select project_id, public_key, secret_key from sentry_projectkey"
        logging.info("Executing query %s in MySQL" % query)

        projects = {}

        try:
            db_projects = db.query(query)

            if db_projects is None:
                logging.warn("Could not retrieve information from sentry's database because MySQL Server was unavailable")
                return

            for project in db_projects:
                logging.info("Updating information for project with id %s..." % project.project_id)

                if not project.project_id in projects.keys():
                    projects[project.project_id] = {
                        "public_key": [],
                        "secret_key": []
                    }

                projects[project.project_id]['public_key'].append(project.public_key)
                projects[project.project_id]['secret_key'].append(project.secret_key)

            self.application.project_keys = projects
        finally:
            db.close()
Example #28
def upload_user_stats():  

    # connect to the configuration center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    db.execute('truncate table meta_table_statistics')  # truncate first, then insert

    db.execute('truncate table meta_index_statistics')  # truncate first, then insert

    


    # the E4S server with id 64 is not managed by the platform, so exclude it manually (b.id != 64)
    v_sql = r"""SELECT b.ip,b.port,b.id as instance_id,a.id as schema_id,a.name as db_name from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    #print v_sql

    upload_tables_list = db.query(v_sql)

    if upload_tables_list:  # loop over the instance list

        i=0

        

        for upload_table in upload_tables_list:

            instance_id = upload_table['instance_id']

            schema_id = upload_table['schema_id']

            db_name = upload_table['db_name']

            host_ip = upload_table['ip']

            mysql_port = upload_table['port']

            v_host =host_ip + ':' + str(mysql_port)

            # connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # fetch the table metadata, then insert it

            v_pl = r"""SELECT table_schema,table_name,rows_read,rows_changed,rows_changed_x_indexes 
            from table_statistics where table_schema='%s' """ % (db_name)

            #print v_pl

            table_list = db_remote.query(v_pl)

            for table_row in table_list:

            

                table_schema = table_row['table_schema']

                table_name = table_row['table_name']

                rows_read = table_row['rows_read']

                rows_changed = table_row['rows_changed']


                rows_changed_x_indexes = table_row['rows_changed_x_indexes']

                
                
                #try:
                v_insert_sql='''insert into meta_table_statistics(instance_id,schema_id,table_schema,
                table_name,rows_read,rows_changed,rows_changed_x_indexes) 
                values(%d,%d,'%s','%s',%d,%d,%d)''' % (
                instance_id,schema_id,table_schema,
                table_name,rows_read,rows_changed,rows_changed_x_indexes
                )

                #print v_insert_sql

                db.execute(v_insert_sql.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            

            # fetch the index metadata, then insert it

            v_pl2 = r"""SELECT table_schema,table_name,index_name,rows_read from index_statistics 
            where TABLE_SCHEMA='%s' """ % (db_name)


            table_list2 = db_remote.query(v_pl2)

            for table_row2 in table_list2:

                

                table_schema = table_row2['table_schema']

                table_name = table_row2['table_name']

                index_name = table_row2['index_name']

                
                rows_read = table_row2['rows_read']

                
                #try:
                v_insert_sql2='''insert into meta_index_statistics(instance_id,schema_id,table_schema,
                table_name,index_name,rows_read) 
                values(%d,%d,'%s','%s','%s',%d)''' % (
                instance_id,schema_id,table_schema,
                table_name,index_name,rows_read
                )

                #print v_insert_sql

                db.execute(v_insert_sql2.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)



            db_remote.close()


            # generate the statistics data for the log_hot_tables table

            # statistic_time is the time the statistics were actually generated


            v_insert_sql3='''insert into log_hot_tables(instance_id,schema_id,table_schema,
                table_name,TABLE_COMMENT,TABLE_CREATE_TIME,rows_read,rows_changed,
                rows_changed_x_indexes) select a.instance_id,a.schema_id,a.TABLE_SCHEMA,
                a.TABLE_NAME,b.TABLE_COMMENT,b.CREATE_TIME,a.ROWS_READ,a.ROWS_CHANGED,
                a.ROWS_CHANGED_X_INDEXES from meta_table_statistics a,meta_tables b where 
                a.instance_id=%d and a.schema_id=%d and 
                a.TABLE_SCHEMA='%s' and a.instance_id=b.instance_id and a.TABLE_SCHEMA=b.TABLE_SCHEMA 
                and a.TABLE_NAME=b.TABLE_NAME''' % (
                instance_id,schema_id,table_schema
                )

            print v_insert_sql3

            db.execute(v_insert_sql3.replace('%','%%'))
            


            

            i=i+1


    db.close()

    # now remotely flush the statistics tables

    # for remote execute

    os_user = '******'

    OS_APPS_PASSWD = config.OS_APPS_PASSWD

    DB_USER = config.DB_USER

    DB_PASSWD = config.DB_PASSWD

    # connect to the configuration center DB
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    # the E4S server with id 64 is not managed by the platform, so exclude it manually (b.id != 64)
    v_sql = r"""SELECT distinct b.ip,b.port from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    #print v_sql

    server_list = db.query(v_sql)

    if server_list:  # loop over the server list

        i=0

        for single_server in server_list:


            host_ip = single_server['ip']

            mysql_port = single_server['port']


            # execute the SQL remotely via a paramiko call

            exe_sql = 'flush table_statistics;flush index_statistics;'  

            result = func.remote_exe_sql(host_ip,os_user,OS_APPS_PASSWD,mysql_port,'information_schema',exe_sql,DB_USER,DB_PASSWD)

            if result == '':
                result = '执行成功!'

            print result
Example #29
class MySQLHelper:

    def __init__(self, host, user, password, database):
        self._db = Connection(host, database, user=user, password=password, max_idle_time=10)
        if not self._db._db:
            raise Exception('%s' % self._db.error_msg)

    def query(self, sql, *parameters):
        return self._db.query(sql, *parameters)

    def query_one(self, sql, *parameters):
        res = self._db.query(sql, *parameters)
        return res.pop() if res else {}

    def write(self, sql, *parameters):
        return self._db.execute(sql, *parameters)

    def gen_insert(self, tablename, rowdict, replace=False):
        return self._db.gen_insert_sql(tablename, rowdict, replace)

    def insert_dict(self, tablename, rowdict):
        key_strs = ", ".join(["""`%s`""" % key for key in rowdict.keys()])
        value_strs = ", ".join(["""'%s'""" % rowdict.get(key) for key in rowdict.keys()])
        sql = """INSERT INTO %s (%s) VALUES (%s)""" % (tablename, key_strs, value_strs)
        return self._db.execute(sql)

    def insert_batch(self, tablename, batch_params):
        value_batch = []
        for param in batch_params:
            keys = param.keys()
            key_strs = ", ".join(["""`%s`""" % key for key in keys])
            value_strs = "(%s)" % ", ".join(
                ["""'%s'""" % "%s" % param.get(key) for key in keys])
            value_batch.append(value_strs)
        sql = """INSERT INTO %s (%s) VALUES %s""" % (tablename, key_strs, ",".join(value_batch))
        return self._db.execute(sql)

    def update_dict(self, tablename, rowdict, where):
        sql = """UPDATE %s SET %s WHERE %s""" % (
        tablename, self._formatter(rowdict, ', '), self._formatter(where, " AND "))
        return self._db.execute(sql)

    def transaction(self, query, parameters):
        return self._db.transaction(query, parameters)

    def get(self, tablename, conds, cols='', extra_conds={}):
        if not tablename:
            return False
        cols = "%s" % ','.join(cols) if cols else '*'
        wheres = []
        values = []
        if conds and isinstance(conds, dict):
            for key, value in conds.items():
                if isinstance(value, (list, tuple)):
                    wheres.append("`%s` IN (%s)" % (key, "'%s'" % "','".join([str(v) for v in value])))
                else:
                    wheres.append("`%s`=%%s" % key)
                    values.append("%s" % value)
        where_str = ' AND '.join(wheres)
        sql = """ SELECT %s FROM `%s` """ % (cols, tablename)
        if where_str:
            sql += """ WHERE %s """ % (where_str)
        if extra_conds.get('group_by'):
            sql += """ GROUP by %s """ % ','.join(extra_conds['group_by'])
        if extra_conds.get('order_by'):
            sql += """ ORDER by %s """ % ','.join(extra_conds['order_by'])
        if extra_conds.get('limit'):
            sql += """ LIMIT %s """ % ','.join(map(str, extra_conds['limit']))

        return self._db.query(sql, *values)


    def _serialize(self, value):
        if isinstance(value, (dict, list, set)):
            value = json.dumps(value)
        else:
            value = "%s" % value
        return value

    def _formatter(self, pairs, delimiter):
        values = []
        for key, value in pairs.items():
            if not isinstance(value, list):
                value = self._serialize(value)
                values.append("""`%s`='%s'""" % (key, value))
            else:
                values.append("""`%s` in ("%s")""" % (key, '","'.join([self._serialize(val) for val in value])))
        return delimiter.join(values)

    def __del__(self):
        self._db.close()
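
A usage example for the wrapper above (not from the original source; the host, credentials, table, and column names are placeholders):

helper = MySQLHelper("127.0.0.1", "example_user", "example_pass", "example_db")
helper.insert_dict("users", {"name": "alice", "age": 30})
row = helper.query_one("SELECT * FROM users WHERE name = %s", "alice")
rows = helper.get("users", {"age": [29, 30, 31]}, cols=["name", "age"],
                  extra_conds={"order_by": ["name"], "limit": [0, 10]})
print row, rows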
Example #30
    def get(self):
        db = Connection('127.0.0.1', 'zfz', 'zfz', 'zfz...891', 25200)
        q = self.get_argument(name="query", default="")
        p = self.get_argument(name="price", default="999999999")
        ma = re.search(r'\d+', p)
        if ma is None:
            p = "999999999"
        else:
            p = ma.group(0)

        q = q.lstrip().rstrip().replace("'", "").replace('"', '').replace(
            '#', '').replace('%', '')
        qs = q.split(' ')

        if len(q) > 0:
            if len(qs) == 1:
                m = '%' + q + '%'
                items = db.query(
                    "select title, url, price, area, arch, address, district "
                    "from pages where price <= %s and (address like %s or district like %s or title like %s) "
                    "order by date desc limit 20", p, m, m, m)
            else:
                l = qs[0]
                r = qs[-1]
                m1 = ''
                m2 = ''
                for i in range(1, len(qs) - 1):
                    m1 += '%' + qs[i]
                    m2 += '%' + qs[len(qs) - 1 - i]

                m1 += '%'
                m2 += '%'

                items = db.query(
                    "select title, url, price, area, arch, address, district "
                    "from pages where price <= %s and ((address like %s and district like %s) or "
                    "(address like %s and district like %s) or "
                    "(title like %s and address like %s) or "
                    "(title like %s and address like %s and district like %s) or "
                    #                        "title like %s or "
                    "address like %s) "
                    "order by date desc limit 20",
                    p,
                    '%' + l + m1,
                    '%' + r + '%',
                    '%' + r + m2,
                    '%' + l + '%',
                    '%' + l + '%',
                    m1 + r + '%',
                    '%' + l + '%',
                    m1,
                    '%' + r + '%',
                    #                        '%' + l + m1 + r + '%',
                    '%' + l + m1 + r + '%')
        else:
            items = []

        if len(items) < 1:
            hit = False
        else:
            hit = True

        if p == "999999999":
            p = ""

        self.render("search.html", query=q, price=p, items=items, hit=hit)
Example #31
# to learn how to use it.

# Installation: pip install torndb
# Official doc: http://torndb.readthedocs.org/en/latest/
# Source: https://github.com/bdarnell/torndb/blob/master/torndb.py

from torndb import Connection

# Connect by IP address
db = Connection('127.0.0.1', 'database_name', user='******', password='******')
# Connect by IP address with port
db = Connection('127.0.0.1:1234', 'database_name', user='******', password='******')
# Connect by socket
db = Connection('/tmp/mysql.sock', 'database_name', user='******', password='******')
# Connection over SSH: open an SSH tunnel with
#     ssh -L 1234:localhost:3306 [email protected]
# then connect to 127.0.0.1:1234

# Retrieve one object
post = db.get("SELECT * FROM posts LIMIT 1")

# Retrieve several objects
for post in db.query("SELECT * FROM posts"):
    print post.title
  
# Insert one entry
db.execute("INSERT INTO posts (user_id, content) VALUES (%s, %s)", 12, "hello world !")

# Insert multiple entries
values = [(12, "hello"), (17, "world"), (22, "blah")]
db.executemany("INSERT INTO posts (user_id, content) VALUES (%s, %s)", values)
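
A few follow-on calls from the same API, sketched against the connection opened above (behaviour as of the torndb source linked here: get() returns None when nothing matches, and close() releases the connection):

row = db.get("SELECT * FROM posts WHERE id = %s", 12)
if row is None:
    print "no such post"
else:
    print row.content        # rows support attribute access as well as keys

db.execute("UPDATE posts SET content = %s WHERE id = %s", "edited", 12)
db.close()                   # close the connection when finished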
Example #32
class TelegramBot:
    telegram_url = "https://api.telegram.org/bot{token}/{method}"
    vk_url = "https://api.vk.com/method/{method}?{params}&access_token={access_token}&v=5.67"
    chat_id = CHAT_ID

    @staticmethod
    def split_message_by_chunks(iterable, step=4096):
        for chunk in range(0, len(iterable), step):
            yield iterable[chunk:chunk + step]

    def __init__(self, token, db_creds, vk_token):
        self.access_token = token
        self.db = Connection(**db_creds)
        self.vk_token = vk_token

    def send_telegram_message(self, text):
        if len(text) > 4096:
            chunk_try = []
            log.info('Sending big message by chunks..')
            for chunk in TelegramBot.split_message_by_chunks(text):
                response = requests.post(self.telegram_url.format(token=self.access_token, method='sendmessage'), json={
                    'text': chunk,
                    'chat_id': self.chat_id
                })
                chunk_try.append(True if response.status_code == 200 else False)
            if all(chunk_try):
                log.info('Good response when sending big message from Telegram..successfully sent message')
            else:
                log.error('Something went wrong when sending big message..')
        else:
            log.info('Sending simple message..')
            response = requests.post(self.telegram_url.format(token=self.access_token, method='sendmessage'), json={
                'text': text,
                'chat_id': self.chat_id
            })
        if response.status_code == 200:
            log.info('Good response from Telegram..successfully sent message')

    def send_request(self, url):
        log.info('Checking VK data ..')
        response = requests.get(url=url)
        res = response.json()
        if response.status_code != 200 or 'error' in res:
            log.error('Something went wrong with response from VK...%s' % (res['error'],))
            return
        res = {x['id']: x['text'] for x in filter(lambda x: '#знакомства_парни' not in x['text'] and  # no gays obv
                                                            ('#ищупарня' in x['text'] or
                                                             '#знакомства' in x['text'] or
                                                             '#ищутебя' in x['text']),
                                                  [x for x in res['response']['items']])}
        ids = set(res.keys())
        last = self.db.query("SELECT * FROM messages LIMIT 10")
        new = ids - set(x['id'] for x in last) if last else ids
        if new:
            log.info('Got %s new updates from Odessa Search, querying DB...' % (len(new),))
            self.db.insertmany('INSERT INTO messages set id = %s, message = %s',
                               [(key, str(res[key].encode('utf-8'), 'utf8')) for key in new])
            for i, msg in enumerate(new):
                log.info('Sending messages to Telegram... %s of %s' % (i, len(new)))
                self.send_telegram_message(res[msg])
        else:
            log.warning('No updates from VK...Retrying again in 10 mins...')

    def get_update(self):
        self.send_request(self.vk_url.format(method='wall.get',
                                             params='&'.join(
                                                 [k+'='+v for k, v in
                                                  {'domain': VK_GROUP_DOMAIN,
                                                   'count': '10'}.items()]),
                                             access_token=self.vk_token))
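
A hedged sketch of wiring the bot above together; db_creds is just the keyword-argument form of the torndb Connection call (as in the do_query examples), the tokens are placeholders, and the module-level CHAT_ID, VK_GROUP_DOMAIN, and log objects are assumed to be configured elsewhere:

db_creds = {"host": "127.0.0.1", "database": "bot_db",
            "user": "bot", "password": "secret"}
bot = TelegramBot("telegram-bot-token", db_creds, "vk-access-token")
bot.get_update()   # fetch recent VK posts, store the new ones, forward them to Telegram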
Example #33
def add_schedule_backup_job():
    # if __name__ == '__main__':
    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    scheduler = BackgroundScheduler()  # default in-memory jobstore

    url = "sqlite:////home/apps/dbajob.sqlite"

    scheduler.add_jobstore("sqlalchemy", url=url, alias="sqlite_js")

    scheduler.print_jobs()

    print "a"

    scheduler.remove_all_jobs(jobstore="sqlite_js")

    scheduler.print_jobs()

    print "remove"

    # v_current_jobs = scheduler.get_jobs()

    # print v_current_jobs

    # if v_current_jobs:  # if the job already exists, remove it first

    #     scheduler.remove_job('backup')

    # connect to the configuration center DB and fetch the backup interval and related settings
    db = Connection("/tmp/mysql3306.sock", config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone="+8:00")

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """

    print v_sql

    bak_server_list = db.query(v_sql)

    if bak_server_list:  # there are servers to configure

        i = 0

        # mark scheduled jobs that have not started yet as manually ended (backup_result_type=4)
        v_manual_end_sql = "update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0"

        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server["instance_id"]

            from_host = bak_server["ip"]

            # print from_host

            mysql_port = bak_server["port"]

            backup_interval_type = bak_server["backup_interval_type"]

            backup_start_time = bak_server["backup_start_time"]

            str_start_date = time.strftime("%Y-%m-%d") + " " + backup_start_time

            print str_start_date

            v_job_id = "backup_%s_%s" % (from_host, str(mysql_port))

            if backup_interval_type == 1:  # every day

                # scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    days=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            elif backup_interval_type == 2:  # every week weeks=1

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    weeks=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            elif backup_interval_type == 3:  # every hour hours=1

                scheduler.add_job(
                    backup,
                    "interval",
                    id=v_job_id,
                    hours=1,
                    start_date=str_start_date,
                    args=[from_host, mysql_port, os_user, os_password],
                    replace_existing=True,
                    jobstore="sqlite_js",
                )

            else:
                pass
            # record the backup job status in the database; 0 means scheduled but the actual backup has not started

            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (
                instance_id
            )

            db.execute(v_sche_start_sql)

            i = i + 1

        scheduler.print_jobs()

        print "b"

        scheduler.start()

        scheduler.print_jobs()

        print "c"

    db.close()
Example #34
def update_db(info):
    """Automate server audit into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # get object_type_id
    for item in db.query(
            "select * from Dictionary where dict_value='{0}'".format(
                info['server_type'])):
        object_type_id = item.dict_key

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(
        rt_server)
    if info['server_type'] in ["Server", "XenServer"]:
        payload = """0_object_type_id={0}&0_object_name={1}&0_object_label=&0_object_asset_no={1}&got_fast_data=Go%21"""\
                  .format(object_type_id,info['hostname'])
    if info['server_type'] in ["VM", "EC2"]:
        payload = """virtual_objects=&0_object_type_id={0}&0_object_name={1}&got_fast_data=Go%21"""\
                  .format(object_type_id,info['hostname'])

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info['hostname'])
        return False
    else:
        print "OK - Created object: {0}".format(info['hostname'])

    object_id = ""
    # get object_id
    for item in db.query("select * from Object where name='{0}'".format(
            info['hostname'])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # get os_release_id
    os_release_key = ""
    for item in db.query(
            "select * from Dictionary where dict_value='{0}'".format(
                info['os_release'])):
        os_release_key = item.dict_key
    if not os_release_key:
        print "Failed to get object_type_id, please add '{0}' to 'Configuration - Dictionary - Server OS type'.".format(
            info['os_release'])
        return False

    # update the object's attributes; all POST data formats were captured with Firebug in Firefox
    url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=edit&op=update""".format(
        rt_server)
    if info['server_type'] == "Server":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=2&4_value=0&5_attr_id=26&5_value=0&6_attr_id=10006&6_value={6}\
&7_attr_id=10003&7_value={7}&8_attr_id=1&8_value=&9_attr_id=28&9_value=&10_attr_id=21&10_value=&11_attr_id=4&11_value={8}\
&12_attr_id=24&12_value=&13_attr_id=10005&13_value={9}&14_attr_id=25&14_value=&num_attrs=15&object_comment=&submit.x=15&submit.y=13"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),os_release_key,info['swap'])
    if info['server_type'] == "XenServer":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=1&7_value=&8_attr_id=28&8_value=&9_attr_id=4&9_value={8}&10_attr_id=24&10_value=&11_attr_id=10005&11_value={9}\
&12_attr_id=25&12_value=&13_attr_id=10008&13_value={10}&num_attrs=14&object_comment=&submit.x=18&submit.y=21"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),os_release_key,info['swap'],quote_plus(info['vm_list']))
    if info['server_type'] == "EC2":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10010&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=19&submit.y=27"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),info['ec2_pubname'],os_release_key,info['swap'])
    if info['server_type'] == "VM":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10007&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=25&submit.y=14'"""\
                  .format(object_id,object_type_id,info['hostname'],info['cpu_cores'],quote_plus(info['disk']),info['fqdn'],
                          info['memory'],quote_plus(info['network']),info['resident_on'],os_release_key,info['swap'])

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to update attributes"
        return False
    print "OK - Updated attributes"

    # ec2 servers don't need to update the ip pool
    if info['server_type'] not in ["EC2"]:
        # update the ip pool
        nics = ("".join(info['network'].split())).split(',')
        for i in nics:
            nic_info = i.split(':')
            nic_name = "".join(nic_info[0:1])
            nic_addr = "".join(nic_info[1:2])
            # only proceed if nic_name looks valid (alphanumeric)
            if nic_name.isalnum():
                # create nic
                url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=ip&op=add""".format(
                    rt_server)
                payload = """object_id={0}&bond_name={1}&ip={2}&bond_type=regular&submit.x=11&submit.y=6"""\
                          .format(object_id,nic_name,nic_addr)
                req = requests.post(url,
                                    data=payload,
                                    headers=rt_headers,
                                    auth=rt_auth)
                if req.status_code != requests.codes.ok:
                    print "Failed to update ip pool for {0}:{1}".format(
                        nic_name, nic_addr)
                    return False
                print "OK - Updated ip pool for {0}:{1}".format(
                    nic_name, nic_addr)

    # virtual servers don't need to update the rackspace
    if info['server_type'] not in ["EC2", "VM"]:
        # update rack info
        update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
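
A side note on the payload construction in update_db(): requests will form-encode a dict passed as data, so the hand-built, '&'-joined strings (with Go%21 standing for "Go!") can be replaced by plain dicts. A minimal sketch for the object-creation call only; the helper name is illustrative, the field names mirror the ones used above, and rt_* values are assumed to be defined as in the example:

import requests

def create_racktables_object(rt_server, rt_headers, rt_auth, object_type_id, hostname):
    url = "http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects".format(rt_server)
    payload = {
        "0_object_type_id": object_type_id,
        "0_object_name": hostname,
        "0_object_label": "",
        "0_object_asset_no": hostname,
        "got_fast_data": "Go!",   # requests URL-encodes this to Go%21
    }
    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    return req.status_code == requests.codes.ok
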
Exemple #35
0
def upload_meta_tables():  
    # Connect to the config-center database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    db.execute('truncate table meta_tables')  # truncate first, then re-insert

    db.execute('truncate table meta_statistics')  # truncate first, then re-insert

    db.execute('truncate table meta_redundant_keys')  # truncate first, then re-insert

    # The E4S server with id 64 is not managed by the platform, so it is excluded manually (b.id != 64)
    v_sql = r"""SELECT b.ip,b.port,b.id as instance_id,a.id as schema_id,a.name as db_name from 
    resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""

    #print v_sql

    upload_tables_list = db.query(v_sql)

    if upload_tables_list:  # loop over the instance list

        i=0

        

        for upload_table in upload_tables_list:

            instance_id = upload_table['instance_id']

            schema_id = upload_table['schema_id']

            db_name = upload_table['db_name']

            host_ip = upload_table['ip']

            mysql_port = upload_table['port']

            v_host =host_ip + ':' + str(mysql_port)

            # Connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # Fetch table metadata, then insert it

            v_pl = r"""SELECT table_catalog,table_schema,table_name,table_type,engine,version,row_format,
            table_rows,avg_row_length,data_length,max_data_length,index_length,data_free,auto_increment,create_time,
            update_time,check_time,table_collation,checksum,
            create_options,table_comment from tables where table_type='BASE TABLE' and TABLE_SCHEMA='%s' """ % (db_name)

            #print v_pl

            table_list = db_remote.query(v_pl)

            for table_row in table_list:

                table_catalog = table_row['table_catalog']

                table_schema = table_row['table_schema']

                table_name = table_row['table_name']

                table_type = table_row['table_type']

                engine = table_row['engine']


                version = table_row['version']

                row_format = table_row['row_format']

                table_rows = table_row['table_rows']

                avg_row_length = table_row['avg_row_length']

                data_length = table_row['data_length']

                max_data_length = table_row['max_data_length']

                index_length = table_row['index_length']

                data_free = table_row['data_free']

                auto_increment = table_row['auto_increment']

                # Originally an INT column, but passed as a string (%s) so that NULL values can be inserted
                if auto_increment is None:
                    auto_increment = 'NULL'
                else:
                    auto_increment = "'"+str(auto_increment)+"'"

                # Datetime column; preprocessed so NULL values can be inserted via %s
                create_time = table_row['create_time']

                if create_time is None:
                    create_time = 'NULL'
                else:
                    create_time = "'"+str(create_time)+"'"
                

                # Datetime column; preprocessed so NULL values can be inserted via %s
                update_time = table_row['update_time']

                if update_time is None:
                    update_time = 'NULL'
                else:
                    update_time = "'"+str(update_time)+"'"


                check_time = table_row['check_time']

                if check_time is None:
                    check_time = 'NULL'
                else:
                    check_time = "'"+str(check_time)+"'"


                table_collation = table_row['table_collation']

                # Originally an INT column, but passed as a string (%s) so that NULL values can be inserted

                checksum = table_row['checksum']

                if checksum is None:
                    checksum = 'NULL'
                else:
                    checksum = "'"+str(checksum)+"'"
                


                create_options = table_row['create_options']

                table_comment = table_row['table_comment']

                
                #try:
                v_insert_sql='''insert into meta_tables(instance_id,schema_id,table_catalog,table_schema,table_name,
                table_type,engine,version,row_format,table_rows,avg_row_length,data_length,max_data_length,
                index_length,data_free,auto_increment,create_time,update_time,check_time,table_collation,
                checksum,create_options,table_comment) 
                values(%d,%d,'%s','%s','%s',
                '%s','%s',%d,'%s',%d,%d,%d,%d,
                %d,%d,%s,%s,%s,%s,'%s',
                %s,'%s','%s')''' % (
                instance_id,schema_id,table_catalog,table_schema,table_name,
                table_type,engine,version,row_format,table_rows,avg_row_length,data_length,max_data_length,
                index_length,data_free,auto_increment,create_time,update_time,check_time,table_collation,
                checksum,create_options,table_comment
                )

                #print v_insert_sql

                db.execute(v_insert_sql.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)


            # Fetch index metadata, then insert it

            v_pl2 = r"""SELECT table_catalog,table_schema,table_name,non_unique,index_schema,
            index_name,seq_in_index,column_name,collation,cardinality,sub_part,packed,nullable,
            index_type,comment,index_comment from statistics where TABLE_SCHEMA='%s' """ % (db_name)


            table_list2 = db_remote.query(v_pl2)

            for table_row2 in table_list2:

                table_catalog = table_row2['table_catalog']

                table_schema = table_row2['table_schema']

                table_name = table_row2['table_name']

                non_unique = table_row2['non_unique']

                index_schema = table_row2['index_schema']


                index_name = table_row2['index_name']

                seq_in_index = table_row2['seq_in_index']

                column_name = table_row2['column_name']

                collation = table_row2['collation']

                cardinality = table_row2['cardinality']

                # Originally an INT column, but passed as a string (%s) so that NULL values can be inserted

                if cardinality is None:
                    cardinality = 'NULL'
                else:
                    cardinality = "'"+str(cardinality)+"'"


                sub_part = table_row2['sub_part']

                # Originally an INT column, but passed as a string (%s) so that NULL values can be inserted

                if sub_part is None:
                    sub_part = 'NULL'
                else:
                    sub_part = "'"+str(sub_part)+"'"


                packed = table_row2['packed']

                if packed is None:
                    packed = 'NULL'
                else:
                    
                    packed = '"'+packed+'"'   # 防止字符里面本身包含单引号


                nullable = table_row2['nullable']
                
                index_type = table_row2['index_type']

                comment = table_row2['comment']

                index_comment = table_row2['index_comment']

                

                
                #try:
                v_insert_sql2='''insert into meta_statistics(instance_id,schema_id,table_catalog,
                table_schema,table_name,non_unique,index_schema,index_name,seq_in_index,
                column_name,collation,cardinality,sub_part,packed,nullable,index_type,comment,index_comment) 
                values(%d,%d,'%s',
                '%s','%s',%d,'%s','%s',%d,
                '%s','%s',%s,%s,%s,'%s','%s','%s','%s')''' % (
                instance_id,schema_id,table_catalog,
                table_schema,table_name,non_unique,index_schema,index_name,seq_in_index,
                column_name,collation,cardinality,sub_part,packed,nullable,index_type,comment,index_comment
                )

                #print v_insert_sql

                db.execute(v_insert_sql2.replace('%','%%'))

                    
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            db_remote.close()


            # Connect to the remote instance to extract redundant-index information
            db_remote2 = Connection(v_host,
                            'common_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')

            # Read the common_schema.redundant_keys view, then insert the rows

            v_pl3 = r"""SELECT table_schema,table_name,redundant_index_name,redundant_index_columns,
            redundant_index_non_unique,dominant_index_name,dominant_index_columns,
            dominant_index_non_unique,subpart_exists,sql_drop_index 
            from redundant_keys where table_schema='%s' """ % (db_name)

            #print v_pl

            table_list3 = db_remote2.query(v_pl3)

            for table_row3 in table_list3:

            

                table_schema = table_row3['table_schema']

                table_name = table_row3['table_name']

                redundant_index_name = table_row3['redundant_index_name']

                redundant_index_columns = table_row3['redundant_index_columns']


                redundant_index_non_unique = table_row3['redundant_index_non_unique']

                dominant_index_name = table_row3['dominant_index_name']

                dominant_index_columns = table_row3['dominant_index_columns']

                dominant_index_non_unique = table_row3['dominant_index_non_unique']


                subpart_exists = table_row3['subpart_exists']

                sql_drop_index = table_row3['sql_drop_index']

                
                
                #try:
                v_insert_sql3='''insert into meta_redundant_keys(instance_id,schema_id,table_schema,
                table_name,redundant_index_name,redundant_index_columns,redundant_index_non_unique,
                dominant_index_name,dominant_index_columns,
                dominant_index_non_unique,subpart_exists,sql_drop_index) 
                values(%d,%d,'%s',
                '%s','%s','%s',%d,
                '%s','%s',
                %d,%d,'%s')''' % (
                instance_id,schema_id,table_schema,
                table_name,redundant_index_name,redundant_index_columns,redundant_index_non_unique,
                dominant_index_name,dominant_index_columns,
                dominant_index_non_unique,subpart_exists,sql_drop_index
                )

                #print v_insert_sql

                db.execute(v_insert_sql3.replace('%','%%'))


            db_remote2.close()

            

            i=i+1


    db.close()
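
The manual NULL handling in upload_meta_tables() (mapping None to the literal string 'NULL' and quoting everything else by hand) can be avoided by letting torndb bind the values, as other examples in this listing do with %s placeholders: the driver quotes strings and maps None to SQL NULL. A minimal sketch with an illustrative helper name and a reduced column list:

def insert_table_meta(db, instance_id, schema_id, table_row):
    # Parameterized insert: None values become NULL, strings are quoted by the driver.
    db.execute("""insert into meta_tables(instance_id, schema_id, table_schema, table_name,
                                          auto_increment, create_time, update_time, checksum)
                  values (%s, %s, %s, %s, %s, %s, %s, %s)""",
               instance_id, schema_id,
               table_row['table_schema'], table_row['table_name'],
               table_row['auto_increment'], table_row['create_time'],
               table_row['update_time'], table_row['checksum'])
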
Exemple #36
0
def generate_mysql_config(tlp_file, schema_file, force=False):
    """
    Args:
        tlp_file(unicode): config template to be written in to config center db
        schema_file(unicode): scheme file
    """

    if not os.path.exists(tlp_file):
        raise ImproperlyConfigured(u"invalid config template file `%s`" %
                                   tlp_file)
    if not os.path.exists(schema_file):
        raise ImproperlyConfigured(u"invalid config scheme file path `%s`" %
                                   schema_file)
    with open(tlp_file) as fi:
        config = json.load(fi)
    with open(schema_file) as fi:
        schema = json.load(fi)
        Draft4Validator(schema).validate(config)
    config_center = settings.CONFIG_CENTER
    env = settings.ENV
    logging.warn("init config center with env `%s`" % env)
    from torndb import Connection
    db = None
    try:
        db_name = config_center["database"]
        db = Connection(host=config_center["host"],
                        database=db_name,
                        user=config_center["user"],
                        password=config_center["password"])
        table_name = "t_s_config_%s" % env
        if force:
            db.execute("DROP TABLE if EXISTS %s" % table_name)
        if db.query("show tables where Tables_in_%s = '%s'" %
                    (db_name, table_name)):
            raise RuntimeError(
                u"config center of env `%s` has already been initialized!" % env)
        create_table_sql = u"""
            CREATE TABLE IF NOT EXISTS %s (
              `id` int(20) unsigned zerofill NOT NULL AUTO_INCREMENT,
              `node` varchar(40) COLLATE utf8_bin NOT NULL COMMENT 'module node name',
              `key` varchar(40) COLLATE utf8_bin NOT NULL COMMENT 'key name under the node',
              `value` varchar(200) COLLATE utf8_bin DEFAULT NULL COMMENT 'value for the key',
              `key_type` varchar(6) COLLATE utf8_bin NOT NULL DEFAULT 'string' COMMENT 'type of the config value',
              `key_note` varchar(45) COLLATE utf8_bin DEFAULT NULL COMMENT 'remarks for the key',
              PRIMARY KEY (`id`),
              UNIQUE KEY `unique_config` (`node`,`key`) USING BTREE COMMENT 'unique config index'
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='%s environment system config table'"""\
                           % (table_name, env)
        db.execute(create_table_sql)
        sql = "INSERT INTO %s" % table_name
        sql += "(`node`, `key`, `value`, `key_type`) VALUES (%s, %s, %s, %s)"
        datas = []
        for key, value in config.items():
            for k, v in value.items():
                key_type = "string"
                if isinstance(v, list):
                    v = ", ".join(v)
                elif isinstance(v, bool):
                    key_type = "bool"
                elif isinstance(v, int):
                    key_type = "int"

                data = (key, k, v, key_type)
                datas.append(data)
        db.executemany_rowcount(sql, datas)
    except KeyError as e:
        raise ImproperlyConfigured(u"need config key `%s` of config_center" %
                                   (e.args[0]))
    finally:
        if db:
            db.close()
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """

    print v_sql

    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    bak_server_list = db.query(v_sql)

    if bak_server_list:  # there are servers that need backup schedules

        i=0

        for bak_server in bak_server_list:

            from_host = bak_server['ip']

            print from_host

            mysql_port = bak_server['port']

            backup_interval_type = bak_server['backup_interval_type']
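
For reference, the generate_mysql_config() helper at the top of this example expects the template to be a two-level JSON object: node -> key -> value. Each (node, key) pair becomes one row in t_s_config_<env>; lists are joined with ", ", booleans get key_type "bool" and integers key_type "int". A minimal illustrative template (the file name and keys below are made up):

import json

config_template = {
    "redis": {"host": "127.0.0.1", "port": 6379, "use_ssl": False},
    "mail": {"receivers": ["[email protected]", "[email protected]"]},
}
with open("config.tpl.json", "w") as fo:
    json.dump(config_template, fo, indent=2)
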
Exemple #38
0
def update_db(info):
    """Automate server audit into Racktables"""

    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)

    # get object_type_id
    for item in db.query("select * from Dictionary where dict_value='{0}'".format(info["server_type"])):
        object_type_id = item.dict_key

    # delete object if already exists
    delete_object(info)

    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(rt_server)
    if info["server_type"] in ["Server", "XenServer"]:
        payload = """0_object_type_id={0}&0_object_name={1}&0_object_label=&0_object_asset_no={1}&got_fast_data=Go%21""".format(
            object_type_id, info["hostname"]
        )
    if info["server_type"] in ["VM", "EC2"]:
        payload = """virtual_objects=&0_object_type_id={0}&0_object_name={1}&got_fast_data=Go%21""".format(
            object_type_id, info["hostname"]
        )

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info["hostname"])
        return False
    else:
        print "OK - Created object: {0}".format(info["hostname"])

    object_id = ""
    # get object_id
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False

    # get os_release_id
    os_release_key = ""
    for item in db.query("select * from Dictionary where dict_value='{0}'".format(info["os_release"])):
        os_release_key = item.dict_key
    if not os_release_key:
        print "Failed to get object_type_id, please add '{0}' to 'Configuration - Dictionary - Server OS type'.".format(
            info["os_release"]
        )
        return False

    # update the object's attributes; all POST data formats were captured with Firebug in Firefox
    url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=edit&op=update""".format(rt_server)
    if info["server_type"] == "Server":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=2&4_value=0&5_attr_id=26&5_value=0&6_attr_id=10006&6_value={6}\
&7_attr_id=10003&7_value={7}&8_attr_id=1&8_value=&9_attr_id=28&9_value=&10_attr_id=21&10_value=&11_attr_id=4&11_value={8}\
&12_attr_id=24&12_value=&13_attr_id=10005&13_value={9}&14_attr_id=25&14_value=&num_attrs=15&object_comment=&submit.x=15&submit.y=13""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            os_release_key,
            info["swap"],
        )
    if info["server_type"] == "XenServer":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=1&7_value=&8_attr_id=28&8_value=&9_attr_id=4&9_value={8}&10_attr_id=24&10_value=&11_attr_id=10005&11_value={9}\
&12_attr_id=25&12_value=&13_attr_id=10008&13_value={10}&num_attrs=14&object_comment=&submit.x=18&submit.y=21""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            os_release_key,
            info["swap"],
            quote_plus(info["vm_list"]),
        )
    if info["server_type"] == "EC2":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10010&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=19&submit.y=27""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            info["ec2_pubname"],
            os_release_key,
            info["swap"],
        )
    if info["server_type"] == "VM":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10007&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=25&submit.y=14'""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            info["resident_on"],
            os_release_key,
            info["swap"],
        )

    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to update attributes"
        return False
    print "OK - Updated attributes"

    # ec2 servers don't need to update the ip pool
    if info["server_type"] not in ["EC2"]:
        # update the ip pool
        nics = ("".join(info["network"].split())).split(",")
        for i in nics:
            nic_info = i.split(":")
            nic_name = "".join(nic_info[0:1])
            nic_addr = "".join(nic_info[1:2])
            # only proceed if nic_name looks valid (alphanumeric)
            if nic_name.isalnum():
                # create nic
                url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=ip&op=add""".format(rt_server)
                payload = """object_id={0}&bond_name={1}&ip={2}&bond_type=regular&submit.x=11&submit.y=6""".format(
                    object_id, nic_name, nic_addr
                )
                req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
                if req.status_code != requests.codes.ok:
                    print "Failed to update ip pool for {0}:{1}".format(nic_name, nic_addr)
                    return False
                print "OK - Updated ip pool for {0}:{1}".format(nic_name, nic_addr)

    # virtual servers don't need to update the rackspace
    if info["server_type"] not in ["EC2", "VM"]:
        # update rack info
        update_rack(info, object_id)

    # close db
    db.close()

    # end
    return True
def start_schedule():

#if __name__ == '__main__':
    os_user = config.OS_USER

    os_password = config.OS_APPS_PASSWD

    
    scheduler = Scheduler(daemonic = False)

    scheduler.print_jobs()

    #scheduler.remove_jobstore('file',close=True)

    #scheduler.shutdown(wait=False)

    

    scheduler.shutdown()

    #scheduler.unschedule_func(backup)

    scheduler.add_jobstore(ShelveJobStore('/tmp/db_schedule'), 'file')

    v_current_jobs = scheduler.get_jobs()

    print v_current_jobs

    if v_current_jobs:  # if a job already exists, remove it first

     

        scheduler.unschedule_func(backup)

    

    #scheduler = Scheduler(standalone=True)
    #scheduler = Scheduler(daemon=True)
    # Connect to the config-center database and fetch the backup interval settings
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT a.instance_id,b.ip,b.port,a.backup_interval_type,a.backup_start_time from mysql_ins_bak_setup a,tag b where 
        a.instance_id=b.id """

    print v_sql

    bak_server_list = db.query(v_sql)

    if bak_server_list:  # there are servers that need backup schedules

        i=0

        # Mark scheduled backups that never actually started as manually ended (backup_result_type=4)
        v_manual_end_sql = 'update mysql_ins_bak_log set backup_result_type=4 where backup_result_type=0'

        db.execute(v_manual_end_sql)

        for bak_server in bak_server_list:

            instance_id = bak_server['instance_id']

            from_host = bak_server['ip']

            #print from_host

            mysql_port = bak_server['port']

            backup_interval_type = bak_server['backup_interval_type']

            backup_start_time = bak_server['backup_start_time']

            str_start_date= time.strftime("%Y-%m-%d") + ' ' + backup_start_time

            print str_start_date 

            if backup_interval_type == 1: # every day

                # in-memory jobstore

                #scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

                # file-based jobstore: jobstore='file'

                scheduler.add_interval_job(backup, days=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password], jobstore='file')
               
                #scheduler.add_interval_job(backup, days=1, start_date='2014-07-18 18:17:01', args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 2: # every week

                scheduler.add_interval_job(backup, weeks=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            elif backup_interval_type == 3: # every hour

                scheduler.add_interval_job(backup, hours=1, start_date=str_start_date, args=[from_host, mysql_port, os_user, os_password])

            # Record the backup scheduling status in the database; 0 = scheduler job started, actual backup not yet begun

            v_sche_start_sql = """insert into mysql_ins_bak_log(instance_id,backup_result_type) 
            values(%d,0)""" % (instance_id)

            db.execute(v_sche_start_sql)

            i=i+1


    db.close()


    if bak_server_list:  # there are servers that need backup schedules

        scheduler.start()

        print 'success!'

        scheduler.print_jobs()

        '''
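
The three interval branches in start_schedule() above differ only in the trigger keyword (days, weeks or hours), so a small mapping keeps the add_interval_job call in one place. A minimal sketch against the APScheduler 2.x API used in this example; the helper name is illustrative and the file jobstore is passed for every branch here, unlike in the original:

INTERVAL_KWARGS = {1: {"days": 1}, 2: {"weeks": 1}, 3: {"hours": 1}}

def schedule_backup(scheduler, backup_func, backup_interval_type, str_start_date,
                    from_host, mysql_port, os_user, os_password):
    kwargs = INTERVAL_KWARGS.get(backup_interval_type)
    if kwargs is None:
        return None  # unknown interval type: nothing to schedule
    return scheduler.add_interval_job(backup_func, start_date=str_start_date,
                                      args=[from_host, mysql_port, os_user, os_password],
                                      jobstore='file', **kwargs)
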
Exemple #40
0
#!/usr/bin/env python
# coding:utf-8
import redis
from torndb import Connection

from models import Domain, Flow, Rule
import settings

rd = redis.Redis(settings.REDIS_HOST, settings.REDIS_PORT)
db = Connection("127.0.0.1", "spider", user="******", password="******")

domains = db.query("select * from domain")

for d in domains:
    domain = Domain(rd, d["domain"])
    domain.update(d)
    if "scheduled" not in domain:
        domain["scheduled"] = 0

    rule_datas = db.query("select * from rule where domain = %s", d["domain"])
    for rule_data in rule_datas:
        rule = Rule(rd, d["domain"])
        rule.update(rule_data)

    flows = db.query("select * from flow where domain = %s", d["domain"])
    for f in flows:
        flow = Flow(rd, f["flow"])
        flow.update(f)
        if "scheduled" not in flow:
            flow["scheduled"] = 0
Exemple #41
0
def upload_processlist():

    # Connect to the config-center database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    #print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list:  # loop over the instance list

        i=0

        

        for upload_server in upload_server_list:

            instance_id = upload_server['id']

            host_ip = upload_server['ip']

            mysql_port = upload_server['port']

            v_host =host_ip + ':' + str(mysql_port)

            # Connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')


            v_pl = r"""SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,TIME_MS,ROWS_SENT,ROWS_EXAMINED from PROCESSLIST"""

            #print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_id = process_row['ID']

                vp_user = process_row['USER']

                vp_host = process_row['HOST']

                vp_db = process_row['DB']

                # If None, pass it as a MySQL NULL; otherwise quote it and pass it via %s rather than '%s'
                if vp_db is None:
                    vp_db = 'NULL'
                else:
                    vp_db = "'"+vp_db+"'"

                #print vp_db

                vp_command = process_row['COMMAND']

                vp_time = process_row['TIME']

                vp_state = process_row['STATE']

                vp_info = process_row['INFO']

                if vp_info is None:
                    vp_info = 'NULL'
                else:
                    #vp_info = "'"+vp_info+"'"
                    vp_info = vp_info.replace('"',"'") # replace double quotes with single quotes
                    vp_info = '"'+vp_info+'"'   # wrap in double quotes in case the value itself contains single quotes

                vp_time_ms = process_row['TIME_MS']

                vp_rows_sent = process_row['ROWS_SENT']

                vp_rows_examined = process_row['ROWS_EXAMINED']

                # v_insert_sql='''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                # TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s','%s','%s',%d,'%s','%s',%d,%d,%d)''' % (
                # instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)
                
                #try:
                v_insert_sql='''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s',%s,'%s',%d,'%s',%s,%d,%d,%d)''' % (
                instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)


                #print v_insert_sql

                db.execute(v_insert_sql.replace('%','%%'))

                    #db.execute(v_insert_sql)
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert process_list error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            db_remote.close()

            

            i=i+1


    db.close()
def upload_auto_increment():

    # Connect to the config-center database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list:  # loop over the instance list

        i=0

        

        for upload_server in upload_server_list:

            instance_id = upload_server['id']

            host_ip = upload_server['ip']

            mysql_port = upload_server['port']

            v_host =host_ip + ':' + str(mysql_port)

            print v_host

            # Connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')


            v_pl = r"""select information_schema . columns . TABLE_SCHEMA AS TABLE_SCHEMA,
                        information_schema . columns . TABLE_NAME AS TABLE_NAME,
                        information_schema . columns . COLUMN_NAME AS COLUMN_NAME,
                        information_schema . columns . DATA_TYPE AS DATA_TYPE,
                        information_schema . columns . COLUMN_TYPE AS COLUMN_TYPE,
                        ((case information_schema . columns . DATA_TYPE
                        when 'tinyint' then
                            255
                        when 'smallint' then
                            65535
                        when 'mediumint' then
                            16777215
                        when 'int' then
                            4294967295
                        when 'bigint' then
                            18446744073709551615
                        end) >>
                        if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0),
                            0,
                            1)) AS MAX_VALUE,
                        information_schema . tables . AUTO_INCREMENT AS AUTO_INCREMENT,
                        (information_schema . tables .
                        AUTO_INCREMENT /
                        ((case information_schema . columns . DATA_TYPE
                            when 'tinyint' then
                                255
                            when 'smallint' then
                                65535
                            when 'mediumint' then
                                16777215
                            when 'int' then
                                4294967295
                            when 'bigint' then
                                18446744073709551615
                        end) >>
                        if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0),
                            0,
                            1))) AS AUTO_INCREMENT_RATIO
                        from (information_schema . columns join information_schema . tables
                          on(((information_schema . columns . TABLE_SCHEMA = information_schema .
                               tables . TABLE_SCHEMA) and
                               (information_schema . columns . TABLE_NAME = information_schema .
                               tables . TABLE_NAME))))
                        where ((information_schema . columns .TABLE_SCHEMA not in
                                    ('mysql', 'INFORMATION_SCHEMA', 'performance_schema','common_schema')) and
                                    (information_schema . tables . TABLE_TYPE = 'BASE TABLE') and
                                    (information_schema . columns . EXTRA = 'auto_increment'))"""

            print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_server_ip = v_host

                vp_instance_id = instance_id

                #vp_id = process_row['ID']
                vp_table_schema = process_row['TABLE_SCHEMA']

                vp_table_name = process_row['TABLE_NAME']

                vp_column_name = process_row['COLUMN_NAME']

                vp_data_type = process_row['DATA_TYPE']

                # If None, pass it as a MySQL NULL; otherwise quote it and pass it via %s rather than '%s'
                #if vp_db is None:
                #    vp_db = 'NULL'
                #else:
                #    vp_db = "'"+vp_db+"'"

                #print vp_db

                vp_column_type = process_row['COLUMN_TYPE']

                vp_max_value = process_row['MAX_VALUE']

                vp_auto_increment = process_row['AUTO_INCREMENT']

                vp_auto_increment_ratio = process_row['AUTO_INCREMENT_RATIO']

                #vp_info = process_row['INFO']

                #if vp_info is None:
                #    vp_info = 'NULL'
                #else:
                #    #vp_info = "'"+vp_info+"'"
                #    vp_info = vp_info.replace('"',"'") # replace double quotes with single quotes
                #    vp_info = '"'+vp_info+'"'   # wrap in double quotes in case the value itself contains single quotes

                # v_insert_sql='''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                # TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s','%s','%s',%d,'%s','%s',%d,%d,%d)''' % (
                # instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)
                
                #try:
                v_delete_sql='''delete from  auto_increment_list where INSTANCE_ID = '%s' and TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' and COLUMN_NAME = '%s' ''' % (
                vp_instance_id,vp_table_schema,vp_table_name,vp_column_name)


                print v_delete_sql

                db.execute(v_delete_sql)


                v_insert_sql='''insert into auto_increment_list(instance_id,server_ip,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,DATA_TYPE,COLUMN_TYPE,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO) 
                values(%d,'%s','%s','%s','%s','%s','%s',%d,%d,%d)''' % (
                vp_instance_id,vp_server_ip,vp_table_schema,vp_table_name,vp_column_name,vp_data_type,vp_column_type,int(vp_max_value),int(vp_auto_increment),int(vp_auto_increment_ratio))


                print v_insert_sql

                db.execute(v_insert_sql)

                    #db.execute(v_insert_sql)
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert process_list error!," + e.message + ',' + v_insert_sql
                #     log_w(text)

            db_remote.close()

            

            i=i+1

        
        v_sendmail_sql=r'''select count(*) count
                    from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''
        

        
        if_list = db.query(v_sendmail_sql)
        
        for if_row in if_list:
            v_if = if_row['count']
            print v_if    
            if v_if >= 1 :
                print '11'
                
                
                v_warn_sql=r'''select SERVER_IP,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO 
                            from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''
                print v_warn_sql
                warn_list = db.query(v_warn_sql)
                v_server_ip = '\r\n'
                for warn_row in warn_list:
                    v_server_ip = v_server_ip + warn_row['SERVER_IP'] +'|schema: ' + warn_row['TABLE_SCHEMA'] +'|table: '+ warn_row['TABLE_NAME'] + '|column: ' + warn_row['COLUMN_NAME']+'\r\n'
                    
                print v_server_ip
                v_msg_text = v_server_ip
                v_receiver = '[email protected],[email protected]'
                v_subject = 'Auto-increment IDs close to overflowing; see the monitoring page for details'
                v_return = func.send_mail_to_devs(v_receiver,v_subject,v_msg_text)

    db.close()
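
The INFORMATION_SCHEMA query in upload_auto_increment() boils down to: take the unsigned maximum for the column's integer type, shift it right one bit if the column is signed, and divide the table's current AUTO_INCREMENT by that maximum. A minimal sketch of the same arithmetic with illustrative values (note the alert query later filters on AUTO_INCREMENT_RATIO >= 80, which suggests a percentage is intended rather than the raw fraction computed by the SQL and reproduced here):

UNSIGNED_MAX = {
    'tinyint': 255,
    'smallint': 65535,
    'mediumint': 16777215,
    'int': 4294967295,
    'bigint': 18446744073709551615,
}

def auto_increment_ratio(data_type, column_type, auto_increment):
    max_value = UNSIGNED_MAX[data_type]
    if 'unsigned' not in column_type:
        max_value >>= 1   # signed columns can only use half the range
    return float(auto_increment) / max_value

print auto_increment_ratio('int', 'int(10) unsigned', 3865470566)  # roughly 0.90
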
Exemple #43
0
    def get(self):
        db = Connection("127.0.0.1", "zfz", "zfz", "zfz...891", 25200)
        q = self.get_argument(name="query", default="")
        p = self.get_argument(name="price", default="999999999")
        ma = re.search(r"\d+", p)
        if ma is None:
            p = "999999999"
        else:
            p = ma.group(0)

        q = q.lstrip().rstrip().replace("'", "").replace('"', "").replace("#", "").replace("%", "")
        qs = q.split(" ")

        if len(q) > 0:
            if len(qs) == 1:
                m = "%" + q + "%"
                items = db.query(
                    "select title, url, price, area, arch, address, district "
                    "from pages where price <= %s and (address like %s or district like %s or title like %s) "
                    "order by date desc limit 20",
                    p,
                    m,
                    m,
                    m,
                )
            else:
                l = qs[0]
                r = qs[-1]
                m1 = ""
                m2 = ""
                for i in range(1, len(qs) - 1):
                    m1 += "%" + qs[i]
                    m2 += "%" + qs[len(qs) - 1 - i]

                m1 += "%"
                m2 += "%"

                items = db.query(
                    "select title, url, price, area, arch, address, district "
                    "from pages where price <= %s and ((address like %s and district like %s) or "
                    "(address like %s and district like %s) or "
                    "(title like %s and address like %s) or "
                    "(title like %s and address like %s and district like %s) or "
                    #                        "title like %s or "
                    "address like %s) " "order by date desc limit 20",
                    p,
                    "%" + l + m1,
                    "%" + r + "%",
                    "%" + r + m2,
                    "%" + l + "%",
                    "%" + l + "%",
                    m1 + r + "%",
                    "%" + l + "%",
                    m1,
                    "%" + r + "%",
                    #                        '%' + l + m1 + r + '%',
                    "%" + l + m1 + r + "%",
                )
        else:
            items = []

        if len(items) < 1:
            hit = False
        else:
            hit = True

        if p == "999999999":
            p = ""

        self.render("search.html", query=q, price=p, items=items, hit=hit)
def upload_processlist():

    # Connect to the config-center database
    db = Connection('/tmp/mysql3306.sock',
                    config.DB_NAME,
                    config.DB_USER,
                    config.DB_PASSWD,
                    time_zone='+8:00')

    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""

    #print v_sql

    upload_server_list = db.query(v_sql)

    if upload_server_list:  # loop over the instance list

        i=0

        

        for upload_server in upload_server_list:

            instance_id = upload_server['id']

            host_ip = upload_server['ip']

            mysql_port = upload_server['port']

            v_host =host_ip + ':' + str(mysql_port)

            # Connect to the remote instance
            db_remote = Connection(v_host,
                            'information_schema',
                            config.DBA_QUERY_USER,
                            config.DBA_QUERY_PASSWD,
                            time_zone='+8:00')


            v_pl = r"""SELECT ID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,TIME_MS,ROWS_SENT,ROWS_EXAMINED from PROCESSLIST"""

            #print v_pl

            process_list = db_remote.query(v_pl)

            for process_row in process_list:

                vp_id = process_row['ID']

                vp_user = process_row['USER']

                vp_host = process_row['HOST']

                vp_db = process_row['DB']

                vp_command = process_row['COMMAND']

                vp_time = process_row['TIME']

                vp_state = process_row['STATE']

                vp_info = process_row['INFO']

                vp_time_ms = process_row['TIME_MS']

                vp_rows_sent = process_row['ROWS_SENT']

                vp_rows_examined = process_row['ROWS_EXAMINED']

                v_insert_sql='''insert into processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s','%s','%s',%d,'%s','%s',%d,%d,%d)''' % (
                instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)

                db.execute(v_insert_sql)

            db_remote.close()

            

            i=i+1


    db.close()