def get(self):
    """Render the pending (consumed='0') comment notifications for user id 1.

    Joins article_comment with user and articles, localizes each comment's
    timestamp to Asia/Shanghai, then renders the list.
    """
    db = Connection(settings.DATABASE_SERVER, settings.DATABASE_NAME, settings.DATABASE_USER, settings.DATABASE_PASSWORD, )
    # Implicit three-table join; target_user_id='1' — presumably the site
    # admin's notification inbox, TODO confirm.
    condition = """WHERE target_user_id='1' and consumed='0' and article_comment.user_id=user.id and article_comment.article_id=articles.id"""
    sql = """SELECT user_id, name AS user_name, email, article_id, article_comment.time AS time, title as article_title, article_comment.content AS content FROM article_comment,user,articles {0} ORDER BY article_comment.time DESC;""".format(condition)
    comments = db.query(sql)
    for comment in comments:
        # Convert stored timestamp to Shanghai local time, then format it
        comment["time"] = convert_to_time_zone(comment["time"], "Asia/Shanghai")
        comment["time"] = comment["time"].strftime("%Y-%m-%d %H:%M:%S")
    kwargs = dict(comments=comments, )
    # NOTE(review): empty template name and kwargs passed as a single keyword
    # (not **kwargs) — looks suspicious; confirm against the base handler's
    # render() signature.
    self.render("", kwargs=kwargs)
def get_env_iter():
    """Return the (id, name) rows of resources_env visible for config.SHOW_ENV_TYPE.

    SHOW_ENV_TYPE: 1 -> all envs; 2 -> only id=3 (id=3 is the production
    environment); 3 -> everything except id=3.  Any other value now behaves
    like 1 — previously the bare `else: pass` left v_more_sql unassigned and
    the function raised NameError.
    """
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER,
                    config.DB_PASSWD, time_zone='+8:00')
    v_show_env_type = config.SHOW_ENV_TYPE
    if v_show_env_type == 2:
        # id=3 is the production environment
        v_more_sql = ' and id=3'
    elif v_show_env_type == 3:
        v_more_sql = ' and id!=3'
    else:
        # covers v_show_env_type == 1 and any unexpected value
        v_more_sql = ''
    str_sql = "select id,name from resources_env where 1=1 %s order by id" % (v_more_sql)
    try:
        # Materialize the rows before closing: db.iter() is lazy, so the old
        # code closed the connection before callers could consume the cursor.
        env_list = db.query(str_sql)
    finally:
        db.close()
    return env_list
def delete_object(info): """Delete object from DB""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # check if object_id already exists, then create object if not object_id = "" for item in db.query("select * from Object where name='{0}'".format(info["hostname"])): object_id = item.id # delete object if already exists if object_id: url = """http://{0}/racktables/index.php?module=redirect&op=deleteObject&page=depot&tab=addmore&object_id={1}""".format( rt_server, object_id ) req = requests.get(url, auth=rt_auth) if req.status_code != requests.codes.ok: print "Failed to delete the existing object: {0}".format(info["hostname"]) return False else: print "OK - Deleted the existing object: {0}".format(info["hostname"]) # close db db.close() return True
def do_update(database=None): """Perform databse update.""" # Pick up the database credentials creds = get_db_creds(database) # If we couldn't find corresponding credentials, throw a 404 if not creds: msg = "Unable to find credentials matching {0}." return {"ERROR": msg.format(database)}, 404 # Prepare the database connection app.logger.debug("Connecting to %s database (%s)" % ( database, request.remote_addr)) db = Connection(**creds) # See if we received a query sql = request.form.get('sql') if not sql: sql = request.args.get('sql') if not sql: return {"ERROR": "SQL query missing from request."}, 400 # If the query has a percent sign, we need to excape it if '%' in sql: sql = sql.replace('%', '%%') # Attempt to run the query try: app.logger.info("%s attempting to run \" %s \" against %s database" % ( request.remote_addr, sql, database)) results = db.update(sql) app.logger.info(results) except Exception, e: return {"ERROR": ": ".join(str(i) for i in e.args)}, 422
def _connection(config): conn = None try: if config.DB_BACKEND == 'mysql': from torndb import Connection conn = Connection("%s:%s" % (config.DB_HOST, config.DB_PORT), config.DB_NAME, user=config.DB_USER, password=config.DB_PASS) elif config.DB_BACKEND == 'sqlite': import sqlite3 conn = sqlite3.connect(config.DB_NAME) conn.row_factory = _dict_factory elif config.DB_BACKEND == 'postgres': import psycopg2 from psycopg2.extras import DictConnection conn = psycopg2.connect( database=config.DB_NAME, user=config.DB_USER, password=config.DB_PASS, host=config.DB_HOST, port=config.DB_PORT, connection_factory=DictConnection, ) else: raise ValueError("Unknown backend %r" % config.DB_BACKEND) yield conn finally: if conn is not None: conn.close()
def do_query(database=None): # Pick up the database credentials # app.logger.warning("%s requesting access to %s database" % ( # request.remote_addr, database)) creds = get_db_creds(database) # If we couldn't find corresponding credentials, throw a 404 if not creds: return {"ERROR": "Unable to find credentials matching %s." % database} abort(404) # Prepare the database connection app.logger.debug("Connecting to %s database (%s)" % ( database, request.remote_addr)) db = Connection(**creds) # See if we received a query sql = request.form.get('sql') if not sql: sql = request.args.get('sql') if not sql: return {"ERROR": "SQL query missing from request."} # If the query has a percent sign, we need to excape it if '%' in sql: sql = sql.replace('%', '%%') # Attempt to run the query try: app.logger.info("%s attempting to run \" %s \" against %s database" % ( request.remote_addr, sql, database)) results = db.query(sql) app.logger.info(results) except Exception, e: return {"ERROR": ": ".join(str(i) for i in e.args)}
def delete_object(info): """Delete object from DB""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # check if object_id already exists, then create object if not object_id = "" for item in db.query("select * from Object where name='{0}'".format( info['hostname'])): object_id = item.id # delete object if already exists if object_id: url = """http://{0}/racktables/index.php?module=redirect&op=deleteObject&page=depot&tab=addmore&object_id={1}"""\ .format(rt_server,object_id) req = requests.get(url, auth=rt_auth) if req.status_code != requests.codes.ok: print "Failed to delete the existing object: {0}".format( info['hostname']) return False else: print "OK - Deleted the existing object: {0}".format( info['hostname']) # close db db.close() return True
def _connection(config): conn = None try: if config.DB_BACKEND == 'mysql': from torndb import Connection conn = Connection( "%s:%s" % (config.DB_HOST, config.DB_PORT), config.DB_NAME, user=config.DB_USER, password=config.DB_PASS ) elif config.DB_BACKEND == 'sqlite': import sqlite3 conn = sqlite3.connect(config.DB_NAME) conn.row_factory = _dict_factory elif config.DB_BACKEND == 'postgres': import psycopg2 from psycopg2.extras import DictConnection conn = psycopg2.connect( database=config.DB_NAME, user=config.DB_USER, password=config.DB_PASS, host=config.DB_HOST, port=config.DB_PORT, connection_factory=DictConnection, ) else: raise ValueError("Unknown backend %r" % config.DB_BACKEND) yield conn finally: if conn is not None: conn.close()
def get_project_keys_from_mysql(self):
    """Fetch per-project public/secret keys from sentry's MySQL database.

    Returns the dict built up via self.add_project, or None when the query
    itself returned None.
    """
    from torndb import Connection
    db = Connection(
        "%s:%s" % (self.config.MYSQL_HOST, self.config.MYSQL_PORT),
        self.config.MYSQL_DB,
        user=self.config.MYSQL_USER,
        password=self.config.MYSQL_PASS
    )
    query = "select project_id, public_key, secret_key from sentry_projectkey"
    logging.info("Executing query %s in MySQL", query)
    project_keys = {}
    try:
        rows = db.query(query)
        if rows is None:
            return None
        for row in rows:
            logging.info("Updating information for project with id %s...",
                         row.project_id)
            self.add_project(project_keys, row.project_id,
                             row.public_key, row.secret_key)
    finally:
        # always release the connection, even if the query raised
        db.close()
    return project_keys
def update_rack(info, object_id): """Automate server audit for rack info into Racktables""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # update the rackspace if opts['rackspace']: rs_info = opts['rackspace'].split(':') colo = "".join(rs_info[0:1]) row = "".join(rs_info[1:2]) rack = "".join(rs_info[2:3]) atom = "".join(rs_info[3:4]) if not atom: print "The rackspace is not correct" return False # get rack_id for item in db.query( "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'" .format(rack, colo, row)): rack_id = item.id if not rack_id: print "Failed to get rack_id" return False atom_list = atom.split(',') atom_data = [] for i in atom_list: if opts['rackposition']: if opts['rackposition'] in ['left', 'front']: atom_data.append("&atom_{0}_{1}_0=on".format(rack_id, i)) if opts['rackposition'] in ['right', 'back']: atom_data.append("&atom_{0}_{1}_2=on".format(rack_id, i)) if opts['rackposition'] in ['interior']: atom_data.append("&atom_{0}_{1}_1=on".format(rack_id, i)) else: atom_data.append( "&atom_{0}_{1}_0=on&atom_{0}_{1}_1=on&atom_{0}_{1}_2=on". format(rack_id, i)) atom_url = "".join(atom_data) url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=rackspace&op=updateObjectAllocation""".format( rt_server) payload = """object_id={0}&rackmulti%5B%5D={1}&comment=&got_atoms=Save{2}"""\ .format(object_id,rack_id,atom_url) req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth) if req.status_code != requests.codes.ok: print "Failed to update rackspace" return False print "OK - Updated rackspace" # close db db.close() # end return True
def get(self):
    """Render the mobile article-list page (category filter or full-text search)."""
    # template_name = "article_list.html"
    template_name = "mobile/article_list.html"
    category = self.get_argument("category", "all")
    cur_page = self.get_argument("page", "0")
    query = self.get_argument("query", "")
    num_per_page = 5
    db = Connection(settings.DATABASE_SERVER, settings.DATABASE_NAME,
                    settings.DATABASE_USER, settings.DATABASE_PASSWORD, )
    # Build the WHERE clause with %s placeholders — the old code interpolated
    # raw request arguments straight into the SQL (injection risk).
    condition = "WHERE category=%s"
    params = [category]
    if category == "all":
        condition = ""
        params = []
    if query:
        # search overrides the category filter, as before
        condition = ("WHERE UPPER(title) LIKE %s OR UPPER(profile) LIKE %s "
                     "OR UPPER(author) LIKE %s OR UPPER(content) LIKE %s")
        params = ["%{0}%".format(query.upper())] * 4
    sql = "SELECT COUNT(*) FROM articles {0}".format(condition)
    count = db.query(sql, *params)[0]["COUNT(*)"]
    max_page = int(math.ceil((count + 0.0) / num_per_page))
    kwargs = dict(
        category=category,
        query=query,
        cur_page=int(cur_page),
        max_page=max_page)
    self.render(template_name, **kwargs)
def get(self, article_id):
    """Render the mobile article-details page and record a read event."""
    # template_name = "article_details.html"
    template_name = "mobile/article_details.html"
    db = Connection(settings.DATABASE_SERVER, settings.DATABASE_NAME,
                    settings.DATABASE_USER, settings.DATABASE_PASSWORD, )
    # Parameterized query — the old code .format()-ed the URL-supplied
    # article_id into the SQL (injection risk).
    article = db.query("SELECT * FROM articles WHERE id=%s", article_id)[0]
    article["url"] = urllib.quote(article["url"])
    # Update article read count; user_id 0 is recorded here — presumably
    # "anonymous", confirm against the article_reads consumers.
    now = datetime.datetime.now()
    db.execute(
        "INSERT INTO article_reads (`article_id`, `user_id`, `time`) "
        "VALUES (%s, %s, %s)",
        article_id, 0, now)
    kwargs = dict(article=article, day=article["day"])
    super(ArticleDetailsHandler, self).render(
        template_name,
        **kwargs
    )
def do_update(database=None): # Pick up the database credentials # app.logger.warning("%s requesting access to %s database" % ( # request.remote_addr, database)) creds = get_db_creds(database) # If we couldn't find corresponding credentials, throw a 404 if not creds: return {"ERROR": "Unable to find credentials matching %s." % database} abort(404) # Prepare the database connection app.logger.debug("Connecting to %s database (%s)" % ( database, request.remote_addr)) db = Connection(**creds) # See if we received a query sql = request.form.get('sql') if not sql: sql = request.args.get('sql') if not sql: return {"ERROR": "SQL query missing from request."} # If the query has a percent sign, we need to excape it if '%' in sql: sql = sql.replace('%', '%%') # Attempt to run the query try: app.logger.info("%s attempting to run \" %s \" against %s database" % ( request.remote_addr, sql, database)) results = db.update(sql) app.logger.info(results) except Exception, e: return {"ERROR": ": ".join(str(i) for i in e.args)}
def update_rack(info, object_id): """Automate server audit for rack info into Racktables""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # update the rackspace if opts["rackspace"]: rs_info = opts["rackspace"].split(":") colo = "".join(rs_info[0:1]) row = "".join(rs_info[1:2]) rack = "".join(rs_info[2:3]) atom = "".join(rs_info[3:4]) if not atom: print "The rackspace is not correct" return False # get rack_id for item in db.query( "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'".format( rack, colo, row ) ): rack_id = item.id if not rack_id: print "Failed to get rack_id" return False atom_list = atom.split(",") atom_data = [] for i in atom_list: if opts["rackposition"]: if opts["rackposition"] in ["left", "front"]: atom_data.append("&atom_{0}_{1}_0=on".format(rack_id, i)) if opts["rackposition"] in ["right", "back"]: atom_data.append("&atom_{0}_{1}_2=on".format(rack_id, i)) if opts["rackposition"] in ["interior"]: atom_data.append("&atom_{0}_{1}_1=on".format(rack_id, i)) else: atom_data.append("&atom_{0}_{1}_0=on&atom_{0}_{1}_1=on&atom_{0}_{1}_2=on".format(rack_id, i)) atom_url = "".join(atom_data) url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=rackspace&op=updateObjectAllocation""".format( rt_server ) payload = """object_id={0}&rackmulti%5B%5D={1}&comment=&got_atoms=Save{2}""".format( object_id, rack_id, atom_url ) req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth) if req.status_code != requests.codes.ok: print "Failed to update rackspace" return False print "OK - Updated rackspace" # close db db.close() # end return True
def execute_query(query, **kw_dict): """Execute query """ conn = Connection(**MYSQL_SETTINGS) if kw_dict is None: try: result = conn.execute(query) conn.close() return result except Exception, e: print e
def get_query_result(query, **kw_dict): """get query result """ conn = Connection(**MYSQL_SETTINGS) if kw_dict is None: try: result = conn.query(query) conn.close() return result except Exception, e: print e
def update_blank_switch_security_pdu_offline(info): """Automate server autodir for PatchPanel/NetworkSwitch/NetworkSecurity/PDU into Racktables or as offline mode""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # delete object if already exists delete_object(info) # create object url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(rt_server) if opts["blank"]: payload = """0_object_type_id=9&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format( info["hostname"] ) if opts["switch"]: payload = """0_object_type_id=8&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format( info["hostname"] ) if opts["security"]: payload = """0_object_type_id=798&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format( info["hostname"] ) if opts["pdu"]: payload = """0_object_type_id=2&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format( info["hostname"] ) if opts["offline"]: payload = """0_object_type_id=4&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21""".format( info["hostname"] ) req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth) if req.status_code != requests.codes.ok: print "Failed to create object: {0}".format(info["hostname"]) return False else: print "OK - Created object: {0}".format(info["hostname"]) # get object_id for item in db.query("select * from Object where name='{0}'".format(info["hostname"])): object_id = item.id if not object_id: print "Failed to get object_id" return False # update rack info update_rack(info, object_id) # close db db.close() # end return True
def get(self):
    """Render the article-list page with per-article read/comment counts."""
    category = self.get_argument("category", "all")
    cur_page = self.get_argument("page", "0")
    query = self.get_argument("query", "")
    num_per_page = 5
    db = Connection(settings.DATABASE_SERVER, settings.DATABASE_NAME,
                    settings.DATABASE_USER, settings.DATABASE_PASSWORD, )
    # Build the WHERE clause with %s placeholders — the old code interpolated
    # raw request arguments straight into the SQL (injection risk).
    condition = "WHERE category=%s"
    params = [category]
    if category == "all":
        condition = ""
        params = []
    if query:
        # search overrides the category filter, as before
        condition = ("WHERE UPPER(title) LIKE %s OR UPPER(profile) LIKE %s "
                     "OR UPPER(author) LIKE %s OR UPPER(content) LIKE %s")
        params = ["%{0}%".format(query.upper())] * 4
    sql = "SELECT COUNT(*) FROM articles {0}".format(condition)
    count = db.query(sql, *params)[0]["COUNT(*)"]
    max_page = int(math.ceil((count + 0.0) / num_per_page))
    # LIMIT values are computed from int(cur_page), so formatting them in
    # directly is safe; the LIKE patterns stay parameterized.
    sql = """SELECT articles.id AS id, title, profile, author, url, picUrl,
        articles.time AS time,
        (SELECT COUNT(*) FROM article_reads
            WHERE article_reads.article_id=articles.id) AS read_count,
        (SELECT COUNT(*) FROM article_comment
            WHERE article_comment.article_id=articles.id) AS comment_count
        FROM articles {0} ORDER BY articles.time DESC
        LIMIT {1}, {2};""".format(condition, int(cur_page) * num_per_page,
                                  num_per_page)
    articles = db.query(sql, *params)
    for art in articles:
        art["time"] = art["time"].strftime("%Y-%m-%d")
        art["picUrl"] = "/image-proxy/?url={0}".format(
            urllib.quote(art["picUrl"])
        )
    kwargs = dict(articles=articles, category=category,
                  cur_page=int(cur_page), max_page=max_page)
    # NOTE(review): empty template name and kwargs passed as a single keyword
    # (not **kwargs) — kept as-is; confirm against the base handler's render.
    self.render("", kwargs=kwargs)
def main():
    """Subscribe each group shake's owner to that shake (INSERT IGNORE makes
    this safe to re-run)."""
    db1 = Connection(options.database_host, options.database_user,
                     options.database_password)
    # grab all group shakes
    group_shakes = db1.query("""SELECT id, user_id FROM shake WHERE type=%s""", "group")
    for group_shake in group_shakes:
        db1.execute(
            """INSERT IGNORE INTO subscription (user_id, shake_id, deleted, created_at, updated_at) VALUES (%s, %s, 0, NOW(), NOW())""",
            group_shake['user_id'],
            group_shake['id'])
def update_blank_switch_security_pdu_offline(info): """Automate server autodir for PatchPanel/NetworkSwitch/NetworkSecurity/PDU into Racktables or as offline mode""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # delete object if already exists delete_object(info) # create object url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format( rt_server) if opts['blank']: payload = """0_object_type_id=9&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\ .format(info['hostname']) if opts['switch']: payload = """0_object_type_id=8&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\ .format(info['hostname']) if opts['security']: payload = """0_object_type_id=798&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\ .format(info['hostname']) if opts['pdu']: payload = """0_object_type_id=2&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\ .format(info['hostname']) if opts['offline']: payload = """0_object_type_id=4&0_object_name={0}&0_object_label=&0_object_asset_no={0}&got_fast_data=Go%21"""\ .format(info['hostname']) req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth) if req.status_code != requests.codes.ok: print "Failed to create object: {0}".format(info['hostname']) return False else: print "OK - Created object: {0}".format(info['hostname']) # get object_id for item in db.query("select * from Object where name='{0}'".format( info['hostname'])): object_id = item.id if not object_id: print "Failed to get object_id" return False # update rack info update_rack(info, object_id) # close db db.close() # end return True
def main(): db1 = Connection(options.database_host, options.database_user, options.database_password) #grab all shared files in order sfs = db1.query("""SELECT id FROM sharedfile ORDER BY created_at""") #for each, get counts for sf in sfs: likes = 0 saves = 0 like_counts = db1.query( "SELECT count(id) as like_count from favorite where sharedfile_id = %s and deleted=0", (sf.id)) if like_counts: likes = like_counts[0]['like_count'] save_counts = db1.query( "SELECT count(id) AS save_count FROM sharedfile WHERE original_id = %s and deleted = 0", sf.id) if save_counts[0]['save_count'] > 0: saves = save_counts[0]['save_count'] else: save_counts = db1.query( "SELECT count(id) AS save_count FROM sharedfile WHERE parent_id = %s and deleted = 0", sf.id) saves = save_counts[0]['save_count'] if likes > 0 or saves > 0: print "UPDATE sharedfile SET like_count = %s, save_count = %s WHERE id = %s" % ( likes, saves, sf.id) print db1.execute( "UPDATE sharedfile SET like_count = %s, save_count = %s WHERE id = %s", likes, saves, sf.id)
def log_dba_jobs_progress(v_job_id, v_cur_prog_desc, v_cur_cum_prog_desc, v_cur_prog_com_rate):
    """Append a progress row for job v_job_id into dba_job_progress.

    Uses a parameterized insert — the old code %-formatted the descriptions
    into the SQL (quote characters would break/inject the statement) and then
    had to hand-escape literal '%'.  The connection is closed even when the
    insert raises.
    """
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER,
                    config.DB_PASSWD, time_zone='+8:00')
    try:
        db.execute(
            "insert into dba_job_progress(job_id,cur_prog_desc,"
            "cur_cum_prog_desc,cur_prog_com_rate) values(%s,%s,%s,%s)",
            v_job_id, v_cur_prog_desc, v_cur_cum_prog_desc,
            v_cur_prog_com_rate)
    finally:
        db.close()
def final_dba_job(v_job_id):
    """Mark job v_job_id as finished (status=1) in the dba_jobs queue.

    Parameterized update — the old code %d-formatted the id into the SQL and
    hand-escaped '%'.  The connection is closed even when the update raises.
    """
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER,
                    config.DB_PASSWD, time_zone='+8:00')
    try:
        # set the job-queue entry's status to "done"
        db.execute("update dba_jobs set status=1 where job_id=%s", v_job_id)
    finally:
        db.close()
def get_conn(db_id):
    """Return the cached Connection for configs['db'][db_id], creating it on
    first use (session time zone forced to SYSTEM)."""
    if db_id in _CONNS_:
        return _CONNS_[db_id]
    conn = _CONNS_[db_id] = Connection(**configs['db'][db_id])
    # drop any configured init_command before torndb hands args to MySQLdb
    conn._db_args.pop('init_command', None)
    conn.execute("SET TIME_ZONE = 'SYSTEM'")
    return conn
def __init__(self):
    """Build the Tornado application: settings, shared MySQL connection, and
    model wiring."""
    from d3status.urls import handlers, ui_modules
    from d3status.db import Model
    settings = dict(
        debug=options.debug,
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        login_url=options.login_url,
        xsrf_cookies=options.xsrf_cookies,
        cookie_secret=options.cookie_secret,
        ui_modules=ui_modules,
        #autoescape=None,
    )
    # d3status db connection (torndb expects the host as "host:port")
    self.db = Connection(
        host=options.mysql["host"] + ":" + options.mysql["port"],
        database=options.mysql["database"],
        user=options.mysql["user"],
        password=options.mysql["password"],
    )
    # make the shared connection available to all Model subclasses
    Model.setup_dbs({"db": self.db})
    super(Application, self).__init__(handlers, **settings)
def initial_dba_job(v_op_user, v_op_comment):
    """Insert a new row into dba_jobs and return its auto-increment job id.

    Parameterized insert — the old code %-formatted user-supplied text into
    the SQL (quote characters would break/inject it) and hand-escaped '%'.
    torndb's execute() returns the lastrowid.
    """
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER,
                    config.DB_PASSWD, time_zone='+8:00')
    try:
        v_job_id = db.execute(
            "insert into dba_jobs(op_user,job_desc) values(%s,%s)",
            v_op_user, v_op_comment)
    finally:
        db.close()
    return v_job_id
def upload_processlist():
    """Send a check-only request to an Inception server (wrapping a CREATE
    TABLE against `test`) and print each result row's stage/stagestatus.

    NOTE(review): the bare debug prints ("aa", the raw row list) and the
    commented-out alternate connection look like leftovers, and the function
    name does not match what the body currently does — confirm intent.
    """
    # connect to the configuration-center DB
    # db = Connection('/home/apps/inception/inc.socket',
    #                 '',
    #                 '',
    #                 '',
    #                 time_zone='+8:00')
    db = Connection("127.0.0.1:6669", "", "", "", time_zone="+8:00")
    print "aa"
    # Inception "magic" wrapper: credentials/options comment block, then the
    # statements to check between magic_start/magic_commit
    v_sql = r"""/*--user=mysqladmin;--password=mysql;--host=172.26.137.125; --enable-check;--port=3306;*/ inception_magic_start; use test; CREATE TABLE adaptive_office23(id int); inception_magic_commit;"""
    # print v_sql
    upload_server_list = db.iter(v_sql)
    if upload_server_list:
        # loop over the returned result rows
        i = 0
        print upload_server_list
        for upload_server in upload_server_list:
            stage = upload_server["stage"]
            print stage
            stagestatus = upload_server["stagestatus"]
            print stagestatus
            # mysql_port = upload_server['port']
            # v_host =host_ip + ':' + str(mysql_port)
            # i=i+1
    db.close()
def getInstance():
    """Return the lazily-created MysqlConn singleton (double-checked locking).

    Fixed: the lock is released via a with-block even if __DBUtilInit raises.
    The old acquire()/release() pair had no try/finally, so an exception in
    initialization left the lock held forever and deadlocked later callers.
    """
    if not MysqlConn.__instance:
        with MysqlConn.__lock:
            # re-check under the lock: another thread may have won the race
            if not MysqlConn.__instance:
                MysqlConn.__instance = Connection.__new__(MysqlConn)
                MysqlConn.__instance.__DBUtilInit()
    return MysqlConn.__instance
def get_job_status(v_job_id):
    """Return the most recent dba_job_progress row for job v_job_id.

    Parameterized query — the old code %d-formatted the id into the SQL.
    The connection is closed even when the query raises.
    """
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER,
                    config.DB_PASSWD, time_zone='+8:00')
    try:
        job_list = db.query(
            "SELECT cur_cum_prog_desc,cur_prog_com_rate,cur_prog_shell_cmd "
            "from dba_job_progress where id="
            "(select max(id) from dba_job_progress where job_id=%s)",
            v_job_id)
    finally:
        db.close()
    return job_list
def __init__(self):
    """Wire up the redis and MySQL clients, then register the URL routes."""
    self.c = tornadoredis.Client()
    self.c.connect()
    self.db = Connection('127.0.0.1:3306', 'mysql',
                         user='******',
                         password='******')
    routes = [(r"/", IndexHandler)]
    tornado.web.Application.__init__(self, routes, debug=True)
def create_mysql_engine():
    """Create a torndb MySQL connection from the MYSQL_URL setting."""
    parsed = ParseMySQL(MYSQL_URL, callback="tuple")
    protocol, host, port, user, password, database, charset, timezone = parsed
    return Connection(host="{}:{}".format(host, port),
                      database=database,
                      user=user,
                      password=password,
                      time_zone=timezone,
                      charset=charset)
def post(self):
    """Create a new article from the submitted form and redirect to it."""
    title = self.get_argument("title", "")
    author = self.get_argument("author", "")
    category = self.get_argument("category", "")
    date = self.get_argument("date", "")
    profile = self.get_argument("profile", "")
    picUrl = self.get_argument("picUrl", "")
    url = self.get_argument("url", "")
    time = datetime.datetime.strptime(date, "%m/%d/%Y")
    # weekday() -> short English day name (0 == Monday)
    day = {
        0: "Mon",
        1: "Tue",
        2: "Wed",
        3: "Thu",
        4: "Fri",
        5: "Sat",
        6: "Sun",
    }[time.weekday()]
    db = Connection(settings.DATABASE_SERVER, settings.DATABASE_NAME,
                    settings.DATABASE_USER, settings.DATABASE_PASSWORD, )
    # Parameterized insert — the old code .format()-ed user-supplied form
    # fields straight into the SQL (injection / broken-quote risk).
    lastrowid = db.execute(
        "INSERT INTO articles (`title`, `author`, `day`, `time`, `url`, "
        "`profile`, `picUrl`, `category`) "
        "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)",
        title, author, day, time, url, profile, picUrl, category)
    if lastrowid:
        self.redirect("/articles/details/{0}/".format(lastrowid))
    else:
        template_name = "/upload_error.html"
        self.render(template_name)
def get_domain_list_from_env(v_role_id, belong_env):
    """Return the (id, name) apps linked to role v_role_id whose app_type is
    belong_env.

    Parameterized query — the old code %d-formatted the ids into the SQL.
    The connection is closed even when the query raises.
    """
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER,
                    config.DB_PASSWD, time_zone='+8:00')
    try:
        app_list = db.query(
            "select b.id,b.name from resources_role_app a,resources_app b "
            "where a.app_id = b.id and a.role_id = %s and b.app_type = %s",
            v_role_id, belong_env)
    finally:
        db.close()
    return app_list
def main(): from torndb import Connection db = Connection('localhost', 'evedump', user='******', password='******') item = ItemFactory(db, sys.argv[1]) print item.cost print item.minsell print item.profit
def get_latest_article(self, from_user, to_user, timestamp):
    """Build a single-picture reply describing the most recent article."""
    db = Connection(settings.DATABASE_SERVER, settings.DATABASE_NAME,
                    settings.DATABASE_USER, settings.DATABASE_PASSWORD, )
    newest = db.query("SELECT * FROM articles ORDER BY time DESC LIMIT 1")[0]
    link = "{0}/m#/article_details/{1}".format(
        settings.SITE_HTTP_URL, newest["id"]
    )
    return self.make_single_pic_response(
        from_user, to_user, timestamp,
        newest["title"],
        newest["profile"],
        newest["picUrl"],
        link)
def __init__(self):
    """Tornado application setup: debug flag, template path, shared MySQL db."""
    setting = dict(debug=options.debug,
                   template_path=os.path.join(os.path.dirname(__file__), "templates"))
    # torndb expects the host as "host:port"
    self.db = Connection(host=options.mysql["host"] + ":" + options.mysql["port"],
                         database=options.mysql["database"],
                         user=options.mysql["username"],
                         password=options.mysql["password"])
    # expose the shared connection to every Model subclass
    Model.setup_dbs({"db": self.db})
    # NOTE(review): `handlers` is not defined in this method — presumably a
    # module-level route list; confirm.
    super(Application, self).__init__(handlers, **setting)
def remote_start_mysql_server(v_host, v_os_user, v_os_password, v_db_port):
    """Start the MySQL/MariaDB server on a remote host over SSH (paramiko),
    using the flavour recorded for ip/port in the `tag` table.

    Returns the remote-shell result; an empty string indicates success.
    NOTE(review): `db` is used below but its Connection(...) construction is
    commented out — this raises NameError unless a module-level `db` exists;
    confirm.
    """
    # v_mysql_version 1: mysql5 4:mariadb5 5:mariadb10
    # db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone='+8:00')
    # Look up the server flavour: mysql5 / mariadb5 / mariadb10 / percona5.6 / pxc5.6
    v_get_sql = r'''SELECT case mysql_version when 1 then 'mysql5' when 4 then 'mariadb5' when 5 then 'mariadb10' when 6 then 'percona5.6' when 7 then 'pxc5.6' end mysql_version from tag where ip='%s' and port=%d ''' % (v_host, int(v_db_port))
    v_list = db.get(v_get_sql)
    v_mysql_version = v_list['mysql_version']
    db.close()
    #v_db_socket='--socket=/tmp/mysql'+str(v_db_port)+'.sock'
    # nohup keeps mysqld_safe alive after the paramiko session exits; the
    # 1>/dev/null 2>&1 redirection is required or the command does not run
    # under paramiko (the output streams must be detached)
    v_exe_cmd = r'''nohup /apps/svr/%s/bin/mysqld_safe --defaults-file=%s/%s_%d.cnf 1>/dev/null 2>&1 &''' % (
        v_mysql_version, config.mysql_conf_path, v_mysql_version, v_db_port)
    print v_exe_cmd
    # run the command on the remote host via paramiko
    result = remote_shell_cmd_no_result(v_host, v_os_user, v_os_password, v_exe_cmd)
    # Wait 20s for mysql to come up fully; polling until the server answers
    # would be better (original author's note)
    time.sleep(20)
    return result  # an empty string means success
def create_mysql_engine(mysql_url=None):
    """Create a torndb MySQL connection from mysql_url, falling back to the
    config.MYSQL setting when no URL is given."""
    from torndb import Connection
    from config import MYSQL as MYSQL_URL
    url = mysql_url or MYSQL_URL
    parsed = ParseMySQL(url, callback="tuple")
    protocol, host, port, user, password, database, charset, timezone = parsed
    return Connection(host="{}:{}".format(host, port),
                      database=database,
                      user=user,
                      password=password,
                      time_zone=timezone,
                      charset=charset)
def list_object(info): """List the objects of the given rackspace""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # check if rackspace is correct rs_info = opts['hostname'].split(':') colo = "".join(rs_info[0:1]) row = "".join(rs_info[1:2]) rack = "".join(rs_info[2:3]) if not rack: print "The rackspace is not correct" return False # get rack_id for item in db.query( "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'" .format(rack, colo, row)): rack_id = item.id if not rack_id: print "Failed to get rack_id" return False # get object_id object_id_list = [] for item in db.query( "select * from RackSpace where rack_id={0}".format(rack_id)): object_id_list.append(item.object_id) if len(object_id_list) == 0: print "Failed to get object_id" return False # get rid of the duplicated items then sort and read one by one for object_id in sorted(list(set(object_id_list))): for item in db.query( "select * from Object where id={0}".format(object_id)): object_name = item.name object_type_id = item.objtype_id for item in db.query( "select * from Dictionary where dict_key={0}".format( object_type_id)): object_type_name = item.dict_value print "{0}: {1}".format(object_type_name, object_name) # close db db.close() return True
def get_conn(db_name, pre_sqls=None):
    """Return the cached localhost Connection to db_name, creating it on
    first use and running any pre_sqls statements once after connect."""
    if db_name in _CONNS_:
        return _CONNS_[db_name]
    conn = _CONNS_[db_name] = Connection(host='localhost',
                                         database=db_name,
                                         user='******',
                                         password='******')
    # drop any configured init_command before torndb hands args to MySQLdb
    conn._db_args.pop('init_command', None)
    conn.execute("set TIME_ZONE = 'SYSTEM'")
    for statement in (pre_sqls or []):
        conn.execute(statement)
    return conn
def read_db(info): """Get info from Racktables DB""" # connect to racktables db db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass) # check if object_id already exists object_id = "" for item in db.query("select * from Object where name='{0}'".format( info['hostname'])): object_id = item.id if not object_id: print "Object:{0} does not exist".format(info['hostname']) return False # get the location info rack_id_list = [] unit_no_list = [] for item in db.query( "select rack_id,unit_no from RackSpace where object_id=(select id from Object where name='{0}')" .format(info['hostname'])): rack_id_list.append(int(item.rack_id)) unit_no_list.append(int(item.unit_no)) if not item: print "Object:{0} does not have location info".format(info['hostname']) return False rack_id = ','.join(str(i) for i in list(set(rack_id_list))) unit_no = ','.join(str(i) for i in list(set(unit_no_list))) location_name = "" row_name = "" rack_name = "" for item in db.query( "select location_name,row_name,name from Rack where id='{0}'". format(rack_id)): location_name = item.location_name row_name = item.row_name rack_name = item.name if not location_name or not row_name or not rack_name: print "Object:{0} does not have location info".format(info['hostname']) return False print "RACKSPACE: {0}:{1}:{2}:{3}".format(location_name, row_name, rack_name, unit_no) # close db db.close() return { 'location_name': location_name, 'row_name': row_name, 'rack_name': rack_name, 'unit_no': unit_no }
def main(): db1 = Connection(options.database_host, options.database_user, options.database_password) db1.execute("DELETE FROM post WHERE 1") ssfs = db1.query("""SELECT shake_id, sharedfile_id from shakesharedfile order by created_at""") for shakesharedfile in ssfs: sf = db1.get("""SELECT id, source_id, name, deleted, created_at FROM sharedfile WHERE id = %s""", shakesharedfile['sharedfile_id']) print "%s. Adding posts for sharedfile: %s created at %s." % (sf.id, sf.name, sf.created_at) add_posts(shake_id=shakesharedfile['shake_id'], sharedfile_id=sf['id'], sourcefile_id=sf['source_id'], deleted=sf['deleted'], created_at=sf['created_at'])
def remote_off_get_datadir_path(v_host, v_db_port, v_type):
    """Build the datadir path (e.g. /apps/dbdat/mariadb10_data3306) for the
    MySQL flavour recorded in the `tag` table for v_host:v_db_port.

    v_type == 1 returns the absolute path under config.mysql_datadir_path;
    any other value returns just the "<flavour>_data<port>" suffix.
    NOTE(review): `db` is used below but its Connection(...) construction is
    commented out — this raises NameError unless a module-level `db` exists;
    confirm.
    """
    # v_mysql_version 1: mysql5 4:mariadb5 5:mariadb10
    # db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone='+8:00')
    # Look up the server flavour: mysql5 / mariadb5 / mariadb10 / percona5.6 / pxc5.6
    v_get_sql = r'''SELECT case mysql_version when 1 then 'mysql5' when 4 then 'mariadb5' when 5 then 'mariadb10' when 6 then 'percona5.6' when 7 then 'pxc5.6' end mysql_version from tag where ip='%s' and port=%d ''' % (v_host, int(v_db_port))
    v_list = db.get(v_get_sql)
    v_mysql_version = v_list['mysql_version']
    db.close()
    # /apps/dbdat/mariadb10_data3306
    if v_type == 1:
        v_datadir_path = '%s/%s_data%s' % (config.mysql_datadir_path, v_mysql_version, v_db_port)
    else:
        v_datadir_path = '%s_data%s' % (v_mysql_version, v_db_port)
    print v_datadir_path
    return v_datadir_path
def update(self):
    """Refresh self.application.project_keys from sentry's MySQL database.

    Builds {project_id: {"public_key": [...], "secret_key": [...]}} and
    assigns it in one step at the end; the connection is always closed.
    """
    config = self.application.config
    db = Connection(
        "%s:%s" % (config.MYSQL_HOST, config.MYSQL_PORT),
        config.MYSQL_DB,
        user=config.MYSQL_USER,
        password=config.MYSQL_PASS
    )
    query = "select project_id, public_key, secret_key from sentry_projectkey"
    logging.info("Executing query %s in MySQL" % query)
    projects = {}
    try:
        db_projects = db.query(query)
        if db_projects is None:
            # torndb's query() returns None when the server went away.
            # FIX: logging.warn is deprecated; message typo corrected.
            logging.warning("Could not retrieve information from sentry's database because MySQL Server was unavailable")
            return
        for project in db_projects:
            logging.info("Updating information for project with id %s..." % project.project_id)
            # a project can have several key pairs; collect them all
            if project.project_id not in projects:
                projects[project.project_id] = {
                    "public_key": [],
                    "secret_key": []
                }
            projects[project.project_id]['public_key'].append(project.public_key)
            projects[project.project_id]['secret_key'].append(project.secret_key)
        self.application.project_keys = projects
    finally:
        db.close()
def search_for_articles(self, from_user, to_user, timestamp, query): db = Connection(settings.DATABASE_SERVER, settings.DATABASE_NAME, settings.DATABASE_USER, settings.DATABASE_PASSWORD, ) # Simple way to avoid SQL insertion attack if query.strip() and ";" not in query: condition = u"""WHERE UPPER(title) LIKE '%%{0}%%' OR UPPER(profile) LIKE '%%{0}%%' OR UPPER(author) LIKE '%%{0}%%' OR UPPER(content) LIKE '%%{0}%%' """.format(query.strip().upper()) else: condition = "" sql = u"SELECT * FROM articles {0} ORDER BY time DESC LIMIT 10".format( condition) articles = db.query(sql) if len(articles) > 0: return self.make_multi_pic_response( from_user, to_user, timestamp, [article["title"] for article in articles], [article["profile"] for article in articles], [article["picUrl"] for article in articles], ["{0}/m#/article_details/{1}".format( settings.SITE_HTTP_URL, article["id"] ) for article in articles]) else: return self.make_text_response( from_user, to_user, timestamp, u"还没有关于“%s”的内容哦!你有什么想法呢?告诉微君吧!" % query)
def __init__(self):
    """Set up the DB connection, worker pool and default request headers."""
    # pull connection settings out of mysql_config, with local defaults
    host = mysql_config.get('host', '127.0.0.1')
    database = mysql_config.get('database', 'test')
    user = mysql_config.get('user', 'root')
    password = mysql_config.get('password', '')
    self.db = Connection(host=host, database=database,
                         user=user, password=password)
    self.executor = ThreadPoolExecutor(max_workers=4)
    # self.requests = requests.Session()
    # headers sent with every crawler request
    self.headers = {
        'Host': 'www.zhihu.com',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'http://www.zhihu.com/topics',
    }
def run_server(conf_root=''):
    """Configure and start the Tornado WebDAV server.

    Reads CONFIG_FILE from conf_root when present, builds the user DB and
    auth handler (MySQL- or file-backed; basic or digest), then listens on
    options.port, optionally over SSL.  Blocks in the IOLoop forever.
    """
    conf_file = os.path.abspath( os.path.join (conf_root, CONFIG_FILE))
    if os.path.exists(conf_file):
        tornado.options.parse_config_file( conf_file )
    # command-line flags override config-file values
    tornado.options.parse_command_line()
    db = Connection(options.mysql_host, options.mysql_name, options.mysql_user, options.mysql_passwd)
    usersdb = {}
    # 'MYSQL' is a sentinel: load users from the database instead of a file
    if options.auth_file == 'MYSQL':
        users = DbSqlAuth(options.realm, db)
        usersdb = users._usersdb
    else:
        auth_file = os.path.abspath( os.path.join (conf_root, options.auth_file) )
        if os.path.exists(auth_file):
            users = DbFileAuth(options.realm, auth_file)
            usersdb = users._usersdb
    if options.auth_type == 'basic':
        auth = BasicAuth (usersdb, options.realm)
    elif options.auth_type == 'digest':
        auth = DigestAuth (usersdb, options.realm)
    else:
        # any other auth_type disables authentication entirely
        auth = None
    settings = {
        "static_path": os.path.join(os.path.dirname(__file__), "static"),
    }
    application = DavApplication (options.root, auth, db, settings)
    use_ssl = options.use_ssl
    if use_ssl:
        # NOTE(review): option is spelled 'ssl_cretfile' (sic); keep in sync
        # with wherever the options are defined.
        ssl_options=dict(
            certfile=options.ssl_cretfile,
            keyfile=options.ssl_keyfile,
        )
        http_server = tornado.httpserver.HTTPServer(application, ssl_options = ssl_options)
    else:
        http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
def delete_posts(sharedfile_id=0, **kwargs):
    """
    Soft-delete every post that references the given shared file:
    flips deleted to 1 on all not-yet-deleted post rows.
    """
    conn = Connection(options.database_host, options.database_name,
                      options.database_user, options.database_password)
    conn.execute(
        """UPDATE post SET deleted=1 WHERE sharedfile_id = %s AND deleted=0""",
        sharedfile_id)
    conn.close()
def calculate_likes(sharedfile_id, **kwargs):
    """
    Recount the favorites of a shared file, store the fresh count in
    sharedfile.like_count, and hand it to tweet_or_magic for any
    milestone side effects.
    """
    conn = Connection(options.database_host, options.database_name,
                      options.database_user, options.database_password)
    row = conn.get(
        "SELECT count(id) as like_count from favorite where sharedfile_id = %s and deleted=0",
        sharedfile_id)
    count = int(row['like_count'])
    conn.execute("UPDATE sharedfile set like_count = %s WHERE id = %s",
                 count, sharedfile_id)
    tweet_or_magic(conn, sharedfile_id, count)
    conn.close()
def list_object(info):
    """List the objects racked in the given rackspace.

    The rackspace spec "colo:row:rack" is read from opts["hostname"].
    Returns True on success, False otherwise.
    NOTE(review): the `info` parameter is unused -- the spec comes from the
    module-level `opts`; confirm that is intended.
    """
    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)
    # check if rackspace is correct
    rs_info = opts["hostname"].split(":")
    colo = "".join(rs_info[0:1])
    row = "".join(rs_info[1:2])
    rack = "".join(rs_info[2:3])
    if not rack:
        print("The rackspace is not correct")
        return False
    # get rack_id
    # BUG FIX: rack_id was referenced after the loop without being
    # initialised, raising NameError when the query matched nothing.
    rack_id = ""
    for item in db.query(
        "select * from Rack where name = '{0}' and location_name = '{1}' and row_name = '{2}'".format(rack, colo, row)
    ):
        rack_id = item.id
    if not rack_id:
        print("Failed to get rack_id")
        return False
    # get object_id
    object_id_list = []
    for item in db.query("select * from RackSpace where rack_id={0}".format(rack_id)):
        object_id_list.append(item.object_id)
    if not object_id_list:
        print("Failed to get object_id")
        return False
    # drop duplicates, then report each object sorted by id
    for object_id in sorted(set(object_id_list)):
        for item in db.query("select * from Object where id={0}".format(object_id)):
            object_name = item.name
            object_type_id = item.objtype_id
        for item in db.query("select * from Dictionary where dict_key={0}".format(object_type_id)):
            object_type_name = item.dict_value
        print("{0}: {1}".format(object_type_name, object_name))
    # close db
    db.close()
    return True
def __init__(self):
    """Initialise Redis, MySQL and task-queue handles for the service."""
    # global timeout in seconds (reused as connect and idle timeout below)
    self.timeout = 2
    # single-node or cluster Redis connection
    self.redis = from_url(REDIS)
    # parse the MySQL DSN and build the (nominally read/write split) connection
    self._minfo = ParseMySQL(MYSQL)
    self._mysql = Connection(host="%s:%s" % (self._minfo.get(
        'Host', '127.0.0.1'), self._minfo.get('Port', 3306)),
        user=self._minfo.get('User', 'root'),
        password=self._minfo.get('Password'),
        database=self._minfo.get('Database'),
        time_zone=self._minfo.get(
            'Timezone', '+8:00'),
        charset=self._minfo.get('Charset', 'utf8'),
        connect_timeout=self.timeout,
        max_idle_time=self.timeout)
    # read and write handles share one connection -- no real splitting yet
    self.mysql_read = self._mysql
    self.mysql_write = self._mysql
    # async queues (presumably RQ) at default, low and high priority,
    # all backed by the same Redis -- confirm against the Queue import
    self.asyncQueue = Queue(connection=self.redis)
    self.asyncQueueLow = Queue(name='low', connection=self.redis)
    self.asyncQueueHigh = Queue(name='high', connection=self.redis)
def read_db(info):
    """Look up an object's rackspace location in the Racktables DB.

    info: dict with at least 'hostname'.
    Returns {'location_name', 'row_name', 'rack_name', 'unit_no'} on
    success, or False when the object or its location is missing.
    """
    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)
    # check if the object exists
    object_id = ""
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print("Object:{0} does not exist".format(info["hostname"]))
        db.close()  # FIX: don't leak the connection on early return
        return False
    # get the location info (an object can occupy several rack units)
    rack_id_list = []
    unit_no_list = []
    for item in db.query(
        "select rack_id,unit_no from RackSpace where object_id=(select id from Object where name='{0}')".format(
            info["hostname"]
        )
    ):
        rack_id_list.append(int(item.rack_id))
        unit_no_list.append(int(item.unit_no))
    # BUG FIX: the old code tested the loop variable inside the loop, which
    # can never detect an empty result set; test the collected list instead.
    if not rack_id_list:
        print("Object:{0} does not have location info".format(info["hostname"]))
        db.close()
        return False
    rack_id = ",".join(str(i) for i in list(set(rack_id_list)))
    unit_no = ",".join(str(i) for i in list(set(unit_no_list)))
    location_name = ""
    row_name = ""
    rack_name = ""
    # NOTE(review): rack_id may be a comma-joined list, but this query only
    # matches a single id -- objects spanning racks resolve nothing. Confirm.
    for item in db.query("select location_name,row_name,name from Rack where id='{0}'".format(rack_id)):
        location_name = item.location_name
        row_name = item.row_name
        rack_name = item.name
    if not location_name or not row_name or not rack_name:
        print("Object:{0} does not have location info".format(info["hostname"]))
        db.close()
        return False
    print("RACKSPACE: {0}:{1}:{2}:{3}".format(location_name, row_name, rack_name, unit_no))
    # close db
    db.close()
    return {"location_name": location_name, "row_name": row_name, "rack_name": rack_name, "unit_no": unit_no}
def get(self):
    """Search listings by keyword(s) and max price; render search.html.

    `query` is split on spaces: a single term does plain LIKE matching on
    address/district/title; multiple terms are folded into ordered
    '%a%b%...%' patterns tried in both forward and reverse order.
    """
    # NOTE(review): credentials are hard-coded; the trailing 25200 is
    # presumably a time-zone/timeout argument of this Connection -- confirm.
    db = Connection("127.0.0.1", "zfz", "zfz", "zfz...891", 25200)
    q = self.get_argument(name="query", default="")
    p = self.get_argument(name="price", default="999999999")
    # keep only the first run of digits in the price, else use the sentinel
    ma = re.search(r"\d+", p)
    if ma is None:
        p = "999999999"
    else:
        p = ma.group(0)
    # strip characters that would break the LIKE patterns / SQL
    q = q.lstrip().rstrip().replace("'", "").replace('"', "").replace("#", "").replace("%", "")
    qs = q.split(" ")
    if len(q) > 0:
        if len(qs) == 1:
            # single term: straight substring match on three columns
            m = "%" + q + "%"
            items = db.query(
                "select title, url, price, area, arch, address, district "
                "from pages where price <= %s and (address like %s or district like %s or title like %s) "
                "order by date desc limit 20",
                p,
                m,
                m,
                m,
            )
        else:
            # l/r: first and last terms; m1/m2: middle terms joined with '%'
            # in forward and reverse order respectively
            l = qs[0]
            r = qs[-1]
            m1 = ""
            m2 = ""
            for i in range(1, len(qs) - 1):
                m1 += "%" + qs[i]
                m2 += "%" + qs[len(qs) - 1 - i]
            m1 += "%"
            m2 += "%"
            items = db.query(
                "select title, url, price, area, arch, address, district "
                "from pages where price <= %s and ((address like %s and district like %s) or "
                "(address like %s and district like %s) or "
                "(title like %s and address like %s) or "
                "(title like %s and address like %s and district like %s) or "
                # "title like %s or "
                "address like %s) "
                "order by date desc limit 20",
                p,
                "%" + l + m1,
                "%" + r + "%",
                "%" + r + m2,
                "%" + l + "%",
                "%" + l + "%",
                m1 + r + "%",
                "%" + l + "%",
                m1,
                "%" + r + "%",
                # '%' + l + m1 + r + '%',
                "%" + l + m1 + r + "%",
            )
    else:
        items = []
    if len(items) < 1:
        hit = False
    else:
        hit = True
    # blank the sentinel so the form's price box shows empty
    if p == "999999999":
        p = ""
    self.render("search.html", query=q, price=p, items=items, hit=hit)
def upload_auto_increment():
    """Collect auto-increment usage from every online MySQL instance.

    For each instance tagged online_flag=1 and is_showprocesslist=1, read
    every auto_increment column's current value and its ratio against the
    column type's maximum, refresh auto_increment_list in the config DB,
    then mail a warning when any ratio reaches 80%.
    """
    # connect to the configuration-center database
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone='+8:00')
    v_sql = r"""SELECT id,ip,port from tag b where online_flag=1 and is_showprocesslist=1"""
    print v_sql
    upload_server_list = db.query(v_sql)
    if upload_server_list:
        # loop over every instance
        i=0
        for upload_server in upload_server_list:
            instance_id = upload_server['id']
            host_ip = upload_server['ip']
            mysql_port = upload_server['port']
            v_host =host_ip + ':' + str(mysql_port)
            print v_host
            # connect to the remote instance
            db_remote = Connection(v_host, 'information_schema', config.DBA_QUERY_USER, config.DBA_QUERY_PASSWD, time_zone='+8:00')
            # MAX_VALUE is the type's unsigned maximum, shifted right one bit
            # for signed columns; AUTO_INCREMENT_RATIO = next value / max
            v_pl = r"""select information_schema . columns . TABLE_SCHEMA AS TABLE_SCHEMA, information_schema . columns . TABLE_NAME AS TABLE_NAME, information_schema . columns . COLUMN_NAME AS COLUMN_NAME, information_schema . columns . DATA_TYPE AS DATA_TYPE, information_schema . columns . COLUMN_TYPE AS COLUMN_TYPE, ((case information_schema . columns . DATA_TYPE when 'tinyint' then 255 when 'smallint' then 65535 when 'mediumint' then 16777215 when 'int' then 4294967295 when 'bigint' then 18446744073709551615 end) >> if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0), 0, 1)) AS MAX_VALUE, information_schema . tables . AUTO_INCREMENT AS AUTO_INCREMENT, (information_schema . tables . AUTO_INCREMENT / ((case information_schema . columns . DATA_TYPE when 'tinyint' then 255 when 'smallint' then 65535 when 'mediumint' then 16777215 when 'int' then 4294967295 when 'bigint' then 18446744073709551615 end) >> if((locate('unsigned', information_schema . columns . COLUMN_TYPE) > 0), 0, 1))) AS AUTO_INCREMENT_RATIO from (information_schema . columns join information_schema . tables on(((information_schema . columns . TABLE_SCHEMA = information_schema . tables . TABLE_SCHEMA) and (information_schema . columns . TABLE_NAME = information_schema . tables . TABLE_NAME)))) where ((information_schema . columns .TABLE_SCHEMA not in ('mysql', 'INFORMATION_SCHEMA', 'performance_schema','common_schema')) and (information_schema . tables . TABLE_TYPE = 'BASE TABLE') and (information_schema . columns . EXTRA = 'auto_increment'))"""
            print v_pl
            process_list = db_remote.query(v_pl)
            for process_row in process_list:
                vp_server_ip = v_host
                vp_instance_id = instance_id
                #vp_id = process_row['ID']
                vp_table_schema = process_row['TABLE_SCHEMA']
                vp_table_name = process_row['TABLE_NAME']
                vp_column_name = process_row['COLUMN_NAME']
                vp_data_type = process_row['DATA_TYPE']
                # (dead code kept from the processlist uploader this was
                # copied from: None values became SQL NULLs, quoted otherwise)
                #if vp_db is None:
                #    vp_db = 'NULL'
                #else:
                #    vp_db = "'"+vp_db+"'"
                #print vp_db
                vp_column_type = process_row['COLUMN_TYPE']
                vp_max_value = process_row['MAX_VALUE']
                vp_auto_increment = process_row['AUTO_INCREMENT']
                vp_auto_increment_ratio = process_row['AUTO_INCREMENT_RATIO']
                #vp_info = process_row['INFO']
                #if vp_info is None:
                #    vp_info = 'NULL'
                #else:
                #    #vp_info = "'"+vp_info+"'"
                #    vp_info = vp_info.replace('"',"'")  # replace double quotes with single quotes
                #    vp_info = '"'+vp_info+'"'  # guard against embedded single quotes
                # v_insert_sql='''insert into log_processlist(instance_id,TID,USER,HOST,DB,COMMAND,TIME,STATE,INFO,
                # TIME_MS,ROWS_SENT,ROWS_EXAMINED) values(%d,%d,'%s','%s','%s','%s',%d,'%s','%s',%d,%d,%d)''' % (
                # instance_id,vp_id,vp_user,vp_host,vp_db,vp_command,vp_time,vp_state,vp_info,vp_time_ms,vp_rows_sent,vp_rows_examined)
                #try:
                # delete-then-insert keeps exactly one row per
                # (instance, schema, table, column)
                v_delete_sql='''delete from auto_increment_list where INSTANCE_ID = '%s' and TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' and COLUMN_NAME = '%s' ''' % (
                    vp_instance_id,vp_table_schema,vp_table_name,vp_column_name)
                print v_delete_sql
                db.execute(v_delete_sql)
                v_insert_sql='''insert into auto_increment_list(instance_id,server_ip,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,DATA_TYPE,COLUMN_TYPE,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO) values(%d,'%s','%s','%s','%s','%s','%s',%d,%d,%d)''' % (
                    vp_instance_id,vp_server_ip,vp_table_schema,vp_table_name,vp_column_name,vp_data_type,vp_column_type,int(vp_max_value),int(vp_auto_increment),int(vp_auto_increment_ratio))
                print v_insert_sql
                db.execute(v_insert_sql)
                #db.execute(v_insert_sql)
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert process_list error!," + e.message + ',' + v_insert_sql
                #     log_w(text)
            db_remote.close()
            i=i+1
    # warn when any column's auto-increment has consumed >= 80% of its range
    v_sendmail_sql=r'''select count(*) count from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''
    if_list = db.query(v_sendmail_sql)
    for if_row in if_list:
        v_if = if_row['count']
        print v_if
        if v_if >= 1 :
            print '11'
            v_warn_sql=r'''select SERVER_IP,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,MAX_VALUE,AUTO_INCREMENT,AUTO_INCREMENT_RATIO from auto_increment_list b where AUTO_INCREMENT_RATIO >= 80 '''
            print v_warn_sql
            warn_list = db.query(v_warn_sql)
            # build the mail body: one line per offending column
            v_server_ip = '\r\n'
            for warn_row in warn_list:
                v_server_ip = v_server_ip + warn_row['SERVER_IP'] +'|对象名:' + warn_row['TABLE_SCHEMA'] +'|表名:'+ warn_row['TABLE_NAME'] + '|字段名:' + warn_row['COLUMN_NAME']+'\r\n'
            print v_server_ip
            v_msg_text = v_server_ip
            v_receiver = '[email protected],[email protected]'
            v_subject = '有快溢出的自增ID,细节请去监控页面查看'
            v_return = func.send_mail_to_devs(v_receiver,v_subject,v_msg_text)
    db.close()
def update_db(info):
    """Automate server audit into Racktables.

    Re-creates the object named info["hostname"] (deleting any existing
    one), pushes its attributes through the Racktables web UI endpoints,
    then updates the IP pool (non-EC2) and rack info (physical servers).
    Returns True on success, False on any failed step.
    """
    # connect to racktables db
    db = Connection(rt_server, rt_dbname, rt_dbuser, rt_dbpass)
    # get object_type_id
    for item in db.query("select * from Dictionary where dict_value='{0}'".format(info["server_type"])):
        object_type_id = item.dict_key
    # delete object if already exists
    delete_object(info)
    # create object
    url = """http://{0}/racktables/index.php?module=redirect&page=depot&tab=addmore&op=addObjects""".format(rt_server)
    if info["server_type"] in ["Server", "XenServer"]:
        payload = """0_object_type_id={0}&0_object_name={1}&0_object_label=&0_object_asset_no={1}&got_fast_data=Go%21""".format(
            object_type_id, info["hostname"]
        )
    if info["server_type"] in ["VM", "EC2"]:
        payload = """virtual_objects=&0_object_type_id={0}&0_object_name={1}&got_fast_data=Go%21""".format(
            object_type_id, info["hostname"]
        )
    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to create object: {0}".format(info["hostname"])
        return False
    else:
        print "OK - Created object: {0}".format(info["hostname"])
    object_id = ""
    # get object_id
    for item in db.query("select * from Object where name='{0}'".format(info["hostname"])):
        object_id = item.id
    if not object_id:
        print "Failed to get object_id"
        return False
    # get os_release_id
    os_release_key = ""
    for item in db.query("select * from Dictionary where dict_value='{0}'".format(info["os_release"])):
        os_release_key = item.dict_key
    if not os_release_key:
        print "Failed to get object_type_id, please add '{0}' to 'Configuration - Dictionary - Server OS type'.".format(
            info["os_release"]
        )
        return False
    # update the informations of object, all post data formats were got by firebug on firefox
    url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=edit&op=update""".format(rt_server)
    if info["server_type"] == "Server":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=2&4_value=0&5_attr_id=26&5_value=0&6_attr_id=10006&6_value={6}\
&7_attr_id=10003&7_value={7}&8_attr_id=1&8_value=&9_attr_id=28&9_value=&10_attr_id=21&10_value=&11_attr_id=4&11_value={8}\
&12_attr_id=24&12_value=&13_attr_id=10005&13_value={9}&14_attr_id=25&14_value=&num_attrs=15&object_comment=&submit.x=15&submit.y=13""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            os_release_key,
            info["swap"],
        )
    if info["server_type"] == "XenServer":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no={2}&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=1&7_value=&8_attr_id=28&8_value=&9_attr_id=4&9_value={8}&10_attr_id=24&10_value=&11_attr_id=10005&11_value={9}\
&12_attr_id=25&12_value=&13_attr_id=10008&13_value={10}&num_attrs=14&object_comment=&submit.x=18&submit.y=21""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            os_release_key,
            info["swap"],
            quote_plus(info["vm_list"]),
        )
    if info["server_type"] == "EC2":
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10010&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=19&submit.y=27""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            info["ec2_pubname"],
            os_release_key,
            info["swap"],
        )
    if info["server_type"] == "VM":
        # NOTE(review): this payload ends with a stray apostrophe before the
        # closing quotes ("...submit.y=14'") -- looks unintended; confirm.
        payload = """object_id={0}&object_type_id={1}&object_name={2}&object_label=&object_asset_no=&0_attr_id=14&0_value=&1_attr_id=10000&1_value={3}\
&2_attr_id=10004&2_value={4}&3_attr_id=3&3_value={5}&4_attr_id=26&4_value=0&5_attr_id=10006&5_value={6}&6_attr_id=10003&6_value={7}\
&7_attr_id=10007&7_value={8}&8_attr_id=4&8_value={9}&9_attr_id=24&9_value=&10_attr_id=10005&10_value={10}&num_attrs=11&object_comment=&submit.x=25&submit.y=14'""".format(
            object_id,
            object_type_id,
            info["hostname"],
            info["cpu_cores"],
            quote_plus(info["disk"]),
            info["fqdn"],
            info["memory"],
            quote_plus(info["network"]),
            info["resident_on"],
            os_release_key,
            info["swap"],
        )
    req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
    if req.status_code != requests.codes.ok:
        print "Failed to update attributes"
        return False
    print "OK - Updated attributes"
    # ec2 servers don't need to update the ip pool
    if info["server_type"] not in ["EC2"]:
        # update the ip pool; info["network"] looks like "eth0:1.2.3.4,..."
        nics = ("".join(info["network"].split())).split(",")
        for i in nics:
            nic_info = i.split(":")
            nic_name = "".join(nic_info[0:1])
            nic_addr = "".join(nic_info[1:2])
            # check if nic_name is not correct
            if nic_name.isalnum():
                # create nic
                url = """http://{0}/racktables/index.php?module=redirect&page=object&tab=ip&op=add""".format(rt_server)
                payload = """object_id={0}&bond_name={1}&ip={2}&bond_type=regular&submit.x=11&submit.y=6""".format(
                    object_id, nic_name, nic_addr
                )
                req = requests.post(url, data=payload, headers=rt_headers, auth=rt_auth)
                if req.status_code != requests.codes.ok:
                    print "Failed to update ip pool for {0}:{1}".format(nic_name, nic_addr)
                    return False
                print "OK - Updated ip pool for {0}:{1}".format(nic_name, nic_addr)
    # virtual servers don't need to update the rackspace
    if info["server_type"] not in ["EC2", "VM"]:
        # update rack info
        update_rack(info, object_id)
    # close db
    db.close()
    # end
    return True
import sys
import os

try:
    from atserverwebapp.db import Model
except ImportError:
    # running from a source checkout: put the repo root on sys.path
    sys.path.append(
        os.path.abspath(
            os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         ".." + os.sep + "..")))
    from atserverwebapp.db import Model


class FsjModel(Model):
    """Read-only model for the fszmonitor table."""

    def get_fsj_status(self):
        """Return all fszmonitor rows, or [] when the table is empty."""
        rows = self.db.query("""select * from fszmonitor""")
        if rows:
            return rows
        return []


if __name__ == "__main__":
    from torndb import Connection
    db = Connection(host="atserver" + ":" + "3306", database="atdb",
                    user="", password="")
    Model.setup_dbs({"db": db})
    # BUG FIX: the class is named FsjModel; the old code instantiated the
    # undefined name fsjModel and raised NameError.
    fsjm = FsjModel()
    fsjm.get_fsj_status()
# Torndb is a very thin wrapper around MySQLdb that makes it even easier to use MySQL. # Because it is very light, one can just go through the one-file python source # to learn how to use it. # Installation: pip install torndb # Official doc: http://torndb.readthedocs.org/en/latest/ # Source: https://github.com/bdarnell/torndb/blob/master/torndb.py from torndb import Connection # Connect by IP address db = Connection('127.0.0.1', 'database_name', user='******', password='******') # Connect by IP address with port db = Connection('127.0.0.1:1234', 'database_name', user='******', password='******') # Connect by socket db = Connection('/tmp/mysql.sock', 'database_name', user='******', password='******') # Connection over SSH, open a SSH tunnel with # ssh -L 1234:localhost:3306 [email protected] # then connect to 127.0.0.1:1234 # Retreive one object post = db.get("SELECT * FROM posts LIMIT 1") # Retreive several objects for post in db.query("SELECT * FROM posts"): print post.title # Insert one entry db.execute("INSERT INTO posts (user_id, content) VALUES (%s, %s)", 12, "hello world !") # Insert multiple entries
def get(self):
    """Search listings by keyword(s) and max price; render search.html.

    `query` is split on spaces: a single term does plain LIKE matching on
    address/district/title; multiple terms are folded into ordered
    '%a%b%...%' patterns tried in both forward and reverse order.
    """
    # NOTE(review): credentials are hard-coded; the trailing 25200 is
    # presumably a time-zone/timeout argument of this Connection -- confirm.
    db = Connection('127.0.0.1', 'zfz', 'zfz', 'zfz...891', 25200)
    q = self.get_argument(name="query", default="")
    p = self.get_argument(name="price", default="999999999")
    # keep only the first run of digits in the price, else use the sentinel
    ma = re.search(r'\d+', p)
    if ma is None:
        p = "999999999"
    else:
        p = ma.group(0)
    # strip characters that would break the LIKE patterns / SQL
    q = q.lstrip().rstrip().replace("'", "").replace('"', '').replace(
        '#', '').replace('%', '')
    qs = q.split(' ')
    if len(q) > 0:
        if len(qs) == 1:
            # single term: straight substring match on three columns
            m = '%' + q + '%'
            items = db.query(
                "select title, url, price, area, arch, address, district "
                "from pages where price <= %s and (address like %s or district like %s or title like %s) "
                "order by date desc limit 20", p, m, m, m)
        else:
            # l/r: first and last terms; m1/m2: middle terms joined with '%'
            # in forward and reverse order respectively
            l = qs[0]
            r = qs[-1]
            m1 = ''
            m2 = ''
            for i in range(1, len(qs) - 1):
                m1 += '%' + qs[i]
                m2 += '%' + qs[len(qs) - 1 - i]
            m1 += '%'
            m2 += '%'
            items = db.query(
                "select title, url, price, area, arch, address, district "
                "from pages where price <= %s and ((address like %s and district like %s) or "
                "(address like %s and district like %s) or "
                "(title like %s and address like %s) or "
                "(title like %s and address like %s and district like %s) or "
                # "title like %s or "
                "address like %s) "
                "order by date desc limit 20", p, '%' + l + m1,
                '%' + r + '%', '%' + r + m2, '%' + l + '%', '%' + l + '%',
                m1 + r + '%', '%' + l + '%', m1, '%' + r + '%',
                # '%' + l + m1 + r + '%',
                '%' + l + m1 + r + '%')
    else:
        items = []
    if len(items) < 1:
        hit = False
    else:
        hit = True
    # blank the sentinel so the form's price box shows empty
    if p == "999999999":
        p = ""
    self.render("search.html", query=q, price=p, items=items, hit=hit)
def add_posts(shake_id=0, sharedfile_id=0, sourcefile_id=0, deleted=0, created_at=None, **kwargs):
    """
    Insert a post row for every subscriber of the shake.  When the same
    sourcefile was already posted for a user, the new row is still
    inserted but flagged seen=1.  A final row (seen=1, deleted=0) is
    always inserted for the original poster.
    """
    db = Connection(options.database_host, options.database_name, options.database_user, options.database_password)
    #get subscribers for shake_id
    results = db.query(
        """SELECT user_id as id
            FROM subscription
            WHERE shake_id = %s and deleted = 0""", shake_id)
    for user in results:
        seen = 0
        #does this sourcefile already exist in post for this user?
        existing = db.query("""SELECT id FROM post WHERE user_id = %s and sourcefile_id = %s and deleted=0 ORDER BY created_at desc LIMIT 1""", user['id'], sourcefile_id)
        #if so, mark the new row as already seen (a row is inserted either way)
        if existing:
            seen = 1
        #preserve the original timestamp when given, otherwise use NOW()
        if created_at:
            db.execute("""INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at)
                        VALUES (%s, %s, %s, %s, %s, %s, %s)""", user['id'], sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at)
        else:
            db.execute("""INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at)
                        VALUES (%s, %s, %s, %s, %s, %s, NOW())""", user['id'], sourcefile_id, sharedfile_id, seen, deleted, shake_id)
    #now insert a post for the user who originally posted it.
    sharedfile = db.get("""SELECT user_id from sharedfile WHERE id = %s""", sharedfile_id)
    db.execute("""INSERT INTO post (user_id, sourcefile_id, sharedfile_id, seen, deleted, shake_id, created_at)
                VALUES (%s, %s, %s, %s, %s, %s, NOW())""", sharedfile['user_id'], sourcefile_id, sharedfile_id, 1, 0, shake_id)
    db.close()
def upload_user_stats():
    """Harvest table/index usage statistics from every online instance.

    Truncates and repopulates meta_table_statistics / meta_index_statistics
    from each remote instance's table_statistics / index_statistics views,
    appends a log_hot_tables snapshot, then remotely flushes the remote
    statistics tables over SSH.
    """
    # connect to the configuration-center database
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone='+8:00')
    # truncate before re-inserting
    db.execute('truncate table meta_table_statistics')
    # truncate before re-inserting
    db.execute('truncate table meta_index_statistics')
    # instance id 64 (E4S) is outside this platform's control -- excluded by hand
    v_sql = r"""SELECT b.ip,b.port,b.id as instance_id,a.id as schema_id,a.name as db_name from resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""
    #print v_sql
    upload_tables_list = db.query(v_sql)
    if upload_tables_list:
        # loop over every (instance, schema) pair
        i=0
        for upload_table in upload_tables_list:
            instance_id = upload_table['instance_id']
            schema_id = upload_table['schema_id']
            db_name = upload_table['db_name']
            host_ip = upload_table['ip']
            mysql_port = upload_table['port']
            v_host =host_ip + ':' + str(mysql_port)
            # connect to the remote instance
            db_remote = Connection(v_host, 'information_schema', config.DBA_QUERY_USER, config.DBA_QUERY_PASSWD, time_zone='+8:00')
            # fetch table metadata, then insert it
            v_pl = r"""SELECT table_schema,table_name,rows_read,rows_changed,rows_changed_x_indexes from table_statistics where table_schema='%s' """ % (db_name)
            #print v_pl
            table_list = db_remote.query(v_pl)
            for table_row in table_list:
                table_schema = table_row['table_schema']
                table_name = table_row['table_name']
                rows_read = table_row['rows_read']
                rows_changed = table_row['rows_changed']
                rows_changed_x_indexes = table_row['rows_changed_x_indexes']
                #try:
                v_insert_sql='''insert into meta_table_statistics(instance_id,schema_id,table_schema, table_name,rows_read,rows_changed,rows_changed_x_indexes) values(%d,%d,'%s','%s',%d,%d,%d)''' % (
                    instance_id,schema_id,table_schema, table_name,rows_read,rows_changed,rows_changed_x_indexes )
                #print v_insert_sql
                # escape '%' so torndb's own interpolation doesn't choke
                db.execute(v_insert_sql.replace('%','%%'))
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)
            # fetch index metadata, then insert it
            v_pl2 = r"""SELECT table_schema,table_name,index_name,rows_read from index_statistics where TABLE_SCHEMA='%s' """ % (db_name)
            table_list2 = db_remote.query(v_pl2)
            for table_row2 in table_list2:
                table_schema = table_row2['table_schema']
                table_name = table_row2['table_name']
                index_name = table_row2['index_name']
                rows_read = table_row2['rows_read']
                #try:
                v_insert_sql2='''insert into meta_index_statistics(instance_id,schema_id,table_schema, table_name,index_name,rows_read) values(%d,%d,'%s','%s','%s',%d)''' % (
                    instance_id,schema_id,table_schema, table_name,index_name,rows_read )
                #print v_insert_sql
                db.execute(v_insert_sql2.replace('%','%%'))
                # except Exception, e:
                #     print e.message
                #     print v_insert_sql
                #     text = "insert meta tables error!," + e.message + ',' + v_insert_sql
                #     log_w(text)
            db_remote.close()
            # build the log_hot_tables snapshot; statistic_time is the actual
            # generation time of the statistics
            # NOTE(review): table_schema here is whatever the LAST loop above
            # assigned -- it equals db_name only when results were non-empty;
            # confirm this filter is intended.
            v_insert_sql3='''insert into log_hot_tables(instance_id,schema_id,table_schema, table_name,TABLE_COMMENT,TABLE_CREATE_TIME,rows_read,rows_changed, rows_changed_x_indexes) select a.instance_id,a.schema_id,a.TABLE_SCHEMA, a.TABLE_NAME,b.TABLE_COMMENT,b.CREATE_TIME,a.ROWS_READ,a.ROWS_CHANGED, a.ROWS_CHANGED_X_INDEXES from meta_table_statistics a,meta_tables b where a.instance_id=%d and a.schema_id=%d and a.TABLE_SCHEMA='%s' and a.instance_id=b.instance_id and a.TABLE_SCHEMA=b.TABLE_SCHEMA and a.TABLE_NAME=b.TABLE_NAME''' % (
                instance_id,schema_id,table_schema )
            print v_insert_sql3
            db.execute(v_insert_sql3.replace('%','%%'))
            i=i+1
    db.close()
    # remotely flush the statistics tables
    # for remote execute
    os_user = '******'
    OS_APPS_PASSWD = config.OS_APPS_PASSWD
    DB_USER = config.DB_USER
    DB_PASSWD = config.DB_PASSWD
    # connect to the configuration-center database
    db = Connection('/tmp/mysql3306.sock', config.DB_NAME, config.DB_USER, config.DB_PASSWD, time_zone='+8:00')
    # instance id 64 (E4S) is outside this platform's control -- excluded by hand
    v_sql = r"""SELECT distinct b.ip,b.port from resources_schema a,tag b where b.online_flag=1 and a.owner=b.id and b.id !=64 order by a.id,b.id"""
    #print v_sql
    server_list = db.query(v_sql)
    if server_list:
        # loop over every instance
        i=0
        for single_server in server_list:
            host_ip = single_server['ip']
            mysql_port = single_server['port']
            # run the flush SQL on the remote host via paramiko
            exe_sql = 'flush table_statistics;flush index_statistics;'
            result = func.remote_exe_sql(host_ip,os_user,OS_APPS_PASSWD,mysql_port,'information_schema',exe_sql,DB_USER,DB_PASSWD)
            if result == '':
                result = '执行成功!'
            print result