def mqCallback(self, channel, method_frame, header_frame, body):
    """AMQP consume callback: push forbid instructions from MQ into ZooKeeper.

    body is a JSON object mapping db name -> forbid dict (must carry a
    Forbid.KEY_TYPE of FORBID_WORKING or FORBID_FOREVER).  Only the proxy
    master handles the message; a non-master returns without acking, so
    the broker can redeliver it elsewhere.
    """
    try:
        if not self.zk.is_proxy_master():
            return  # master's business
        data_dict = cjson.decode(body)
        # ** MUST ** ack
        channel.basic_ack(method_frame.delivery_tag)
        utils.log(utils.cur(), body, data_dict)
        if not isinstance(data_dict, dict):
            return
        for db, forbid in data_dict.iteritems():
            if not forbid[Forbid.KEY_TYPE] in (Forbid.FORBID_WORKING, Forbid.FORBID_FOREVER):
                # NOTE(review): this 'return' aborts processing of the
                # remaining dbs in the message; 'continue' (skip just this
                # db) may be what was intended -- confirm.
                return
            # stamp the moment the forbid takes effect
            forbid[Forbid.KEY_START] = time.time()
            path = os.path.join(ZKConf.ZK_PATH_FORBID, db)
            orig = self.get_path(path)
            if orig is False:
                # node absent: create it with the new forbid payload
                self.zk.mknode(path, cjson.encode(forbid))
            else:
                old = cjson.decode(orig)
                if old[Forbid.KEY_TYPE] == forbid[Forbid.KEY_TYPE] and \
                   old[Forbid.KEY_TYPE] == Forbid.FORBID_WORKING and \
                   old[Forbid.KEY_START] + old[Forbid.KEY_DURATION] > time.time():
                    # an identical WORKING forbid is still inside its
                    # time window -- leave the existing node alone
                    utils.log(utils.cur(), "still forbidding")
                else:
                    utils.log(utils.cur(), "change forbid")
                    # change /database/forbid/db
                    self.forbidinfo[db] = forbid
                    self.zk.set(path, cjson.encode(forbid))
    except Exception, err:
        utils.err(utils.cur(), err)
def mqCallback(self, channel, method_frame, header_frame, body):
    """AMQP consume callback: apply forbid updates from MQ to ZooKeeper.

    body: JSON object mapping db name -> forbid dict.  Only the proxy
    master processes the message; non-masters return before acking.
    """
    try:
        if not self.zk.is_proxy_master():
            return  # master's business
        data_dict = cjson.decode(body)
        # ** MUST ** ack
        channel.basic_ack(method_frame.delivery_tag)
        utils.log(utils.cur(), body, data_dict)
        if not isinstance(data_dict, dict):
            return
        for db, forbid in data_dict.iteritems():
            if not forbid[Forbid.KEY_TYPE] in (Forbid.FORBID_WORKING, Forbid.FORBID_FOREVER):
                # NOTE(review): aborts the remaining dbs in this message;
                # 'continue' may be intended -- confirm.
                return
            # forbid becomes active from now
            forbid[Forbid.KEY_START] = time.time()
            path = os.path.join(ZKConf.ZK_PATH_FORBID, db)
            orig = self.get_path(path)
            if orig is False:
                # no existing node: create with the new payload
                self.zk.mknode(path, cjson.encode(forbid))
            else:
                old = cjson.decode(orig)
                if (
                    old[Forbid.KEY_TYPE] == forbid[Forbid.KEY_TYPE] and
                    old[Forbid.KEY_TYPE] == Forbid.FORBID_WORKING and
                    old[Forbid.KEY_START] + old[Forbid.KEY_DURATION] > time.time()
                ):
                    # the same WORKING forbid is still in its window
                    utils.log(utils.cur(), "still forbidding")
                else:
                    utils.log(utils.cur(), "change forbid")
                    # change /database/forbid/db
                    self.forbidinfo[db] = forbid
                    self.zk.set(path, cjson.encode(forbid))
    except Exception, err:
        utils.err(utils.cur(), err)
def _zookeeperAuth(self, auth):
    """Authorize the connecting client's IP against ZooKeeper-stored lists.

    auth: parsed auth packet dict; auth["database"] selects the ZK node
    holding the per-db authed IP ranges.  Returns True when the peer is
    allowed, False when rejected (an IP_RESTRICTED error packet is
    written to the client first).
    NOTE(review): in the except branch is_legal is set to False but the
    function falls off without returning, so the caller receives None
    (falsy) -- confirm that is intended.
    """
    is_legal = True
    """ 1. check whether client IP is authorized """
    try:
        ippath = os.path.join(ZKConf.ZK_PATH_DB, auth["database"], ZKConf.KEY_IP)
        # utils.log(utils.cur(), ippath)
        # get_path returns the node payload, or False when it is missing
        is_legal = self.factory.get_path(ippath)
        # utils.log(utils.cur(), is_legal)
        if is_legal is not False:
            ip_json = is_legal
            is_legal = True
            # utils.log(utils.cur(), ip_json)
            try:
                # per-db allowed ranges; unparsable payload -> no per-db list
                self.ips = ip_helper.IpRangeList(*tuple(cjson.decode(ip_json)))
            except:
                self.ips = None
            peer = self.transport.getPeer()
            utils.log(utils.cur(), peer, self.ips, self.factory.ips)
            # allowed if the peer matches either the per-db list or the
            # factory-wide list
            if not ((self.ips and peer.host in self.ips) or
                    (self.factory.ips and peer.host in self.factory.ips)):
                is_legal = False
                ip_error = dict(ErrorCode.IP_RESTRICTED)
                ip_error["message"] = ip_error["message"] % {"ip":peer.host}
                self._write(self._goWrong(ip_error, self.next_idx))  # 2
        return is_legal
    except Exception, err:
        utils.err(utils.cur(), traceback.format_exc())
        is_legal = False
def watcher_rw_ip(self, event, true_path):
    """ZooKeeper watcher for .../dbconf/db_host_r|db_host_w changes.

    event: zk event (used only as a trigger); true_path: watched node,
    assumed shaped like /database/db_info/<db>/dbconf/<rw_type> -- the
    db is taken from path component 3 and rw_type from the last one
    (TODO confirm against the registration in register_watches).
    Drops every live backend connection whose host differs from the new
    value so it reconnects to the right place.
    """
    self.reload_zkdbinfo()
    host = self.get_path(true_path)
    paths = true_path.split("/")
    db, rw_type = paths[3], paths[-1]
    # 1. reset idle_rwclient connections
    backends = self.servers[db] if db in self.servers else []
    utils.log(utils.cur(), true_path, host,
              backends.copy() if backends else [], len(backends))
    # reset connections when new IP and old IP are different
    for rwclient, pushtime in (backends.copy() if backends else []):
        oldhost = rwclient.getHost(rw_type)
        utils.log(utils.cur(), oldhost, host)
        if oldhost and oldhost != host:
            rwclient.disconnect(host, rw_type)
    # 2. reset busy_proobj rwclient connections
    utils.log(utils.cur(), self.busy_proobj)
    # reset connections when new IP and old IP are different
    for proobj in self.busy_proobj[db]:
        if proobj.server:
            oldhost = proobj.server.getHost(rw_type)
            if oldhost and oldhost != host:
                utils.log(utils.cur(), oldhost, host)
                proobj.server.disconnect(host, rw_type)
def watcher_rw_ip(self, event, true_path):
    """Watcher for db_host_r / db_host_w nodes: refresh the cached db
    info and tear down any backend connection pointing at a stale host."""
    self.reload_zkdbinfo()
    newhost = self.get_path(true_path)
    parts = true_path.split("/")
    db = parts[3]
    rw_type = parts[-1]
    # 1. idle pooled read/write clients
    backends = []
    if db in self.servers:
        backends = self.servers[db]
    snapshot = backends.copy() if backends else []
    utils.log(utils.cur(), true_path, newhost, snapshot, len(backends))
    for rwclient, pushtime in snapshot:
        current = rwclient.getHost(rw_type)
        utils.log(utils.cur(), current, newhost)
        if current and current != newhost:
            rwclient.disconnect(newhost, rw_type)
    # 2. clients currently attached to a proxy object
    utils.log(utils.cur(), self.busy_proobj)
    for proobj in self.busy_proobj[db]:
        server = proobj.server
        if not server:
            continue
        current = server.getHost(rw_type)
        if current and current != newhost:
            utils.log(utils.cur(), current, newhost)
            server.disconnect(newhost, rw_type)
def verify(self, dbtype, forceOk, proxyconns, busyobj, idx):
    """Validate the tokenized SQL statement and decide master routing.

    dbtype: ZKConf.INTERNAL selects the merged whitelist; forceOk skips
    whitelist checks entirely; proxyconns/busyobj/idx are forwarded to
    deal_sql_private.  Returns self.tuple(...) where the first element
    is "err" (with an error dict) on rejection, otherwise the use_master
    flag (True when the statement must run on the write node).
    """
    tag = SQLState.SQL_NORMAL
    # lazily tokenize on first use
    if self.sql and len(self.tokens) == 0:
        self.tokenize()
    if not len(self.tokens):
        err_dict = dict(ErrorCode.SQL_FORBIDDEN)
        return self.tuple("err", tag, err_dict, self.opts)
    use_master = False
    first_token, first_token_id = self.tokens[0]
    utils.log(utils.cur(), first_token, first_token_id, self.tokens)
    if first_token_id == sce_token.TK_SQL_SELECT:
        # SELECTs normally go to the read node, except those that must
        # observe the previous write's session state:
        # when self.tokens[1:] is None, use_master is still False
        for token, token_id in self.tokens[1:]:
            # select last_insert_id(); or select row_count();
            if token_id == sce_token.TK_FUNCTION and token in ("LAST_INSERT_ID", "ROW_COUNT"):
                use_master = True
                break
            # select @@insert_id;
            elif token_id == sce_token.TK_LITERAL and token in ("@@INSERT_ID", "@@IDENTITY"):
                use_master = True
                break
            else:
                use_master = False
    else:
        # every non-SELECT statement goes to the master
        use_master = True
    if forceOk:
        return self.tuple(use_master, tag, "", self.opts)
    wls = self.merged_wls if dbtype == ZKConf.INTERNAL else self.whitelists
    # the leading keyword must be whitelisted at all
    if first_token not in wls:
        err_dict = dict(ErrorCode.SQL_FORBIDDEN)
        return self.tuple("err", tag, err_dict, self.opts)
    whitelist = wls[first_token]
    assert isinstance(whitelist, (list, tuple)), "whitelist must be a list or tuple"
    privs = self.get_privs(whitelist)
    if not privs:
        err_dict = dict(ErrorCode.SQL_FORBIDDEN)
        return self.tuple("err", tag, err_dict, self.opts)
    # entries whose state differs from SQL_NORMAL need special handling
    specials = [x for x in privs if not x[2][0] == SQLState.SQL_NORMAL]
    utils.log(utils.cur(), privs, specials)
    for priv in specials:
        sql_state = priv[2][0]
        if sql_state == SQLState.SQL_ENGINE:
            tmp = self.deal_sql_engine(tag)
            if tmp:
                return tmp
        elif sql_state == SQLState.SQL_PRIVATE:
            tmp = self.deal_sql_private(tag, proxyconns, busyobj, idx)
            if tmp:
                return tmp
    return self.tuple(use_master, tag, "", self.opts)
def check(self):
    """Periodic sweep: erase forbid entries that are invalid or expired."""
    snapshot = dict(self.sf.forbidinfo)
    for db, info in snapshot.iteritems():
        kind = info[Forbid.KEY_TYPE]
        # unknown forbid type -> drop the node
        if kind not in (Forbid.FORBID_WORKING, Forbid.FORBID_FOREVER):
            utils.log(utils.cur(), "erase forbid", db)
            self.sf.zk.erase_forbid(db)
        # time-bounded forbid whose window has elapsed -> drop the node
        expired = (kind == Forbid.FORBID_WORKING and
                   info[Forbid.KEY_START] + info[Forbid.KEY_DURATION] < time.time())
        if expired:
            utils.log(utils.cur(), "erase forbid", db)
            self.sf.zk.erase_forbid(db)
def unpack_command(packet, idx = 0):
    """Decode a MySQL client command packet.

    packet: raw bytes including the 4-byte header.
    idx: starting offset -- NOTE(review): it is immediately overwritten
    by skip_header(), so the parameter is effectively unused; confirm.
    Returns (tag, payload): payload is the SQL text for COM_QUERY, the
    statement id for COM_STMT_EXECUTE, and 0 for every other command.
    """
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag == Command.COM_QUERY:
        # the rest of the packet is the SQL text
        cmd, idx = unpack_string(packet, len(packet) - idx, idx)
        utils.log(utils.cur(), cmd)
        return tag, cmd
    elif tag == Command.COM_STMT_EXECUTE:
        statement_id, idx = unpack_int32(packet, idx)
        utils.log(utils.cur(), "Statement ID", statement_id)
        return tag, statement_id
    return tag, 0
def unpack_command(packet, idx=0):
    """Parse the command byte of a client packet plus its payload."""
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag == Command.COM_QUERY:
        # remainder of the packet is the query text
        sql, idx = unpack_string(packet, len(packet) - idx, idx)
        utils.log(utils.cur(), sql)
        return tag, sql
    if tag == Command.COM_STMT_EXECUTE:
        stmt_id, idx = unpack_int32(packet, idx)
        utils.log(utils.cur(), "Statement ID", stmt_id)
        return tag, stmt_id
    return tag, 0
def _trans(cur, *args):
    """Insert one row built from the enclosing scope's dbinfo dict into
    TABLE_DBINFO; COMMIT on success, ROLLBACK and re-raise on failure.

    cur: live DB cursor (closure also supplies dbinfo/DBConf/utils).
    NOTE(review): SQL is assembled by string interpolation -- safe only
    if dbinfo values are trusted/escaped upstream; confirm.
    """
    try:
        keys = ",".join(["`%s`" % k for k in dbinfo.keys()])
        vals = ",".join(["'%s'" % v for v in dbinfo.values()])
        sql = "INSERT INTO `%s` (%s) VALUES(%s)" % (
            DBConf.TABLE_DBINFO, keys, vals)
        utils.log(utils.cur(), keys, vals, sql)
        cur.execute(sql)
        cur.execute("COMMIT")
    except Exception, e:
        cur.execute("ROLLBACK")
        utils.err(utils.cur(), e)
        # wrap and re-raise so the caller sees the failure
        raise Exception(e)
def _trans(cur, *args):
    """Ensure a quota row exists for dbtype["dbtype"] in the table named
    by dbtype["table"]; insert the defaults carried in dbtype when the
    row is missing.  COMMIT on success, ROLLBACK on any error.

    dbtype is a dict from the enclosing scope used for %-interpolation.
    NOTE(review): values are spliced into SQL without escaping -- relies
    on trusted input.
    """
    cur.execute("START TRANSACTION")
    try:
        cur.execute("SELECT * FROM `%(table)s` WHERE dbtype='%(dbtype)s'" % dbtype)
        dbnums = cur.fetchall()
        if len(dbnums) == 0:
            insert_sql = """INSERT INTO `%(table)s` (`dbtype`, `maxconn`, `disk`, `network`, `slowquery_duration`, `slowquery_times`, `tablenums`, `tablerows`, `tablesize`, `cputime`) VALUES ('%(dbtype)s', %(maxconn)d, %(disk)d, %(network)d, %(slowquery_duration)d, %(slowquery_times)d, %(tablenums)d, %(tablerows)d, %(tablesize)d, %(cputime)d)""" % dbtype
            utils.log(utils.cur(), insert_sql)
            cur.execute(insert_sql)
        cur.execute("COMMIT")
    except Exception, err:
        utils.log(utils.cur(), err)
        cur.execute("ROLLBACK")
def _trans(cur, *args): try: keys = ",".join(["`%s`" % k for k in dbinfo.keys()]) vals = ",".join(["'%s'" % v for v in dbinfo.values()]) sql = "INSERT INTO `%s` (%s) VALUES(%s)" % (DBConf.TABLE_DBINFO, keys, vals) utils.log(utils.cur(), keys, vals, sql) cur.execute(sql) cur.execute("COMMIT") except Exception, e: cur.execute("ROLLBACK") utils.err(utils.cur(), e) raise Exception(e)
def _exec(data, record):
    """Upsert helper: UPDATE the TABLE_DBINFO row for `dbnode` when
    `data` (the prior SELECT result) is non-empty, otherwise INSERT
    `record` as a new row.

    dbnode/cur/DBConf come from the enclosing scope.
    NOTE(review): values are spliced into SQL without escaping -- relies
    on trusted input.
    """
    if len(data):
        sql = ",".join(["`%s`='%s'" % (k,v) for k,v in record.items()])
        sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (DBConf.TABLE_DBINFO, sql, dbnode)
        utils.log(utils.cur(), sql)
        cur.execute(sql)
    else:
        record["dbnode"] = dbnode
        keys = ",".join(["`%s`" % k for k in record.keys()])
        # build '%(key)s' placeholders, then fill them from record
        vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
        vals = vals % record
        sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (DBConf.TABLE_DBINFO, keys, vals)
        utils.log(utils.cur(), sql)
        cur.execute(sql)
def _trans(cur, *args):
    """Insert a default quota row for dbtype["dbtype"] when none exists
    yet in the table named by dbtype["table"]; transactional with
    ROLLBACK on error.

    dbtype: dict from the enclosing scope, interpolated directly into
    the SQL (NOTE(review): no escaping -- trusted input assumed).
    """
    cur.execute("START TRANSACTION")
    try:
        cur.execute(
            "SELECT * FROM `%(table)s` WHERE dbtype='%(dbtype)s'" % dbtype)
        dbnums = cur.fetchall()
        if len(dbnums) == 0:
            insert_sql = """INSERT INTO `%(table)s` (`dbtype`, `maxconn`, `disk`, `network`, `slowquery_duration`, `slowquery_times`, `tablenums`, `tablerows`, `tablesize`, `cputime`) VALUES ('%(dbtype)s', %(maxconn)d, %(disk)d, %(network)d, %(slowquery_duration)d, %(slowquery_times)d, %(tablenums)d, %(tablerows)d, %(tablesize)d, %(cputime)d)""" % dbtype
            utils.log(utils.cur(), insert_sql)
            cur.execute(insert_sql)
        cur.execute("COMMIT")
    except Exception, err:
        utils.log(utils.cur(), err)
        cur.execute("ROLLBACK")
def watcher_db_info(self, event, true_path):
    """ZooKeeper child watcher for /database/db_info.

    Registers node watches for newly-appeared databases and closes the
    pooled connections of databases whose node disappeared; finally
    refreshes the cached db list in self.dbs.
    """
    self.reload_zkdbinfo()
    dbs = self.get_path(true_path, child=True)
    dbs = dbs.keys() if dbs else []
    # 1.watch nodes when db_info nodes are new
    newdbs = list(set(dbs) - set(self.dbs))
    for db in newdbs:
        path_authed_ip = os.path.join(ZKConf.ZK_PATH_DB, db, ZKConf.KEY_IP)
        # 2. listen /database/db_info/xxx/authed_ips
        # reset zkdbinfo
        self.zk.watch_node(path_authed_ip, self.watcher_authed_ip)
        path_dbconf = os.path.join(ZKConf.ZK_PATH_DB, db, ZKConf.KEY_DBCONF)
        # 3. listen /database/db_info/xxx/dbconf/db_host_r
        # reset /database/db_info/xxx/dbconf/db_host_w
        # reset zkdbinfo
        # reset busy_proobj rwclient connections
        # reset idle_rwclient connections
        self.zk.watch_node(os.path.join(path_dbconf, ZKConf.KEY_READ), self.watcher_rw_ip)
        self.zk.watch_node(os.path.join(path_dbconf, ZKConf.KEY_WRITE), self.watcher_rw_ip)
    # 2.close connections when db_info nodes are deleted
    diff = list(set(self.dbs) - set(dbs))
    utils.log(utils.cur(), self.dbs, dbs, diff)
    for db in diff:
        if db in self.servers:
            self.servers[db].close_conns()
    # remember the current set for the next diff
    self.dbs = dbs
def register_watches(self):
    """Install all ZooKeeper watches the proxy needs at startup and
    prime self.dbs / self.ips from the current tree contents.

    NOTE(review): when the authed-ips node is empty/missing, self.ips is
    not assigned here at all -- confirm it is initialized elsewhere.
    """
    # 1. listen /database/db_info
    # reset zkdbinfo
    # close connections which are related to deleted nodes under db_info
    dbs = self.get_path(ZKConf.ZK_PATH_DB, child=True)
    self.zk.watch_child(ZKConf.ZK_PATH_DB, self.watcher_db_info)
    dbs = dbs.keys() if dbs else []
    self.dbs = dbs
    for db in dbs:
        path_authed_ip = os.path.join(ZKConf.ZK_PATH_DB, db, ZKConf.KEY_IP)
        # 2. listen /database/db_info/xxx/authed_ips
        # reset zkdbinfo
        self.zk.watch_node(path_authed_ip, self.watcher_authed_ip)
        path_dbconf = os.path.join(ZKConf.ZK_PATH_DB, db, ZKConf.KEY_DBCONF)
        # 3. listen /database/db_info/xxx/dbconf/db_host_r
        # listen /database/db_info/xxx/dbconf/db_host_w
        # reset zkdbinfo
        # reset busy_proobj rwclient connections
        # reset idle_rwclient connections
        self.zk.watch_node(os.path.join(path_dbconf, ZKConf.KEY_READ), self.watcher_rw_ip)
        self.zk.watch_node(os.path.join(path_dbconf, ZKConf.KEY_WRITE), self.watcher_rw_ip)
    # 4. listen /database/authed_ips
    # reset zkdbinfo
    # reset self.ips
    ip_json = self.get_path(ZKConf.ZK_PATH_IPS)
    try:
        if ip_json:
            self.ips = ip_helper.IpRangeList(*tuple(cjson.decode(ip_json)))
    except Exception, err:
        utils.err(utils.cur(), err)
        self.ips = None
def unpack_auth_packet(buf): """ analyze authentication packet from client """ idx = skip_header() auth = { "client_flags": None, "max_packet_size": None, "charset_number": None, "user": None, "scramble_buff": None, "database": None } try: auth["client_flags"], idx = unpack_int32(buf, idx) auth["max_packet_size"], idx = unpack_int32(buf, idx) auth["charset_number"], idx = unpack_int8(buf, idx) idx = skip_packetn(23, idx) auth["user"], idx = unpack_string_null(buf, idx) scramble_len, idx = unpack_lenenc(buf, idx) auth["scramble_buff"], idx = unpack_string(buf, scramble_len, idx) if idx < len(buf): auth["database"], idx = unpack_string_null(buf, idx) except Exception, err: utils.err(utils.cur(), err)
def unpack_auth_packet(buf):
    """ analyze authentication packet from client

    Parses client_flags, max_packet_size, charset, user, scramble and
    the optional trailing database name into a dict.
    NOTE(review): no return statement is visible, so callers receive
    None rather than the parsed dict -- confirm whether 'return auth'
    was dropped.
    """
    idx = skip_header()
    auth = {
        "client_flags" : None,
        "max_packet_size" : None,
        "charset_number" : None,
        "user" : None,
        "scramble_buff" : None,
        "database" : None
    }
    try:
        auth["client_flags"], idx = unpack_int32(buf, idx)
        auth["max_packet_size"], idx = unpack_int32(buf, idx)
        auth["charset_number"], idx = unpack_int8(buf, idx)
        # 23 reserved filler bytes
        idx = skip_packetn(23, idx)
        auth["user"], idx = unpack_string_null(buf, idx)
        scramble_len, idx = unpack_lenenc(buf, idx)
        auth["scramble_buff"], idx = unpack_string(buf, scramble_len, idx)
        # database name only present when bytes remain
        if idx < len(buf):
            auth["database"], idx = unpack_string_null(buf, idx)
    except Exception, err:
        utils.err(utils.cur(), err)
def _trans(cur, *args):
    """Synchronize every ZooKeeper db node into TABLE_DBINFO within one
    transaction: upsert a row per node, then delete rows whose node no
    longer exists.  Uses root/path/dbnodes/self from the enclosing scope.

    NOTE(review): SQL is built by interpolation (trusted input assumed),
    and an empty dbnodes list would yield 'NOT IN ()' -- a MySQL syntax
    error; confirm dbnodes is never empty here.
    """
    def _exec(data, record):
        # upsert: UPDATE when a row for dbnode exists, else INSERT
        if len(data):
            sql = ",".join(["`%s`='%s'" % (k,v) for k,v in record.items()])
            sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (DBConf.TABLE_DBINFO, sql, dbnode)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
        else:
            record["dbnode"] = dbnode
            keys = ",".join(["`%s`" % k for k in record.keys()])
            vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
            vals = vals % record
            sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (DBConf.TABLE_DBINFO, keys, vals)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
    cur.execute("START TRANSACTION")
    for dbnode in dbnodes:
        if not dbnode:
            continue
        record = dict()
        # collect the leaf values described by `path` for this node
        for key, value in path.items():
            node = os.path.join(root, dbnode, value)
            if self.zk.exists(node, None):
                (data, meta) = self.zk.get(node, None)
                record[key] = data
        cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" % (DBConf.TABLE_DBINFO, dbnode))
        dbinfo = cur.fetchall()
        _exec(dbinfo, record)
    # delete records that have been already deleted
    sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (DBConf.TABLE_DBINFO, ",".join(["'"+d+"'" for d in dbnodes]))
    utils.log(utils.cur(), sql)
    cur.execute(sql)
    cur.execute("COMMIT")
def unpack_handshake_packet(buf): idx = skip_header() handshake = { "protocol_version" : None, "server_version" : None, "thread_id" : None, "scramble_1" : None, "server_capabilities" : None, "language" : None, "server_status" : None, "scramble_2" : None, "native_password" : None, "scramble" : None } try: handshake["protocol_version"], idx = unpack_int8(buf, idx) handshake["server_version"], idx = unpack_string_null(buf, idx) if idx == -1: sys.exit(0) handshake["thread_id"], idx = unpack_int32(buf, idx) handshake["scramble_1"], idx = unpack_string(buf, 8, idx) idx = skip_packetn(1, idx) handshake["server_capabilities"], idx = unpack_int16(buf, idx) handshake["language"], idx = unpack_int8(buf, idx) handshake["server_status"], idx = unpack_int16(buf, idx) idx = skip_packetn(13, idx) if handshake["server_capabilities"] & ServerCapability.CLIENT_SECURE_CONNECTION: handshake["scramble_2"], idx = unpack_string(buf, 12, idx) idx = skip_packetn(1, idx) if idx < len(buf): handshake["native_password"], idx = unpack_string_null(buf, idx) except Exception, msg: utils.err(utils.cur(), msg)
def unpack_handshake_packet(buf):
    """Parse the server's initial handshake packet into a field dict.

    NOTE(review): no return statement is visible, so callers receive
    None -- confirm whether 'return handshake' was dropped.  Also
    sys.exit(0) on a missing server_version terminator is drastic for a
    parser; confirm intent.
    """
    idx = skip_header()
    handshake = {
        "protocol_version": None,
        "server_version": None,
        "thread_id": None,
        "scramble_1": None,
        "server_capabilities": None,
        "language": None,
        "server_status": None,
        "scramble_2": None,
        "native_password": None,
        "scramble": None
    }
    try:
        handshake["protocol_version"], idx = unpack_int8(buf, idx)
        handshake["server_version"], idx = unpack_string_null(buf, idx)
        if idx == -1:
            sys.exit(0)
        handshake["thread_id"], idx = unpack_int32(buf, idx)
        handshake["scramble_1"], idx = unpack_string(buf, 8, idx)
        idx = skip_packetn(1, idx)  # filler byte
        handshake["server_capabilities"], idx = unpack_int16(buf, idx)
        handshake["language"], idx = unpack_int8(buf, idx)
        handshake["server_status"], idx = unpack_int16(buf, idx)
        idx = skip_packetn(13, idx)  # reserved bytes
        if handshake[
            "server_capabilities"] & ServerCapability.CLIENT_SECURE_CONNECTION:
            handshake["scramble_2"], idx = unpack_string(buf, 12, idx)
            idx = skip_packetn(1, idx)
        if idx < len(buf):
            handshake["native_password"], idx = unpack_string_null(buf, idx)
    except Exception, msg:
        utils.err(utils.cur(), msg)
def _clientAuth(self, data):
    """Authenticate a connecting client and attach backend connections.

    data: raw auth packet bytes from the client.
    1. parse auth packet, db is mandatory
    2. retrieve database info from zookeeper and auth the peer IP
    3. assign a pair of read/write connections when passed
    """
    auth = analyze_packet(data)
    if auth == -1 or not auth["database"]:
        self._write(self._goWrong(ErrorCode.PACKET_WRONG, self.next_idx))
        return
    """ 2. retrieve database info from zookeeper and auth """
    is_legal = self._zookeeperAuth(auth)
    utils.log(utils.cur(), "after zkauth", is_legal)
    """ 3. assign a pair of read/write connections when passed """
    if is_legal:
        self.database = auth["database"]
        self.dbname = self.getDBname()
        utils.log(utils.cur(), self.database)
        def callback(me, old=False):
            # invoked once the factory has attached a backend RWClient;
            # old=True means a pooled connection was reused
            # utils.log(utils.cur(), "auth inside")
            pkts = me.server.writeClient.protocol.packets
            if old:
                me._write(me._goRight(me.next_idx))
                me.client_authed = True
                stats_conns.incr(self.database)
                utils.log(utils.cur(), stats_conns)
                return
            if pkts["err"]:
                # backend auth failed: relay its error packet verbatim
                me._write(pkts["err"])
            else:
                """ ** NOTICE ** be care of index in pkts["ok"] and pkts["err"] """
                # utils.log(utils.cur(), pkts)
                # rewrite the sequence byte (offset 3) to \x02 for the client
                p = pkts["ok"][:3] + "\x02" + pkts["ok"][4:]
                me._write(p)
                me.client_authed = True
                stats_conns.incr(self.database)
                utils.log(utils.cur(), stats_conns)
        self.factory.getServer(self.database, callback, self)
def unpack_database(packet, idx = 0):
    """Extract the schema name from a COM_INIT_DB packet.

    Returns the database name (str) for COM_INIT_DB, otherwise the raw
    command tag (int).  NOTE(review): mixed return types -- callers must
    discriminate; also the idx parameter is overwritten by skip_header()
    and is effectively unused.
    """
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag == Command.COM_INIT_DB:
        # remainder of the packet is the schema name
        cmd, idx = unpack_string(packet, len(packet) - idx, idx)
        utils.log(utils.cur(), cmd)
        return cmd
    return tag
def watcher_ip(self, event, true_path): self.reload_zkdbinfo() ip_json = self.get_path(true_path) try: self.ips = ip_helper.IpRangeList(*tuple(cjson.decode(ip_json))) except Exception, err: utils.err(utils.cur(), err) self.ips = None
def unpack_error_packet(buf):
    """Decode a MySQL ERR packet.

    buf: raw packet bytes including the 4-byte header.
    Returns (field_count, errno, sqlstate, message); the pipe-joined
    fields are also logged for diagnostics.
    """
    idx = skip_header()
    field_count, idx = unpack_int8(buf, idx)
    errno, idx = unpack_int16(buf, idx)
    sqlstate, idx = unpack_string(buf, 6, idx) # "#state"
    # everything remaining is the human-readable message
    message, idx = unpack_string(buf, len(buf)-idx, idx)
    utils.err(utils.cur(), "%s|%s|%s|%s" % (field_count, errno, sqlstate, message))
    return (field_count, errno, sqlstate, message)
def unpack_database(packet, idx=0):
    """Return the schema name carried by a COM_INIT_DB packet, or the
    raw command tag for any other command."""
    idx = skip_header()
    tag, idx = unpack_int8(packet, idx)
    if tag != Command.COM_INIT_DB:
        return tag
    # remainder of the packet is the schema name
    dbname, idx = unpack_string(packet, len(packet) - idx, idx)
    utils.log(utils.cur(), dbname)
    return dbname
def _exec(data, record):
    """Upsert helper: UPDATE the existing TABLE_DBINFO row for `dbnode`
    when `data` (the prior SELECT result) is non-empty, else INSERT
    `record` as a new row.  dbnode/cur/DBConf come from the enclosing
    scope.  NOTE(review): no SQL escaping -- trusted input assumed.
    """
    if len(data):
        sql = ",".join(
            ["`%s`='%s'" % (k, v) for k, v in record.items()])
        sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (
            DBConf.TABLE_DBINFO, sql, dbnode)
        utils.log(utils.cur(), sql)
        cur.execute(sql)
    else:
        record["dbnode"] = dbnode
        keys = ",".join(["`%s`" % k for k in record.keys()])
        # build '%(key)s' placeholders, then fill from record
        vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
        vals = vals % record
        sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (
            DBConf.TABLE_DBINFO, keys, vals)
        utils.log(utils.cur(), sql)
        cur.execute(sql)
def unpack_error_packet(buf):
    """Decode a MySQL ERR packet into (field_count, errno, sqlstate, message)."""
    offset = skip_header()
    field_count, offset = unpack_int8(buf, offset)
    errno, offset = unpack_int16(buf, offset)
    # 6 bytes: the '#' marker plus the 5-char SQL state
    sqlstate, offset = unpack_string(buf, 6, offset)
    remaining = len(buf) - offset
    message, offset = unpack_string(buf, remaining, offset)
    parts = (field_count, errno, sqlstate, message)
    utils.err(utils.cur(), "%s|%s|%s|%s" % parts)
    return parts
def stopFactory(self):
    """Twisted factory teardown: stop background services (ZK, MQ,
    monitor, stats logger) and close every pooled backend connection."""
    utils.log(utils.cur(), "dbproxy stopping")
    self.zk.close()
    self.mq.stop()
    self.monitor.stop()
    self.proxy_stats_log.stop()
    # time.sleep(0.5)
    self.close_free_servers()
    self.servers.stop()
    stats_conns.close()
def watcher_forbid(self, event, true_path): self.reload_zkdbinfo() dbs_forbid = self.get_path(true_path, child=True) forbidinfo = collections.defaultdict(dict) for db in dbs_forbid: try: forbid_db = self.get_path(os.path.join(true_path, db)) forbidinfo[db] = cjson.decode(forbid_db) except Exception, err: utils.err(utils.cur(), err)
def getDBname(self): # rewrite "USE $self.database" to "USE dbname" dbname = None try: dbpath = os.path.join(ZKConf.ZK_PATH_DB, self.database, ZKConf.KEY_DBCONF, ZKConf.KEY_DB) dbname = self.factory.get_path(dbpath) utils.log(utils.cur(), dbpath, dbname) except: pass return dbname
def sync_mysql(self):
    """Mirror every db node under ZK_PATH_DB into the MySQL dbinfo table.

    For each db node, reads the leaf values listed in `path`, then a
    transaction upserts one row per node and deletes rows whose node no
    longer exists.  NOTE(review): username/password leaf names appear
    masked ('******') in this copy -- confirm the real values; also
    nothing visible here invokes _transaction -- the call presumably
    follows in code not shown.
    """
    root = ZKConf.ZK_PATH_DB
    # logical column -> relative zk leaf path
    path = {
        "dbname": "dbconf/db_db",
        "dbtype": "dbtype",
        "dbdisabled": "db_disabled",
        "username": "******",
        "password": "******",
        "host_r": "dbconf/db_host_r",
        "host_w": "dbconf/db_host_w",
        "ips": "authed_ips"
    }
    dbnodes = self.zk.get_children(root, None)
    utils.log(utils.cur(), dbnodes)
    def _trans(cur, *args):
        # one transaction: upsert all rows, then purge vanished nodes
        def _exec(data, record):
            # UPDATE when a row for dbnode exists, else INSERT
            if len(data):
                sql = ",".join(["`%s`='%s'" % (k,v) for k,v in record.items()])
                sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (DBConf.TABLE_DBINFO, sql, dbnode)
                utils.log(utils.cur(), sql)
                cur.execute(sql)
            else:
                record["dbnode"] = dbnode
                keys = ",".join(["`%s`" % k for k in record.keys()])
                vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
                vals = vals % record
                sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (DBConf.TABLE_DBINFO, keys, vals)
                utils.log(utils.cur(), sql)
                cur.execute(sql)
        cur.execute("START TRANSACTION")
        for dbnode in dbnodes:
            if not dbnode:
                continue
            record = dict()
            for key, value in path.items():
                node = os.path.join(root, dbnode, value)
                if self.zk.exists(node, None):
                    (data, meta) = self.zk.get(node, None)
                    record[key] = data
            cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" % (DBConf.TABLE_DBINFO, dbnode))
            dbinfo = cur.fetchall()
            _exec(dbinfo, record)
        # delete records that have been already deleted
        sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (DBConf.TABLE_DBINFO, ",".join(["'"+d+"'" for d in dbnodes]))
        utils.log(utils.cur(), sql)
        cur.execute(sql)
        cur.execute("COMMIT")
    def _transaction(cur, *args):
        # wrapper: roll back and log when _trans raises
        try:
            _trans(cur, *args)
        except Exception, err:
            cur.execute("ROLLBACK")
            utils.log(utils.cur(), err)
def getServer(self, db, callback, proObj):
    """Hand an idle pooled RWClient for `db` to proObj when one exists.

    callback(proObj, True) is invoked on success (old=True signals
    reuse of a pooled connection).  On a pool error, proObj.pool_error
    is returned instead.
    NOTE(review): when no pooled client exists this method does nothing
    visible here -- presumably a fresh connection is established
    elsewhere; confirm.
    """
    if db in self.servers and len(self.servers[db]):
        if callable(callback):
            utils.log(utils.cur(), "use old RWClient")
            try:
                proObj.server = self.servers.pop(db)
                proObj.server.changeProtocolObj(proObj)
                self.busy_proobj[db].append(proObj)
            except Exception, err:
                return proObj.pool_error(err)
            callback(proObj, True)
def connectionLost(self, reason):
    """Twisted callback: the client socket closed; release backend
    resources and return the RWClient to the pool.

    reason: twisted Failure describing why the connection went away.
    """
    utils.reset_logconf()
    self.factory.conns -= 1
    utils.log(utils.cur(), "client is losing proxy %s" % self.factory.conns, self.requests)
    # mysql_stmt_close: drop any prepared statement still open backend-side
    if self.stmt_id:
        self.server.mysql_stmt_close(self.stmt_id)
    # reclaim connection
    self.factory.takeServer(self.server, self)
    # clean connection
    self.server = None
    self.timeout.stop()
def _exec(dblist, sql):
    """Push each row of dblist (a TABLE_DBINFO result set) into ZooKeeper.

    dblist: sequence of 10-tuples matching the LOGCONFIG fields.
    sql: unused here -- NOTE(review): presumably kept to match the
    caller's expected signature; confirm.
    """
    LOGCONFIG = namedtuple(
        "LOGCONFIG",
        "id,dbname,dbtype,dbdisabled,username,password,host_r,host_w,ips,dbnode"
    )
    for row in map(LOGCONFIG._make, dblist):
        utils.log(utils.cur(), row)
        dbpath = os.path.join(self.rootpath, row.dbnode)
        dbroot = self.zk.exists(dbpath, None)
        if not dbroot:
            # the per-db root node must exist before leaves are created
            self.zk.create(dbpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
        self.create_leafs(row, self.rootpath + "/" + row.dbnode)
def _exec(dblist, sql):
    """Mirror each DB row into ZooKeeper, creating the per-db root node
    first when it does not exist yet."""
    fields = "id,dbname,dbtype,dbdisabled,username,password,host_r,host_w,ips,dbnode"
    LOGCONFIG = namedtuple("LOGCONFIG", fields)
    for row in map(LOGCONFIG._make, dblist):
        utils.log(utils.cur(), row)
        dbpath = os.path.join(self.rootpath, row.dbnode)
        if not self.zk.exists(dbpath, None):
            self.zk.create(dbpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
        self.create_leafs(row, self.rootpath + "/" + row.dbnode)
def _create(info, path):
    """Recursively mirror the nested dict `info` under zk node `path`.

    Scalar values become leaf nodes holding str(value); dict values
    become empty interior nodes that are then descended into.  Nodes
    that already exist are left untouched (no recursion into them).
    """
    for key, value in info.iteritems():
        subpath = "".join([path, "/", key])
        subnode = self.zk.exists(subpath, None)
        if not subnode:
            if type(value) is not dict:
                utils.log(utils.cur(), subpath, value)
                self.zk.create(subpath, str(value), [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
            else:
                # interior node: create empty, then recurse
                self.zk.create(subpath, "", [ZKConf.ZOO_CREATOR_ALL_ACL], 0)
                _create(value, subpath)
def takeServer(self, server, proObj):
    """Return a finished RWClient to the idle pool.

    server: the backend RWClient being released; proObj: the client-side
    protocol object that was using it.  Only fully-ready servers
    (ready == 2) are reclaimed.
    """
    if server and hasattr(server, "ready") and server.ready == 2:
        # clean SQL cache
        server.raw_sql_clear()
        db = server.getDB()
        stats_conns.decr(db)
        try:
            self.busy_proobj[db].remove(proObj)
        except (KeyError, ValueError):
            # db unknown or proObj already removed -- nothing to undo
            # (was a bare except; narrowed to the two expected errors)
            pass
        server.changeProtocolObj(None)
        # reuse the db computed above instead of a second getDB() call
        self.servers.push(db, server)
        utils.log(utils.cur(), stats_conns, self.servers)
def sendForbid(self, data, db, opts):
    """Check forbid constraints on databases, tables and operations.

    data: forbid dict (object/type/crud/start/duration/errmsg keys);
    db: database name; opts: {"db": [ops], "tb": {table: [ops]}} -- the
    operations the current statement performs.
    Returns True when the statement is forbidden (a QUOTA_EXCEEDED error
    packet is written to the client), otherwise False.
    """
    assert isinstance(data, dict)
    assert "object" in data and data["object"] in (Forbid.FORBID_DATABASE, Forbid.FORBID_TABLE)
    def _realForbid(crud, dbtb, errmsg, isDB=True):
        # compute remaining forbid time: bounded for WORKING forbids,
        # effectively infinite otherwise
        forbid_duration = int(data["start"] + data["duration"] - time.time()) if data["type"] == Forbid.FORBID_WORKING else sys.maxint
        forbid_error = dict(ErrorCode.QUOTA_EXCEEDED)
        forbid_error["message"] = forbid_error["message"] % (crud, "Database" if isDB else "Table", dbtb, errmsg, forbid_duration)
        self._write(self._goWrong(forbid_error, self.next_idx))
        return True
    crud = data["crud"]
    utils.log(utils.cur(), crud)
    if not crud:
        return False
    if data["object"] == Forbid.FORBID_DATABASE:
        # database-level forbid: crud is a flat list of operations
        allopts = opts["db"]
        utils.log(utils.cur(), allopts)
        assert type(allopts) is list
        if Forbid.OPERATION_DEFAULT in crud:
            # DEFAULT forbids every operation on the database
            return _realForbid([Forbid.OPERATION_DEFAULT], db, data["errmsg"], True)
        else:
            if set(allopts).intersection(set(crud)):
                return _realForbid(crud, db, data["errmsg"], True)
    else:
        # table-level forbid: crud maps table name -> forbidden ops
        allopts = opts["tb"]
        utils.log(utils.cur(), allopts)
        assert type(allopts) is dict
        for tbl, tbopts in allopts.iteritems():
            if tbl in crud:
                if Forbid.OPERATION_DEFAULT in crud[tbl]:
                    return _realForbid([Forbid.OPERATION_DEFAULT], tbl, data["errmsg"], False)
                if set(tbopts).intersection(set(crud[tbl])):
                    return _realForbid(crud[tbl], tbl, data["errmsg"], False)
    return False
def check_whitelist(self, wl, crud, token_ids):
    """Check whether the tokenized statement matches whitelist entry wl.

    wl: (crud_tag, marker_token_id, kw1, kw2, ...) -- presumably; the
    required keywords start at wl[2] (TODO confirm entry layout).
    crud: operation tag recorded against any matched table.
    token_ids: flat list of token ids for the statement.
    Returns False on mismatch, else (True, tbl_or_None, wl); a matched
    table name is also appended to self.tbopts[tbl].
    """
    # need at least the leading token plus every required keyword
    if len(self.tokens) < len(wl[2:])+1:
        return False
    i = 0
    for kw in wl[2:]:
        i += 1
        if self.tokens[i][0] != kw:
            return False
    tbl = None
    utils.log(utils.cur(), wl, wl[1], token_ids, self.tokens)
    if wl[1] != sce_token.TK_UNKNOWN:
        try:
            # locate the marker token past the keywords; the literal
            # right after it names the table
            idx = token_ids.index(wl[1], i)
            utils.log(utils.cur(), wl, idx)
            if idx+1 < len(self.tokens) \
               and self.tokens[idx+1][1] == sce_token.TK_LITERAL:
                tbl = self.tokens[idx+1][0]
        except:
            # marker not found -- no table to record
            pass
    if tbl:
        if tbl in self.tbopts:
            self.tbopts[tbl].append(crud)
        else:
            self.tbopts[tbl] = [crud]
    return (True, tbl, wl)
def callback(me, old=False):
    """Completion callback invoked once a backend RWClient is attached.

    me: the client-side protocol object; old=True means a pooled
    connection was reused, so an OK packet is sent directly.  Relies on
    `self` and `stats_conns` from the enclosing scope (this is a closure
    defined inside _clientAuth).
    """
    # utils.log(utils.cur(), "auth inside")
    pkts = me.server.writeClient.protocol.packets
    if old:
        me._write(me._goRight(me.next_idx))
        me.client_authed = True
        stats_conns.incr(self.database)
        utils.log(utils.cur(), stats_conns)
        return
    if pkts["err"]:
        # backend auth failed: relay its error packet verbatim
        me._write(pkts["err"])
    else:
        """ ** NOTICE ** be care of index in pkts["ok"] and pkts["err"] """
        # utils.log(utils.cur(), pkts)
        # rewrite the packet sequence byte (offset 3) to \x02 for the client
        p = pkts["ok"][:3] + "\x02" + pkts["ok"][4:]
        me._write(p)
        me.client_authed = True
        stats_conns.incr(self.database)
        utils.log(utils.cur(), stats_conns)
def check_whitelist(self, wl, crud, token_ids):
    """Match the tokenized SQL against one whitelist entry; on success
    record the touched table (if any) and return (True, tbl, wl)."""
    required = wl[2:]
    if len(self.tokens) < len(required) + 1:
        return False
    # tokens[1:] must start with the entry's required keywords
    i = 0
    for pos, kw in enumerate(required, start=1):
        i = pos
        if self.tokens[pos][0] != kw:
            return False
    tbl = None
    utils.log(utils.cur(), wl, wl[1], token_ids, self.tokens)
    if wl[1] != sce_token.TK_UNKNOWN:
        # the literal right after the marker token names the table
        try:
            marker = token_ids.index(wl[1], i)
            utils.log(utils.cur(), wl, marker)
            nxt = marker + 1
            if nxt < len(self.tokens) and self.tokens[nxt][1] == sce_token.TK_LITERAL:
                tbl = self.tokens[nxt][0]
        except:
            pass
    if tbl:
        self.tbopts.setdefault(tbl, []).append(crud)
    return (True, tbl, wl)
def unpack_ok_packet(buf): idx = skip_header() val, idx = unpack_int8(buf, idx) if val is not AuthConf.OK_STATUS: return False ok = { "affected_rows": None, "insert_id": None, "server_status": None, "warning_count": None, } try: ok["affected_rows"], idx = unpack_lenenc(buf, idx) ok["insert_id"], idx = unpack_lenenc(buf, idx) ok["server_status"], idx = unpack_int16(buf, idx) ok["warning_count"], idx = unpack_int16(buf, idx) except Exception, err: utils.err(utils.cur(), err)
def unpack_ok_packet(buf):
    """Parse a MySQL OK packet into a field dict; False for non-OK input.

    NOTE(review): 'is not' performs identity comparison on an int --
    works only via CPython small-int caching; should be '!='.  Also no
    return statement is visible after the try block, so callers receive
    None on the OK path -- confirm whether 'return ok' was dropped.
    """
    idx = skip_header()
    val, idx = unpack_int8(buf, idx)
    if val is not AuthConf.OK_STATUS:
        return False
    ok = {
        "affected_rows" : None,
        "insert_id" : None,
        "server_status" : None,
        "warning_count" : None,
    }
    try:
        ok["affected_rows"], idx = unpack_lenenc(buf, idx)
        ok["insert_id"], idx = unpack_lenenc(buf, idx)
        ok["server_status"], idx = unpack_int16(buf, idx)
        ok["warning_count"], idx = unpack_int16(buf, idx)
    except Exception, err:
        utils.err(utils.cur(), err)
def _trans(cur, *args):
    """Synchronize every ZooKeeper db node into TABLE_DBINFO in one
    transaction: upsert a row per node, then purge rows whose node is
    gone.  root/path/dbnodes/self come from the enclosing scope.

    NOTE(review): SQL built by interpolation (trusted input assumed);
    an empty dbnodes list would produce 'NOT IN ()' -- a MySQL syntax
    error; confirm dbnodes is never empty here.
    """
    def _exec(data, record):
        # upsert: UPDATE when a row for dbnode exists, else INSERT
        if len(data):
            sql = ",".join(
                ["`%s`='%s'" % (k, v) for k, v in record.items()])
            sql = "UPDATE `%s` SET %s WHERE `dbnode`='%s'" % (
                DBConf.TABLE_DBINFO, sql, dbnode)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
        else:
            record["dbnode"] = dbnode
            keys = ",".join(["`%s`" % k for k in record.keys()])
            vals = ",".join(["'%%(%s)s'" % v for v in record.keys()])
            vals = vals % record
            sql = "INSERT INTO `%s` (%s) VALUES (%s)" % (
                DBConf.TABLE_DBINFO, keys, vals)
            utils.log(utils.cur(), sql)
            cur.execute(sql)
    cur.execute("START TRANSACTION")
    for dbnode in dbnodes:
        if not dbnode:
            continue
        record = dict()
        # collect the leaf values described by `path` for this node
        for key, value in path.items():
            node = os.path.join(root, dbnode, value)
            if self.zk.exists(node, None):
                (data, meta) = self.zk.get(node, None)
                record[key] = data
        cur.execute("SELECT * FROM `%s` WHERE `dbnode` = '%s'" %
                    (DBConf.TABLE_DBINFO, dbnode))
        dbinfo = cur.fetchall()
        _exec(dbinfo, record)
    # delete records that have been already deleted
    sql = "DELETE FROM `%s` WHERE `dbnode` not in (%s) " % (
        DBConf.TABLE_DBINFO, ",".join(["'" + d + "'" for d in dbnodes]))
    utils.log(utils.cur(), sql)
    cur.execute(sql)
    cur.execute("COMMIT")